Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/pci_root.c | 97
-rw-r--r-- drivers/ata/Kconfig | 1
-rw-r--r-- drivers/ata/Makefile | 1
-rw-r--r-- drivers/ata/ahci.c | 11
-rw-r--r-- drivers/ata/ahci.h | 1
-rw-r--r-- drivers/ata/libahci.c | 16
-rw-r--r-- drivers/ata/libata-core.c | 11
-rw-r--r-- drivers/ata/libata-sff.c | 4
-rw-r--r-- drivers/ata/pata_cmd64x.c | 6
-rw-r--r-- drivers/ata/pata_legacy.c | 15
-rw-r--r-- drivers/ata/pata_winbond.c | 282
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 6
-rw-r--r-- drivers/ata/sata_mv.c | 44
-rw-r--r-- drivers/base/firmware_class.c | 2
-rw-r--r-- drivers/block/xen-blkfront.c | 2
-rw-r--r-- drivers/block/xsysace.c | 1
-rw-r--r-- drivers/char/agp/intel-agp.c | 40
-rw-r--r-- drivers/char/agp/intel-agp.h | 21
-rw-r--r-- drivers/char/agp/intel-gtt.c | 66
-rw-r--r-- drivers/char/hangcheck-timer.c | 2
-rw-r--r-- drivers/char/hvc_console.c | 2
-rw-r--r-- drivers/char/hvsi.c | 2
-rw-r--r-- drivers/char/hw_random/n2-drv.c | 2
-rw-r--r-- drivers/char/ip2/ip2main.c | 4
-rw-r--r-- drivers/char/pty.c | 4
-rw-r--r-- drivers/char/rocket.c | 1
-rw-r--r-- drivers/char/synclink_gt.c | 4
-rw-r--r-- drivers/char/sysrq.c | 53
-rw-r--r-- drivers/char/tty_io.c | 94
-rw-r--r-- drivers/char/vt.c | 26
-rw-r--r-- drivers/char/xilinx_hwicap/xilinx_hwicap.c | 1
-rw-r--r-- drivers/edac/amd64_edac.c | 10
-rw-r--r-- drivers/edac/edac_mce_amd.c | 17
-rw-r--r-- drivers/firewire/core-transaction.c | 13
-rw-r--r-- drivers/firewire/net.c | 28
-rw-r--r-- drivers/firewire/ohci.c | 10
-rw-r--r-- drivers/firewire/sbp2.c | 23
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 24
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 25
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 5
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 1
-rw-r--r-- drivers/gpu/drm/drm_lock.c | 2
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 24
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 5
-rw-r--r-- drivers/gpu/drm/drm_vm.c | 2
-rw-r--r-- drivers/gpu/drm/i810/i810_dma.c | 30
-rw-r--r-- drivers/gpu/drm/i830/i830_dma.c | 28
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/dvo.h | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 53
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 123
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 66
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 72
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 365
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 271
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 135
-rw-r--r-- drivers/gpu/drm/i915/i915_opregion.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 97
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 53
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 725
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 654
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 39
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 136
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 77
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 106
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 101
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 111
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 111
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 2148
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo_regs.h | 50
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 162
-rw-r--r-- drivers/gpu/drm/mga/mga_state.c | 26
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 100
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_channel.c | 24
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 47
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_i2c.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_dfp.c | 31
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_tv.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_instmem.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_instmem.c | 13
-rw-r--r-- drivers/gpu/drm/r128/r128_state.c | 35
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 99
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 48
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_agp.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 66
-rw-r--r-- drivers/gpu/drm/radeon/radeon_clocks.c | 58
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 104
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 105
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 30
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 223
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 79
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_state.c | 56
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 61
-rw-r--r-- drivers/gpu/drm/savage/savage_bci.c | 8
-rw-r--r-- drivers/gpu/drm/sis/sis_mm.c | 12
-rw-r--r-- drivers/gpu/drm/via/via_dma.c | 28
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 34
-rw-r--r-- drivers/hid/hid-core.c | 1
-rw-r--r-- drivers/hid/hid-egalax.c | 9
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-picolcd.c | 4
-rw-r--r-- drivers/hid/usbhid/hiddev.c | 11
-rw-r--r-- drivers/hwmon/Kconfig | 6
-rw-r--r-- drivers/hwmon/ads7871.c | 38
-rw-r--r-- drivers/hwmon/coretemp.c | 1
-rw-r--r-- drivers/hwmon/f71882fg.c | 83
-rw-r--r-- drivers/hwmon/k8temp.c | 35
-rw-r--r-- drivers/ieee1394/ohci1394.c | 2
-rw-r--r-- drivers/input/keyboard/hil_kbd.c | 12
-rw-r--r-- drivers/input/keyboard/pxa27x_keypad.c | 2
-rw-r--r-- drivers/input/misc/uinput.c | 2
-rw-r--r-- drivers/input/mousedev.c | 8
-rw-r--r-- drivers/macintosh/via-pmu.c | 42
-rw-r--r-- drivers/md/.gitignore | 4
-rw-r--r-- drivers/md/bitmap.c | 3
-rw-r--r-- drivers/md/md.c | 65
-rw-r--r-- drivers/md/md.h | 2
-rw-r--r-- drivers/md/raid1.c | 21
-rw-r--r-- drivers/md/raid10.c | 17
-rw-r--r-- drivers/md/raid5.c | 13
-rw-r--r-- drivers/media/dvb/mantis/Kconfig | 2
-rw-r--r-- drivers/mmc/core/host.c | 2
-rw-r--r-- drivers/mmc/host/Kconfig | 2
-rw-r--r-- drivers/mmc/host/sdhci-s3c.c | 6
-rw-r--r-- drivers/mmc/host/sdhci.c | 3
-rw-r--r-- drivers/mmc/host/sdhci.h | 2
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 1
-rw-r--r-- drivers/mtd/nand/nand_base.c | 11
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 2
-rw-r--r-- drivers/mtd/ubi/Kconfig.debug | 2
-rw-r--r-- drivers/mtd/ubi/cdev.c | 12
-rw-r--r-- drivers/mtd/ubi/scan.c | 2
-rw-r--r-- drivers/mtd/ubi/wl.c | 3
-rw-r--r-- drivers/net/3c59x.c | 30
-rw-r--r-- drivers/net/b44.c | 9
-rw-r--r-- drivers/net/benet/be.h | 1
-rw-r--r-- drivers/net/benet/be_cmds.c | 8
-rw-r--r-- drivers/net/benet/be_cmds.h | 2
-rw-r--r-- drivers/net/benet/be_ethtool.c | 1
-rw-r--r-- drivers/net/benet/be_hw.h | 7
-rw-r--r-- drivers/net/benet/be_main.c | 47
-rw-r--r-- drivers/net/bonding/bond_main.c | 56
-rw-r--r-- drivers/net/caif/Kconfig | 2
-rw-r--r-- drivers/net/ibm_newemac/debug.c | 2
-rw-r--r-- drivers/net/ks8851.c | 39
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 9
-rw-r--r-- drivers/net/niu.c | 16
-rw-r--r-- drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r-- drivers/net/phy/phy_device.c | 2
-rw-r--r-- drivers/net/pxa168_eth.c | 60
-rw-r--r-- drivers/net/qlcnic/qlcnic_main.c | 9
-rw-r--r-- drivers/net/qlge/qlge_main.c | 4
-rw-r--r-- drivers/net/stmmac/stmmac_main.c | 9
-rw-r--r-- drivers/net/usb/ipheth.c | 12
-rw-r--r-- drivers/net/via-velocity.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/eeprom.h | 2
-rw-r--r-- drivers/net/wireless/ath/regd.h | 1
-rw-r--r-- drivers/net/wireless/libertas/if_sdio.c | 2
-rw-r--r-- drivers/net/wireless/p54/txrx.c | 2
-rw-r--r-- drivers/pci/hotplug/acpi_pcihp.c | 6
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 16
-rw-r--r-- drivers/pci/hotplug/pciehp_acpi.c | 4
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 4
-rw-r--r-- drivers/pci/pci.h | 2
-rw-r--r-- drivers/pci/pcie/Makefile | 3
-rw-r--r-- drivers/pci/pcie/aer/aerdrv.c | 9
-rw-r--r-- drivers/pci/pcie/aer/aerdrv_acpi.c | 36
-rw-r--r-- drivers/pci/pcie/aer/aerdrv_core.c | 14
-rw-r--r-- drivers/pci/pcie/pme.c (renamed from drivers/pci/pcie/pme/pcie_pme.c) | 66
-rw-r--r-- drivers/pci/pcie/pme/Makefile | 8
-rw-r--r-- drivers/pci/pcie/pme/pcie_pme.h | 28
-rw-r--r-- drivers/pci/pcie/pme/pcie_pme_acpi.c | 54
-rw-r--r-- drivers/pci/pcie/portdrv.h | 22
-rw-r--r-- drivers/pci/pcie/portdrv_acpi.c | 77
-rw-r--r-- drivers/pci/pcie/portdrv_core.c | 53
-rw-r--r-- drivers/pci/pcie/portdrv_pci.c | 38
-rw-r--r-- drivers/pci/slot.c | 2
-rw-r--r-- drivers/platform/x86/Kconfig | 4
-rw-r--r-- drivers/platform/x86/asus_acpi.c | 6
-rw-r--r-- drivers/platform/x86/compal-laptop.c | 9
-rw-r--r-- drivers/platform/x86/dell-laptop.c | 7
-rw-r--r-- drivers/platform/x86/hp-wmi.c | 64
-rw-r--r-- drivers/platform/x86/intel_ips.c | 15
-rw-r--r-- drivers/platform/x86/intel_rar_register.c | 2
-rw-r--r-- drivers/platform/x86/intel_scu_ipc.c | 2
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 171
-rw-r--r-- drivers/s390/char/ctrlchar.c | 4
-rw-r--r-- drivers/s390/char/keyboard.c | 2
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 1
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.c | 2
-rw-r--r-- drivers/serial/68328serial.c | 29
-rw-r--r-- drivers/serial/8250_early.c | 4
-rw-r--r-- drivers/serial/bfin_sport_uart.c | 2
-rw-r--r-- drivers/serial/of_serial.c | 3
-rw-r--r-- drivers/serial/sn_console.c | 2
-rw-r--r-- drivers/serial/suncore.c | 15
-rw-r--r-- drivers/spi/coldfire_qspi.c | 1
-rw-r--r-- drivers/staging/Kconfig | 2
-rw-r--r-- drivers/staging/Makefile | 1
-rw-r--r-- drivers/staging/batman-adv/bat_sysfs.c | 4
-rw-r--r-- drivers/staging/batman-adv/hard-interface.c | 29
-rw-r--r-- drivers/staging/batman-adv/icmp_socket.c | 12
-rw-r--r-- drivers/staging/batman-adv/main.c | 7
-rw-r--r-- drivers/staging/batman-adv/originator.c | 14
-rw-r--r-- drivers/staging/batman-adv/routing.c | 16
-rw-r--r-- drivers/staging/batman-adv/types.h | 1
-rw-r--r-- drivers/staging/comedi/drivers/das08_cs.c | 1
-rw-r--r-- drivers/staging/hv/netvsc_drv.c | 3
-rw-r--r-- drivers/staging/hv/ring_buffer.c | 3
-rw-r--r-- drivers/staging/hv/storvsc_api.h | 4
-rw-r--r-- drivers/staging/hv/storvsc_drv.c | 11
-rw-r--r-- drivers/staging/octeon/Kconfig | 2
-rw-r--r-- drivers/staging/pohmelfs/path_entry.c | 8
-rw-r--r-- drivers/staging/rt2860/usb_main_dev.c | 41
-rw-r--r-- drivers/staging/sep/Kconfig | 10
-rw-r--r-- drivers/staging/sep/Makefile | 2
-rw-r--r-- drivers/staging/sep/TODO | 8
-rw-r--r-- drivers/staging/sep/sep_dev.h | 110
-rw-r--r-- drivers/staging/sep/sep_driver.c | 2742
-rw-r--r-- drivers/staging/sep/sep_driver_api.h | 425
-rw-r--r-- drivers/staging/sep/sep_driver_config.h | 225
-rw-r--r-- drivers/staging/sep/sep_driver_hw_defs.h | 232
-rw-r--r-- drivers/staging/spectra/Kconfig | 1
-rw-r--r-- drivers/staging/spectra/ffsport.c | 30
-rw-r--r-- drivers/staging/spectra/flash.c | 420
-rw-r--r-- drivers/staging/wlan-ng/cfg80211.c | 3
-rw-r--r-- drivers/staging/zram/zram_drv.c | 1
-rw-r--r-- drivers/usb/atm/cxacru.c | 24
-rw-r--r-- drivers/usb/class/cdc-acm.c | 23
-rw-r--r-- drivers/usb/core/message.c | 22
-rw-r--r-- drivers/usb/gadget/composite.c | 4
-rw-r--r-- drivers/usb/gadget/m66592-udc.c | 1
-rw-r--r-- drivers/usb/gadget/r8a66597-udc.c | 1
-rw-r--r-- drivers/usb/gadget/rndis.c | 12
-rw-r--r-- drivers/usb/gadget/rndis.h | 2
-rw-r--r-- drivers/usb/gadget/s3c-hsotg.c | 2
-rw-r--r-- drivers/usb/gadget/uvc_v4l2.c | 2
-rw-r--r-- drivers/usb/host/ehci-ppc-of.c | 12
-rw-r--r-- drivers/usb/host/isp1760-hcd.c | 2
-rw-r--r-- drivers/usb/host/xhci-ring.c | 6
-rw-r--r-- drivers/usb/misc/adutux.c | 2
-rw-r--r-- drivers/usb/misc/iowarrior.c | 4
-rw-r--r-- drivers/usb/otg/twl4030-usb.c | 6
-rw-r--r-- drivers/usb/serial/cp210x.c | 15
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 15
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 21
-rw-r--r-- drivers/usb/serial/generic.c | 11
-rw-r--r-- drivers/usb/serial/io_ti.c | 4
-rw-r--r-- drivers/usb/serial/mos7840.c | 32
-rw-r--r-- drivers/usb/serial/navman.c | 1
-rw-r--r-- drivers/usb/serial/option.c | 126
-rw-r--r-- drivers/usb/serial/pl2303.c | 3
-rw-r--r-- drivers/usb/serial/pl2303.h | 4
-rw-r--r-- drivers/usb/serial/ssu100.c | 258
-rw-r--r-- drivers/usb/serial/usb-serial.c | 23
-rw-r--r-- drivers/vhost/vhost.c | 85
-rw-r--r-- drivers/video/amba-clcd.c | 10
-rw-r--r-- drivers/video/matrox/matroxfb_base.h | 4
-rw-r--r-- drivers/xen/events.c | 21
-rw-r--r-- drivers/xen/manage.c | 2
283 files changed, 5821 insertions, 9099 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1f67057af2a..3ba8d1f44a7 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,7 +33,6 @@
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
-#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
@@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
return status;
}
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+ u32 support,
+ u32 *control)
{
acpi_status status;
- u32 support_set, result, capbuf[3];
+ u32 result, capbuf[3];
+
+ support &= OSC_PCI_SUPPORT_MASKS;
+ support |= root->osc_support_set;
- /* do _OSC query for all possible controls */
- support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = support_set;
- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_SUPPORT_TYPE] = support;
+ if (control) {
+ *control &= OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ } else {
+ /* Run _OSC query for all possible controls. */
+ capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ }
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
if (ACPI_SUCCESS(status)) {
- root->osc_support_set = support_set;
- root->osc_control_qry = result;
- root->osc_queried = 1;
+ root->osc_support_set = support;
+ if (control)
+ *control = result;
}
return status;
}
@@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
if (ACPI_FAILURE(status))
return status;
mutex_lock(&osc_lock);
- status = acpi_pci_query_osc(root, flags);
+ status = acpi_pci_query_osc(root, flags, NULL);
mutex_unlock(&osc_lock);
return status;
}
@@ -365,55 +373,70 @@ out:
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
/**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits the control of is essential to the caller.
+ *
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req. If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
*
- * Attempt to take control from Firmware on requested control bits.
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success. On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
**/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
+ struct acpi_pci_root *root;
acpi_status status;
- u32 control_req, result, capbuf[3];
+ u32 ctrl, capbuf[3];
acpi_handle tmp;
- struct acpi_pci_root *root;
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
+ if (!mask)
+ return AE_BAD_PARAMETER;
- control_req = (flags & OSC_PCI_CONTROL_MASKS);
- if (!control_req)
+ ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+ if ((ctrl & req) != req)
return AE_TYPE;
root = acpi_pci_find_root(handle);
if (!root)
return AE_NOT_EXIST;
+ status = acpi_get_handle(handle, "_OSC", &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
+
mutex_lock(&osc_lock);
+
+ *mask = ctrl | root->osc_control_set;
/* No need to evaluate _OSC if the control was already granted. */
- if ((root->osc_control_set & control_req) == control_req)
+ if ((root->osc_control_set & ctrl) == ctrl)
goto out;
- /* Need to query controls first before requesting them */
- if (!root->osc_queried) {
- status = acpi_pci_query_osc(root, root->osc_support_set);
+ /* Need to check the available controls bits before requesting them. */
+ while (*mask) {
+ status = acpi_pci_query_osc(root, root->osc_support_set, mask);
if (ACPI_FAILURE(status))
goto out;
+ if (ctrl == *mask)
+ break;
+ ctrl = *mask;
}
- if ((root->osc_control_qry & control_req) != control_req) {
- printk(KERN_DEBUG
- "Firmware did not grant requested _OSC control\n");
+
+ if ((ctrl & req) != req) {
status = AE_SUPPORT;
goto out;
}
capbuf[OSC_QUERY_TYPE] = 0;
capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
- capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
- status = acpi_pci_run_osc(handle, capbuf, &result);
+ capbuf[OSC_CONTROL_TYPE] = ctrl;
+ status = acpi_pci_run_osc(handle, capbuf, mask);
if (ACPI_SUCCESS(status))
- root->osc_control_set = result;
+ root->osc_control_set = *mask;
out:
mutex_unlock(&osc_lock);
return status;
@@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
- status = acpi_pci_osc_control_set(root->device->handle,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-
- if (ACPI_FAILURE(status)) {
- printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
- pcie_no_aspm();
- }
-
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 65e3e270837..11ec911016c 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -828,6 +828,7 @@ config PATA_SAMSUNG_CF
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
depends on ISA && EXPERIMENTAL
+ select PATA_LEGACY
help
Support for the Winbond W83759A controller on Vesa Local Bus
systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 158eaa961b1..d5df04a395c 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
obj-$(CONFIG_PATA_PXA) += pata_pxa.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe75d8befc3..013727b2041 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
board_ahci,
board_ahci_ign_iferr,
board_ahci_nosntf,
+ board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_mcp65,
@@ -132,6 +133,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_yes_fbs] =
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
/* by chipsets */
[board_ahci_mcp65] =
{
@@ -362,6 +371,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+ { PCI_DEVICE(0x1b4b, 0x9123),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 7113c572447..474427b6f99 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -209,6 +209,7 @@ enum {
link offline */
AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+ AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
/* ap->flags bits */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 81e772a94d5..666850d31df 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -430,6 +430,12 @@ void ahci_save_initial_config(struct device *dev,
cap &= ~HOST_CAP_SNTF;
}
+ if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can do FBS, turning on CAP_FBS\n");
+ cap |= HOST_CAP_FBS;
+ }
+
if (force_port_map && port_map != force_port_map) {
dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, force_port_map);
@@ -2036,9 +2042,15 @@ static int ahci_port_start(struct ata_port *ap)
u32 cmd = readl(port_mmio + PORT_CMD);
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
- else
+ else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+ dev_printk(KERN_INFO, dev,
+ "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
+ pp->fbs_supported = true;
+ } else
dev_printk(KERN_WARNING, dev,
- "The port is not capable of FBS\n");
+ "port %d is not capable of FBS\n",
+ ap->port_no);
}
if (pp->fbs_supported) {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 7ef7c4f216f..c035b3d041e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5111,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
qc->flags |= ATA_QCFLAG_ACTIVE;
ap->qc_active |= 1 << qc->tag;
- /* We guarantee to LLDs that they will have at least one
+ /*
+ * We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
- BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+ if (WARN_ON_ONCE(ata_is_data(prot) &&
+ (!qc->sg || !qc->n_elem || !qc->nbytes)))
+ goto sys_err;
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
(ap->flags & ATA_FLAG_PIO_DMA)))
if (ata_sg_setup(qc))
- goto sg_err;
+ goto sys_err;
/* if device is sleeping, schedule reset and abort the link */
if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5136,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
goto err;
return;
-sg_err:
+sys_err:
qc->err_mask |= AC_ERR_SYSTEM;
err:
ata_qc_complete(qc);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 674c1436491..3b82d8ef76f 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2735,10 +2735,6 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- /* see ata_dma_blacklisted() */
- BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
- qc->tf.protocol == ATAPI_PROT_DMA);
-
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
return ata_sff_qc_issue(qc);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 9f5da1c7454..905ff76d3cb 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
if (pair) {
struct ata_timing tp;
-
ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
- if (pair->dma_mode) {
- ata_timing_compute(pair, pair->dma_mode,
- &tp, T, 0);
- ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
- }
}
}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 9df1ff7e1ea..eaf194138f2 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -44,6 +44,9 @@
* Specific support is included for the ht6560a/ht6560b/opti82c611a/
* opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
*
+ * Support for the Winbond 83759A when operating in advanced mode.
+ * Multichip mode is not currently supported.
+ *
* Use the autospeed and pio_mask options with:
* Appian ADI/2 aka CLPD7220 or AIC25VL01.
* Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
static int qdi; /* Set to probe QDI controllers */
-static int winbond; /* Set to probe Winbond controllers,
- give I/O port if non standard */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
+#ifdef PATA_WINBOND_VLB_MODULE
+static int winbond = 1; /* Set to probe Winbond controllers,
+ give I/O port if non standard */
+#else
+static int winbond; /* Set to probe Winbond controllers,
+ give I/O port if non standard */
+#endif
+
/**
* legacy_probe_add - Add interface to probe list
* @port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_winbond");
module_param(probe_all, int, 0);
module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
module_param(opti82c611a, int, 0);
module_param(opti82c46x, int, 0);
module_param(qdi, int, 0);
+module_param(winbond, int, 0);
module_param(pio_mask, int, 0);
module_param(iordy_mask, int, 0);
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644
index 6d8619b6f67..00000000000
--- a/drivers/ata/pata_winbond.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * pata_winbond.c - Winbond VLB ATA controllers
- * (C) 2006 Red Hat
- *
- * Support for the Winbond 83759A when operating in advanced mode.
- * Multichip mode is not currently supported.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_winbond"
-#define DRV_VERSION "0.0.3"
-
-#define NR_HOST 4 /* Two winbond controllers, two channels each */
-
-struct winbond_data {
- unsigned long config;
- struct platform_device *platform_dev;
-};
-
-static struct ata_host *winbond_host[NR_HOST];
-static struct winbond_data winbond_data[NR_HOST];
-static int nr_winbond_host;
-
-#ifdef MODULE
-static int probe_winbond = 1;
-#else
-static int probe_winbond;
-#endif
-
-static DEFINE_SPINLOCK(winbond_lock);
-
-static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
-{
- unsigned long flags;
- spin_lock_irqsave(&winbond_lock, flags);
- outb(reg, port + 0x01);
- outb(val, port + 0x02);
- spin_unlock_irqrestore(&winbond_lock, flags);
-}
-
-static u8 winbond_readcfg(unsigned long port, u8 reg)
-{
- u8 val;
-
- unsigned long flags;
- spin_lock_irqsave(&winbond_lock, flags);
- outb(reg, port + 0x01);
- val = inb(port + 0x02);
- spin_unlock_irqrestore(&winbond_lock, flags);
-
- return val;
-}
-
-static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct winbond_data *winbond = ap->host->private_data;
- int active, recovery;
- u8 reg;
- int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
-
- reg = winbond_readcfg(winbond->config, 0x81);
-
- /* Get the timing data in cycles */
- if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
- ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
- else
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
- recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
- timing = (active << 4) | recovery;
- winbond_writecfg(winbond->config, timing, reg);
-
- /* Load the setup timing */
-
- reg = 0x35;
- if (adev->class != ATA_DEV_ATA)
- reg |= 0x08; /* FIFO off */
- if (!ata_pio_need_iordy(adev))
- reg |= 0x02; /* IORDY off */
- reg |= (clamp_val(t.setup, 0, 3) << 6);
- winbond_writecfg(winbond->config, timing + 1, reg);
-}
-
-
-static unsigned int winbond_data_xfer(struct ata_device *dev,
- unsigned char *buf, unsigned int buflen, int rw)
-{
- struct ata_port *ap = dev->link->ap;
- int slop = buflen & 3;
-
- if (ata_id_has_dword_io(dev->id)) {
- if (rw == READ)
- ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
- else
- iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
- if (unlikely(slop)) {
- __le32 pad;
- if (rw == READ) {
- pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- memcpy(buf + buflen - slop, &pad, slop);
- } else {
- memcpy(&pad, buf + buflen - slop, slop);
- iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- }
- buflen += 4 - slop;
- }
- } else
- buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
- return buflen;
-}
-
-static struct scsi_host_template winbond_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations winbond_port_ops = {
- .inherits = &ata_sff_port_ops,
- .sff_data_xfer = winbond_data_xfer,
- .cable_detect = ata_cable_40wire,
- .set_piomode = winbond_set_piomode,
-};
-
-/**
- * winbond_init_one - attach a winbond interface
- * @type: Type to display
- * @io: I/O port start
- * @irq: interrupt line
- * @fast: True if on a > 33Mhz VLB
- *
- * Register a VLB bus IDE interface. Such interfaces are PIO and we
- * assume do not support IRQ sharing.
- */
-
-static __init int winbond_init_one(unsigned long port)
-{
- struct platform_device *pdev;
- u8 reg;
- int i, rc;
-
- reg = winbond_readcfg(port, 0x81);
- reg |= 0x80; /* jumpered mode off */
- winbond_writecfg(port, 0x81, reg);
- reg = winbond_readcfg(port, 0x83);
- reg |= 0xF0; /* local control */
- winbond_writecfg(port, 0x83, reg);
- reg = winbond_readcfg(port, 0x85);
- reg |= 0xF0; /* programmable timing */
- winbond_writecfg(port, 0x85, reg);
-
- reg = winbond_readcfg(port, 0x81);
-
- if (!(reg & 0x03)) /* Disabled */
- return -ENODEV;
-
- for (i = 0; i < 2 ; i ++) {
- unsigned long cmd_port = 0x1F0 - (0x80 * i);
- unsigned long ctl_port = cmd_port + 0x206;
- struct ata_host *host;
- struct ata_port *ap;
- void __iomem *cmd_addr, *ctl_addr;
-
- if (!(reg & (1 << i)))
- continue;
-
- pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- rc = -ENOMEM;
- host = ata_host_alloc(&pdev->dev, 1);
- if (!host)
- goto err_unregister;
- ap = host->ports[0];
-
- rc = -ENOMEM;
- cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
- ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
- if (!cmd_addr || !ctl_addr)
- goto err_unregister;
-
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
-
- ap->ops = &winbond_port_ops;
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
- ap->ioaddr.cmd_addr = cmd_addr;
- ap->ioaddr.altstatus_addr = ctl_addr;
- ap->ioaddr.ctl_addr = ctl_addr;
- ata_sff_std_ports(&ap->ioaddr);
-
- /* hook in a private data structure per channel */
- host->private_data = &winbond_data[nr_winbond_host];
- winbond_data[nr_winbond_host].config = port;
- winbond_data[nr_winbond_host].platform_dev = pdev;
-
- /* activate */
- rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
- &winbond_sht);
- if (rc)
- goto err_unregister;
-
- winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
- }
-
- return 0;
-
- err_unregister:
- platform_device_unregister(pdev);
- return rc;
-}
-
-/**
- * winbond_init - attach winbond interfaces
- *
- * Attach winbond IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int winbond_init(void)
-{
- static const unsigned long config[2] = { 0x130, 0x1B0 };
-
- int ct = 0;
- int i;
-
- if (probe_winbond == 0)
- return -ENODEV;
-
- /*
- * Check both base addresses
- */
-
- for (i = 0; i < 2; i++) {
- if (probe_winbond & (1<<i)) {
- int ret = 0;
- unsigned long port = config[i];
-
- if (request_region(port, 2, "pata_winbond")) {
- ret = winbond_init_one(port);
- if (ret <= 0)
- release_region(port, 2);
- else ct+= ret;
- }
- }
- }
- if (ct != 0)
- return 0;
- return -ENODEV;
-}
-
-static __exit void winbond_exit(void)
-{
- int i;
-
- for (i = 0; i < nr_winbond_host; i++) {
- ata_host_detach(winbond_host[i]);
- release_region(winbond_data[i].config, 2);
- platform_device_unregister(winbond_data[i].platform_dev);
- }
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(winbond_init);
-module_exit(winbond_exit);
-
-module_param(probe_winbond, int, 0);
-
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index ea24c1e51be..6cf57c5c2b5 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1459,7 +1459,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
- u32 dma_chan;
+ int dma_chan;
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
int err;
@@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = {
},
};
-static int sata_dwc_probe(struct of_device *ofdev,
+static int sata_dwc_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct sata_dwc_device *hsdev;
@@ -1702,7 +1702,7 @@ error_out:
return err;
}
-static int sata_dwc_remove(struct of_device *ofdev)
+static int sata_dwc_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9463c71dd38..81982594a01 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
{
- struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
- cmd &= ~ATA_DMA_START;
- writelfl(cmd, port_mmio + BMDMA_CMD);
+ if (cmd & ATA_DMA_START) {
+ cmd &= ~ATA_DMA_START;
+ writelfl(cmd, port_mmio + BMDMA_CMD);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+ }
+}
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ mv_bmdma_stop_ap(qc->ap);
}
/**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
- else
+ else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+ else {
+ /*
+ * Just because DMA_ACTIVE is 0 (DMA completed),
+ * this does _not_ mean the device is "done".
+ * So we should not yet be signalling ATA_DMA_INTR
+ * in some cases. Eg. DSM/TRIM, and perhaps others.
+ */
+ mv_bmdma_stop_ap(ap);
+ if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+ status = 0;
+ else
+ status = ATA_DMA_INTR;
+ }
return status;
}
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
case ATA_PROT_DMA:
+ if (tf->command == ATA_CMD_DSM)
+ return;
+ /* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return;
+ if (tf->command == ATA_CMD_DSM)
+ return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
+ if (qc->tf.command == ATA_CMD_DSM) {
+ if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
+ return AC_ERR_OTHER;
+ break; /* use bmdma for this */
+ }
+ /* fall thru */
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index c8a44f5e058..40af43ebd92 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p,
out:
if (retval) {
release_firmware(firmware);
- firmware_p = NULL;
+ *firmware_p = NULL;
}
return retval;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ac1b682edec..ab735a605cf 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -834,7 +834,7 @@ static int blkfront_probe(struct xenbus_device *dev,
char *type;
int len;
/* no unplug has been done: do not hook devices != xen vbds */
- if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) {
+ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
int major;
if (!VDEV_IS_EXTENDED(vdevice))
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 2982b3ee946..057413bb16e 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -94,6 +94,7 @@
#include <linux/hdreg.h>
#include <linux/platform_device.h>
#if defined(CONFIG_OF)
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index ddf5def1b0d..eab58db5f91 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
+#include <linux/intel-gtt.h>
#include "intel-gtt.c"
@@ -815,9 +816,19 @@ static const struct intel_driver_description {
"HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
"Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
"Sandybridge", NULL, &intel_gen6_driver },
{ 0, 0, NULL, NULL, NULL }
};
@@ -825,7 +836,8 @@ static const struct intel_driver_description {
static int __devinit intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge)
{
- int i;
+ int i, mask;
+
bridge->driver = NULL;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -845,14 +857,19 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
- if (bridge->driver->mask_memory == intel_i965_mask_memory) {
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask 36bit failed!\n");
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(36));
- }
+ if (bridge->driver->mask_memory == intel_gen6_mask_memory)
+ mask = 40;
+ else if (bridge->driver->mask_memory == intel_i965_mask_memory)
+ mask = 36;
+ else
+ mask = 32;
+
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask %d-bit failed!\n", mask);
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(mask));
return 1;
}
@@ -1036,6 +1053,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
{ }
};
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c05e3e51826..ee189c74d34 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -1,6 +1,8 @@
/*
* Common Intel AGPGART and GTT definitions.
*/
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
/* Intel registers */
#define INTEL_APSIZE 0xb4
@@ -200,10 +202,16 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -230,7 +238,8 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -243,3 +252,5 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
IS_SNB)
+
+#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index d22ffb811bf..75e0a349788 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
.type = INTEL_AGP_CACHED_MEMORY}
};
+#define INTEL_AGP_UNCACHED_MEMORY 0
+#define INTEL_AGP_CACHED_MEMORY_LLC 1
+#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
+
+static struct gatt_mask intel_gen6_masks[] =
+{
+ {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
+ .type = INTEL_AGP_UNCACHED_MEMORY },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+};
+
static struct _intel_private {
struct pci_dev *pcidev; /* device one */
u8 __iomem *registers;
@@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
off_t pg_start, int mask_type)
{
int i, j;
- u32 cache_bits = 0;
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
- {
- cache_bits = GEN6_PTE_LLC_MLC;
- }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
return 0;
}
+static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
+
+ if (type_mask == AGP_USER_UNCACHED_MEMORY)
+ return INTEL_AGP_UNCACHED_MEMORY;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
+ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
+ INTEL_AGP_CACHED_MEMORY_LLC_MLC;
+ else /* set 'normal'/'cached' to LLC by default */
+ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
+ INTEL_AGP_CACHED_MEMORY_LLC;
+}
+
+
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void)
gtt_entries = 0;
break;
}
- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ } else if (IS_SNB) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
@@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void)
intel_i915_setup_chipset_flush();
}
- if (intel_private.ifp_resource.start) {
+ if (intel_private.ifp_resource.start)
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
- if (!intel_private.i9xx_flush_page)
- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
- }
+ if (!intel_private.i9xx_flush_page)
+ dev_err(&intel_private.pcidev->dev,
+ "can't ioremap flush page - no chipset flushing\n");
}
static int intel_i9xx_configure(void)
@@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
mask_type != INTEL_AGP_CACHED_MEMORY)
goto out_err;
@@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
dma_addr_t addr, int type)
{
- /* Shift high bits down */
- addr |= (addr >> 28) & 0xff;
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
/* Type checking must be done elsewhere */
return addr | bridge->driver->masks[type].mask;
@@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
break;
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
*gtt_offset = MB(2);
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
.fetch_size = intel_i9xx_fetch_size,
.cleanup = intel_i915_cleanup,
.mask_memory = intel_gen6_mask_memory,
- .masks = intel_i810_masks,
+ .masks = intel_gen6_masks,
.agp_enable = intel_i810_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = intel_i965_create_gatt_table,
@@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
.agp_map_page = intel_agp_map_page,
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index e0249722d25..f953c96efc8 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
if (hangcheck_dump_tasks) {
printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
- handle_sysrq('t', NULL);
+ handle_sysrq('t');
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index fa27d1676ee..3afd62e856e 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
if (sysrq_pressed)
continue;
} else if (sysrq_pressed) {
- handle_sysrq(buf[i], tty);
+ handle_sysrq(buf[i]);
sysrq_pressed = 0;
continue;
}
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 1f4b6de65a2..a2bc885ce60 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
hp->sysrq = 1;
continue;
} else if (hp->sysrq) {
- handle_sysrq(c, hp->tty);
+ handle_sysrq(c);
hp->sysrq = 0;
continue;
}
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1acdb250951..a3f5e381e74 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
static int n2rng_data_read(struct hwrng *rng, u32 *data)
{
- struct n2rng *np = rng->priv;
+ struct n2rng *np = (struct n2rng *) rng->priv;
unsigned long ra = __pa(&np->test_data);
int len;
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 07f3ea38b58..d4b71e8d0d2 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile )
/* disable DSS reporting */
i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
- if ( !tty || (tty->termios->c_cflag & HUPCL) ) {
+ if (tty->termios->c_cflag & HUPCL) {
i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2930,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
if ( pCh )
{
rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+ if (rc)
+ rc = -EFAULT;
} else {
rc = -ENODEV;
}
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index ad46eae1f9b..c350d01716b 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
}
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
+
+ tty_add_file(tty, filp);
retval = devpts_pty_new(inode, tty->link);
if (retval)
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 79c3bc69165..7c79d243acc 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -1244,6 +1244,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
}
info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
configure_r_port(tty, info, NULL);
+ mutex_unlock(&info->port.mutex);
return 0;
}
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index fef80cfcab5..e63b830c86c 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -691,8 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
if (info->port.count == 1) {
/* 1st open on this device, init hardware */
retval = startup(info);
- if (retval < 0)
+ if (retval < 0) {
+ mutex_unlock(&info->port.mutex);
goto cleanup;
+ }
}
mutex_unlock(&info->port.mutex);
retval = block_til_ready(tty, filp, info);
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 878ac0c2cc6..ef31bb81e84 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/fs.h>
-#include <linux/tty.h>
#include <linux/mount.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
@@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str)
__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
-static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
+static void sysrq_handle_loglevel(int key)
{
int i;
@@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
};
#ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key, struct tty_struct *tty)
+static void sysrq_handle_SAK(int key)
{
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work);
@@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
#endif
#ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key, struct tty_struct *tty)
+static void sysrq_handle_unraw(int key)
{
struct kbd_struct *kbd = &kbd_table[fg_console];
@@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
-static void sysrq_handle_crash(int key, struct tty_struct *tty)
+static void sysrq_handle_crash(int key)
{
char *killer = NULL;
@@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_reboot(int key, struct tty_struct *tty)
+static void sysrq_handle_reboot(int key)
{
lockdep_off();
local_irq_enable();
@@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
.enable_mask = SYSRQ_ENABLE_BOOT,
};
-static void sysrq_handle_sync(int key, struct tty_struct *tty)
+static void sysrq_handle_sync(int key)
{
emergency_sync();
}
@@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = {
.enable_mask = SYSRQ_ENABLE_SYNC,
};
-static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
+static void sysrq_handle_show_timers(int key)
{
sysrq_timer_list_show();
}
@@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
};
-static void sysrq_handle_mountro(int key, struct tty_struct *tty)
+static void sysrq_handle_mountro(int key)
{
emergency_remount();
}
@@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
};
#ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
+static void sysrq_handle_showlocks(int key)
{
debug_show_all_locks();
}
@@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
-static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+static void sysrq_handle_showallcpus(int key)
{
/*
* Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
};
#endif
-static void sysrq_handle_showregs(int key, struct tty_struct *tty)
+static void sysrq_handle_showregs(int key)
{
struct pt_regs *regs = get_irq_regs();
if (regs)
@@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate(int key)
{
show_state();
}
@@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate_blocked(int key)
{
show_state_filter(TASK_UNINTERRUPTIBLE);
}
@@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
#ifdef CONFIG_TRACING
#include <linux/ftrace.h>
-static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+static void sysrq_ftrace_dump(int key)
{
ftrace_dump(DUMP_ALL);
}
@@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
#endif
-static void sysrq_handle_showmem(int key, struct tty_struct *tty)
+static void sysrq_handle_showmem(int key)
{
show_mem();
}
@@ -330,7 +329,7 @@ static void send_sig_all(int sig)
}
}
-static void sysrq_handle_term(int key, struct tty_struct *tty)
+static void sysrq_handle_term(int key)
{
send_sig_all(SIGTERM);
console_loglevel = 8;
@@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored)
static DECLARE_WORK(moom_work, moom_callback);
-static void sysrq_handle_moom(int key, struct tty_struct *tty)
+static void sysrq_handle_moom(int key)
{
schedule_work(&moom_work);
}
@@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = {
};
#ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key, struct tty_struct *tty)
+static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
}
@@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
};
#endif
-static void sysrq_handle_kill(int key, struct tty_struct *tty)
+static void sysrq_handle_kill(int key)
{
send_sig_all(SIGKILL);
console_loglevel = 8;
@@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-static void sysrq_handle_unrt(int key, struct tty_struct *tty)
+static void sysrq_handle_unrt(int key)
{
normalize_rt_tasks();
}
@@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p;
}
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+void __handle_sysrq(int key, bool check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
@@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
printk("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
- op_p->handler(key, tty);
+ op_p->handler(key);
} else {
printk("This sysrq operation is disabled.\n");
}
@@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
-void handle_sysrq(int key, struct tty_struct *tty)
+void handle_sysrq(int key)
{
if (sysrq_on())
- __handle_sysrq(key, tty, 1);
+ __handle_sysrq(key, true);
}
EXPORT_SYMBOL(handle_sysrq);
@@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
default:
if (sysrq_down && value && value != 2)
- __handle_sysrq(sysrq_xlate[code], NULL, 1);
+ __handle_sysrq(sysrq_xlate[code], true);
break;
}
@@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
if (get_user(c, buf))
return -EFAULT;
- __handle_sysrq(c, NULL, 0);
+ __handle_sysrq(c, false);
}
return count;
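
The sysrq hunks above drop the unused tty argument from every handler and turn __handle_sysrq()'s mask parameter into a bool. For orientation only, a minimal out-of-tree handler written against the new one-argument prototype might look as follows; the key choice 'x', the messages, and the module boilerplate are illustrative assumptions, not part of this patch.

	/* Hedged sketch: a sysrq handler using the post-patch prototype. */
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/sysrq.h>

	static void sysrq_handle_example(int key)	/* tty pointer is no longer passed */
	{
		printk(KERN_INFO "example sysrq handler invoked for key '%c'\n", key);
	}

	static struct sysrq_key_op sysrq_example_op = {
		.handler	= sysrq_handle_example,
		.help_msg	= "example(x)",
		.action_msg	= "Example sysrq action",
		.enable_mask	= SYSRQ_ENABLE_DUMP,
	};

	static int __init sysrq_example_init(void)
	{
		return register_sysrq_key('x', &sysrq_example_op);
	}

	static void __exit sysrq_example_exit(void)
	{
		unregister_sysrq_key('x', &sysrq_example_op);
	}

	module_init(sysrq_example_init);
	module_exit(sysrq_example_exit);
	MODULE_LICENSE("GPL");
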
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0350c42375a..613c852ee0f 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
DEFINE_MUTEX(tty_mutex);
EXPORT_SYMBOL(tty_mutex);
+/* Spinlock to protect the tty->tty_files list */
+DEFINE_SPINLOCK(tty_files_lock);
+
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty)
kfree(tty);
}
+static inline struct tty_struct *file_tty(struct file *file)
+{
+ return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+ struct tty_file_private *priv;
+
+ /* XXX: must implement proper error handling in callers */
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
+
+ priv->tty = tty;
+ priv->file = file;
+ file->private_data = priv;
+
+ spin_lock(&tty_files_lock);
+ list_add(&priv->list, &tty->tty_files);
+ spin_unlock(&tty_files_lock);
+}
+
+/* Delete file from its tty */
+void tty_del_file(struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ spin_lock(&tty_files_lock);
+ list_del(&priv->list);
+ spin_unlock(&tty_files_lock);
+ file->private_data = NULL;
+ kfree(priv);
+}
+
+
#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
/**
@@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
struct list_head *p;
int count = 0;
- file_list_lock();
+ spin_lock(&tty_files_lock);
list_for_each(p, &tty->tty_files) {
count++;
}
- file_list_unlock();
+ spin_unlock(&tty_files_lock);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)
@@ -317,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
if (*stp == '\0')
stp = NULL;
- if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+ if (tty_line >= 0 && tty_line < p->num && p->ops &&
p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
res = tty_driver_kref_get(p);
*line = tty_line;
@@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty)
struct file *cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
+ struct tty_file_private *priv;
int closecount = 0, n;
unsigned long flags;
int refs = 0;
@@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty)
spin_lock(&redirect_lock);
- if (redirect && redirect->private_data == tty) {
+ if (redirect && file_tty(redirect) == tty) {
f = redirect;
redirect = NULL;
}
@@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty)
workqueue with the lock held */
check_tty_count(tty, "tty_hangup");
- file_list_lock();
+ spin_lock(&tty_files_lock);
/* This breaks for file handles being sent over AF_UNIX sockets ? */
- list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
+ list_for_each_entry(priv, &tty->tty_files, list) {
+ filp = priv->file;
if (filp->f_op->write == redirected_tty_write)
cons_filp = filp;
if (filp->f_op->write != tty_write)
@@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty)
__tty_fasync(-1, filp, 0); /* can't block */
filp->f_op = &hung_up_tty_fops;
}
- file_list_unlock();
+ spin_unlock(&tty_files_lock);
tty_ldisc_hangup(tty);
@@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int i;
- struct tty_struct *tty;
- struct inode *inode;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
- tty = file->private_data;
- inode = file->f_path.dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
static ssize_t tty_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct tty_struct *tty;
struct inode *inode = file->f_path.dentry->d_inode;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
ssize_t ret;
- struct tty_ldisc *ld;
- tty = file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write"))
return -EIO;
if (!tty || !tty->ops->write ||
@@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work)
tty_driver_kref_put(driver);
module_put(driver->owner);
- file_list_lock();
+ spin_lock(&tty_files_lock);
list_del_init(&tty->tty_files);
- file_list_unlock();
+ spin_unlock(&tty_files_lock);
put_pid(tty->pgrp);
put_pid(tty->session);
@@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx)
int tty_release(struct inode *inode, struct file *filp)
{
- struct tty_struct *tty, *o_tty;
+ struct tty_struct *tty = file_tty(filp);
+ struct tty_struct *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
int devpts;
int idx;
char buf[64];
- tty = filp->private_data;
if (tty_paranoia_check(tty, inode, "tty_release_dev"))
return 0;
@@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp)
* - do_tty_hangup no longer sees this file descriptor as
* something that needs to be handled for hangups.
*/
- file_kill(filp);
- filp->private_data = NULL;
+ tty_del_file(filp);
/*
* Perform some housekeeping before deciding whether to return.
@@ -1839,8 +1875,8 @@ got_driver:
return PTR_ERR(tty);
}
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
+ tty_add_file(tty, filp);
+
check_tty_count(tty, "tty_open");
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1916,11 +1952,10 @@ got_driver:
static unsigned int tty_poll(struct file *filp, poll_table *wait)
{
- struct tty_struct *tty;
+ struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
int ret = 0;
- tty = filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
return 0;
@@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
static int __tty_fasync(int fd, struct file *filp, int on)
{
- struct tty_struct *tty;
+ struct tty_struct *tty = file_tty(filp);
unsigned long flags;
int retval = 0;
- tty = filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
goto out;
@@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
*/
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct tty_struct *tty, *real_tty;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_struct *real_tty;
void __user *p = (void __user *)arg;
int retval;
struct tty_ldisc *ld;
struct inode *inode = file->f_dentry->d_inode;
- tty = file->private_data;
if (tty_paranoia_check(tty, inode, "tty_ioctl"))
return -EINVAL;
@@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct inode *inode = file->f_dentry->d_inode;
- struct tty_struct *tty = file->private_data;
+ struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
int retval = -ENOIOCTLCMD;
@@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty)
if (!filp)
continue;
if (filp->f_op->read == tty_read &&
- filp->private_data == tty) {
+ file_tty(filp) == tty) {
printk(KERN_NOTICE "SAK: killed process %d"
" (%s): fd#%d opened to the tty\n",
task_pid_nr(p), p->comm, i);
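
The tty_io.c changes stop storing the tty_struct directly in file->private_data; each open file now carries a small struct tty_file_private that links it onto tty->tty_files under the new tty_files_lock, and readers reach the tty through file_tty(). The user-space analogue below, with invented names, only illustrates the shape of that indirection; it is not kernel code.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-ins for struct tty_struct and struct file. */
	struct fake_priv;
	struct fake_tty  { struct fake_priv *files; };	/* head of per-tty open-file list */
	struct fake_file { void *private_data; };

	/* Analogue of struct tty_file_private: one bridge node per open file. */
	struct fake_priv {
		struct fake_tty  *tty;
		struct fake_file *file;
		struct fake_priv *next;
	};

	/* Analogue of tty_files_lock protecting the per-tty list. */
	static pthread_mutex_t files_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Analogue of tty_add_file(): allocate the bridge node and link it. */
	static void add_file(struct fake_tty *tty, struct fake_file *file)
	{
		struct fake_priv *priv = malloc(sizeof(*priv));

		priv->tty = tty;
		priv->file = file;
		file->private_data = priv;

		pthread_mutex_lock(&files_lock);
		priv->next = tty->files;
		tty->files = priv;
		pthread_mutex_unlock(&files_lock);
	}

	/* Analogue of file_tty(): the tty is now one indirection away. */
	static struct fake_tty *file_tty(struct fake_file *file)
	{
		return ((struct fake_priv *)file->private_data)->tty;
	}

	int main(void)
	{
		struct fake_tty tty = { 0 };
		struct fake_file f = { 0 };

		add_file(&tty, &f);
		printf("file maps back to its tty: %s\n",
		       file_tty(&f) == &tty ? "yes" : "no");
		return 0;
	}
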
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index c734f9b1263..281aada7b4a 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -194,10 +194,11 @@ static DECLARE_WORK(console_work, console_callback);
int fg_console;
int last_console;
int want_console = -1;
-int saved_fg_console;
-int saved_last_console;
-int saved_want_console;
-int saved_vc_mode;
+static int saved_fg_console;
+static int saved_last_console;
+static int saved_want_console;
+static int saved_vc_mode;
+static int saved_console_blanked;
/*
* For each existing display, we have a pointer to console currently visible
@@ -905,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
* bottom of buffer
*/
old_origin += (old_rows - new_rows) * old_row_size;
- end = vc->vc_scr_end;
} else {
/*
* Cursor is in no man's land, copy 1/2 screenful
* from the top and bottom of cursor position
*/
old_origin += (vc->vc_y - new_rows/2) * old_row_size;
- end = old_origin + (old_row_size * new_rows);
}
- } else
- /*
- * Cursor near the top, copy contents from the top of buffer
- */
- end = (old_rows > new_rows) ? old_origin +
- (old_row_size * new_rows) :
- vc->vc_scr_end;
+ }
+
+ end = old_origin + old_row_size * min(old_rows, new_rows);
update_attr(vc);
@@ -3074,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
- if (!vc->vc_origin)
- vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+ vc->vc_origin = (unsigned long)vc->vc_screenbuf;
visual_init(vc, i, 0);
set_origin(vc);
update_attr(vc);
@@ -3449,6 +3443,7 @@ int con_debug_enter(struct vc_data *vc)
saved_last_console = last_console;
saved_want_console = want_console;
saved_vc_mode = vc->vc_mode;
+ saved_console_blanked = console_blanked;
vc->vc_mode = KD_TEXT;
console_blanked = 0;
if (vc->vc_sw->con_debug_enter)
@@ -3492,6 +3487,7 @@ int con_debug_leave(void)
fg_console = saved_fg_console;
last_console = saved_last_console;
want_console = saved_want_console;
+ console_blanked = saved_console_blanked;
vc_cons[fg_console].d->vc_mode = saved_vc_mode;
vc = vc_cons[fg_console].d;
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 0ed763cd2e7..b663d573aad 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -94,6 +94,7 @@
#ifdef CONFIG_OF
/* For open firmware. */
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 670239ab751..e7d5d6b5dcf 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
amd64_handle_ce(mci, info);
else if (ecc_type == 1)
amd64_handle_ue(mci, info);
-
- /*
- * If main error is CE then overflow must be CE. If main error is UE
- * then overflow is unknown. We'll call the overflow a CE - if
- * panic_on_ue is set then we're already panic'ed and won't arrive
- * here. Else, then apparently someone doesn't think that UE's are
- * catastrophic.
- */
- if (info->nbsh & K8_NBSH_OVERFLOW)
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
}
void amd64_decode_bus_error(int node_id, struct err_regs *regs)
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index bae9351e947..9014df6f605 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
pr_emerg("MC%d_STATUS: ", m->bank);
- pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+ pr_cont("%sorrected error, other errors lost: %s, "
"CPU context corrupt: %s",
((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
- ((m->status & MCI_STATUS_EN) ? "yes" : "no"),
- ((m->status & MCI_STATUS_MISCV) ? "" : "in"),
+ ((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
/* do the two bits[14:13] together */
@@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = {
static int __init mce_amd_init(void)
{
/*
- * We can decode MCEs for Opteron and later CPUs:
+ * We can decode MCEs for K8, F10h and F11h CPUs:
*/
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0xf))
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return 0;
+
+ if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+ return 0;
+
+ atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
return 0;
}
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ca7ca56661e..b42a0bde849 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,6 +81,10 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
+ if (!del_timer(&t->split_timeout_timer)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ goto timed_out;
+ }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -89,11 +93,11 @@ static int close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
- del_timer_sync(&t->split_timeout_timer);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
+ timed_out:
return -ENOENT;
}
@@ -921,6 +925,10 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
+ if (!del_timer(&t->split_timeout_timer)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ goto timed_out;
+ }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -929,6 +937,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link == &card->transaction_list) {
+ timed_out:
fw_notify("Unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
@@ -963,8 +972,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- del_timer_sync(&t->split_timeout_timer);
-
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 51e8a35ebd7..18fdd9703b4 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -578,7 +578,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (!peer) {
fw_notify("No peer for ARP packet from %016llx\n",
(unsigned long long)peer_guid);
- goto failed_proto;
+ goto no_peer;
}
/*
@@ -655,7 +655,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
return 0;
- failed_proto:
+ no_peer:
net->stats.rx_errors++;
net->stats.rx_dropped++;
@@ -663,7 +663,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return 0;
+ return -ENOENT;
}
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -700,7 +700,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fw_error("out of memory\n");
net->stats.rx_dropped++;
- return -1;
+ return -ENOMEM;
}
skb_reserve(skb, (net->hard_header_len + 15) & ~15);
memcpy(skb_put(skb, len), buf, len);
@@ -725,8 +725,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
- if (!peer)
- goto bad_proto;
+ if (!peer) {
+ retval = -ENOENT;
+ goto fail;
+ }
pd = fwnet_pd_find(peer, datagram_label);
if (pd == NULL) {
@@ -740,7 +742,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
dg_size, buf, fg_off, len);
if (pd == NULL) {
retval = -ENOMEM;
- goto bad_proto;
+ goto fail;
}
peer->pdg_size++;
} else {
@@ -754,9 +756,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
- retval = -ENOMEM;
peer->pdg_size--;
- goto bad_proto;
+ retval = -ENOMEM;
+ goto fail;
}
} else {
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -767,7 +769,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
*/
fwnet_pd_delete(pd);
peer->pdg_size--;
- goto bad_proto;
+ retval = -ENOMEM;
+ goto fail;
}
}
} /* new datagram or add to existing one */
@@ -793,14 +796,13 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
-
- bad_proto:
+ fail:
spin_unlock_irqrestore(&dev->lock, flags);
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return 0;
+ return retval;
}
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7f03540cabe..be29b0bb247 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -694,7 +694,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
log_ar_at_event('R', p.speed, p.header, evt);
/*
- * The OHCI bus reset handler synthesizes a phy packet with
+ * Several controllers, notably from NEC and VIA, forget to
+ * write ack_complete status at PHY packet reception.
+ */
+ if (evt == OHCI1394_evt_no_status &&
+ (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
+ p.ack = ACK_COMPLETE;
+
+ /*
+ * The OHCI bus reset handler synthesizes a PHY packet with
* the new generation number when a bus reset happens (see
* section 8.4.2.3). This helps us determine when a request
* was received and make sure we send the response in the same
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 9f76171717e..bfae4b30979 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -450,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
- kref_put(&orb->kref, free_orb);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
fw_error("status write for unknown orb\n");
}
@@ -472,20 +472,28 @@ static void complete_transaction(struct fw_card *card, int rcode,
* So this callback only sets the rcode if it hasn't already
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
+ *
+ * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
+ * OXUF936QSE firmwares occasionally respond after Split_Timeout and
+ * complete the ORB just fine. Note, we also get RCODE_CANCELLED
+ * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
*/
spin_lock_irqsave(&card->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
- if (orb->rcode != RCODE_COMPLETE) {
+
+ if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
list_del(&orb->link);
spin_unlock_irqrestore(&card->lock, flags);
+
orb->callback(orb, NULL);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
spin_unlock_irqrestore(&card->lock, flags);
}
- kref_put(&orb->kref, free_orb);
+ kref_put(&orb->kref, free_orb); /* transaction callback reference */
}
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
@@ -501,9 +509,8 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
list_add_tail(&orb->link, &lu->orb_list);
spin_unlock_irqrestore(&device->card->lock, flags);
- /* Take a ref for the orb list and for the transaction callback. */
- kref_get(&orb->kref);
- kref_get(&orb->kref);
+ kref_get(&orb->kref); /* transaction callback reference */
+ kref_get(&orb->kref); /* orb callback reference */
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation, device->max_speed, offset,
@@ -525,11 +532,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
- if (fw_cancel_transaction(device->card, &orb->t) == 0)
- continue;
+ fw_cancel_transaction(device->card, &orb->t);
orb->rcode = RCODE_CANCELLED;
orb->callback(orb, NULL);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
}
return retval;
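
The sbp2.c hunks make each kref_get()/kref_put() name the logical reference it manages (the ORB/status-write reference versus the transaction-callback reference) and treat RCODE_CANCELLED like completion so that late responses from certain bridge firmwares do not drop the ORB early. The pairing discipline itself is easy to show in isolation; the sketch below uses an invented refcounted object, not the SBP-2 structures.

	#include <stdio.h>
	#include <stdlib.h>

	/* Minimal refcount helper standing in for struct kref. */
	struct orb { int refcount; };

	static void orb_get(struct orb *o) { o->refcount++; }

	static void orb_put(struct orb *o)
	{
		if (--o->refcount == 0) {
			printf("orb freed\n");
			free(o);
		}
	}

	int main(void)
	{
		struct orb *o = malloc(sizeof(*o));

		o->refcount = 1;	/* creation reference */
		orb_get(o);		/* orb callback reference (dropped by the status-write path) */
		orb_get(o);		/* transaction callback reference (dropped in the completion path) */

		orb_put(o);		/* orb callback reference */
		orb_put(o);		/* transaction callback reference */
		orb_put(o);		/* creation reference: count hits zero, object is freed */
		return 0;
	}
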
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7e31d434834..d2ab01e90a9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
- } else
+ } else {
connector->status = connector->funcs->detect(connector);
+ drm_helper_hpd_irq_event(dev);
+ }
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
}
count = (*connector_funcs->get_modes)(connector);
- if (!count) {
+ if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
- if (!count)
- return 0;
- }
+ if (count == 0)
+ goto prune;
drm_mode_connector_list_update(connector);
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
enum drm_connector_status old_status, status;
bool repoll = false, changed = false;
+ if (!drm_kms_helper_poll)
+ return;
+
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
bool poll = false;
struct drm_connector *connector;
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
+
/* kill timer and schedule immediate execution, this doesn't block */
cancel_delayed_work(&dev->mode_config.output_poll_work);
- queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+ if (drm_kms_helper_poll)
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 90288ec7c28..84da748555b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -55,6 +55,9 @@
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
+ unsigned int usize, asize;
dev = file_priv->minor->dev;
atomic_inc(&dev->ioctl_count);
@@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp,
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ u32 drv_size;
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ drv_size = _IOC_SIZE(ioctl->cmd_drv);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+ }
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
ioctl = &drm_ioctls[nr];
cmd = ioctl->cmd;
+ usize = asize = _IOC_SIZE(cmd);
} else
goto err_i1;
@@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp,
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
- if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+ if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
- kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
@@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp,
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
- _IOC_SIZE(cmd)) != 0) {
+ usize) != 0) {
retcode = -EFAULT;
goto err_i1;
}
- }
+ } else
+ memset(kdata, 0, usize);
+
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
else {
@@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp,
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
- _IOC_SIZE(cmd)) != 0)
+ usize) != 0)
retcode = -EFAULT;
}
}
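
In the drm_ioctl() hunk the bounce buffer is now sized from the larger of the size encoded in the driver's ioctl definition (cmd_drv) and the size user space passed, only the user-supplied usize bytes are copied in or out, and the buffer is zeroed up to usize when the ioctl carries no IN data. A small user-space sketch of that allocate-max / copy-user-size discipline, under invented names, is shown below.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical illustration of the usize/asize split in drm_ioctl(). */
	static void *prepare_kdata(const void *user_arg, size_t usize,
				   size_t drv_size, int has_in_data)
	{
		size_t asize = usize > drv_size ? usize : drv_size;	/* buffer sized for the driver's view */
		char *kdata = malloc(asize);

		if (!kdata)
			return NULL;
		if (has_in_data)
			memcpy(kdata, user_arg, usize);	/* copy only the size user space encoded */
		else
			memset(kdata, 0, usize);	/* no IN data: hand the driver zeroed fields */
		return kdata;
	}

	int main(void)
	{
		char user_arg[8] = "old-abi";		/* user space built against the smaller struct */
		char *kdata = prepare_kdata(user_arg, sizeof(user_arg), 16, 1);

		printf("driver sees a 16-byte buffer, first 8 bytes: %.7s\n", kdata);
		free(kdata);
		return 0;
	}
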
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index de82e201d68..6a5e403f9aa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_connector *connector = fb_helper_conn->connector;
+ struct drm_connector *connector;
if (!fb_helper_conn)
return false;
+ connector = fb_helper_conn->connector;
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
@@ -369,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
+static void drm_fb_helper_sysrq(int dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a652a65546..b744dad5c23 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -41,6 +41,7 @@
/* from BKL pushdown: note that nothing else serializes idr_find() */
DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_device * dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a516c3..9bf93bc9a32 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/* Contention */
+ mutex_unlock(&drm_global_mutex);
schedule();
+ mutex_lock(&drm_global_mutex);
if (signal_pending(current)) {
ret = -EINTR;
break;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index da99edc5088..a6bfc302ed9 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
EXPORT_SYMBOL(drm_mm_put_block);
-static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
- unsigned alignment)
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
{
unsigned wasted = 0;
- if (entry->size < size)
+ if (end - start < size)
return 0;
if (alignment) {
- register unsigned tmp = entry->start % alignment;
+ unsigned tmp = start % alignment;
if (tmp)
wasted = alignment - tmp;
}
- if (entry->size >= size + wasted) {
+ if (end >= start + size + wasted) {
return 1;
}
@@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_mm_node(entry, size, alignment))
+ if (!check_free_hole(entry->start, entry->start + entry->size,
+ size, alignment))
continue;
if (!best_match)
@@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (entry->start > end || (entry->start+entry->size) < start)
- continue;
+ unsigned long adj_start = entry->start < start ?
+ start : entry->start;
+ unsigned long adj_end = entry->start + entry->size > end ?
+ end : entry->start + entry->size;
- if (!check_free_mm_node(entry, size, alignment))
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
if (!best_match)
@@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
- if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+ if (check_free_hole(node->start, node->start + node->size,
+ mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
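
drm_mm.c now expresses the fit test as check_free_hole(start, end, size, alignment), which lets the in-range search clamp a free block to [start, end) before testing it. The self-contained sketch below mirrors that arithmetic; the example addresses are arbitrary.

	#include <stdio.h>

	/* Mirrors the fit test introduced as check_free_hole() in drm_mm.c. */
	static int check_free_hole(unsigned long start, unsigned long end,
				   unsigned long size, unsigned alignment)
	{
		unsigned wasted = 0;

		if (end - start < size)
			return 0;
		if (alignment) {
			unsigned tmp = start % alignment;
			if (tmp)
				wasted = alignment - tmp;	/* bytes lost to aligning the start */
		}
		return end >= start + size + wasted;
	}

	int main(void)
	{
		/* A 4 KiB hole at 0x1100 cannot hold a 4 KiB, 4 KiB-aligned block,
		 * because the alignment waste no longer fits. */
		printf("%d\n", check_free_hole(0x1100, 0x2100, 0x1000, 0x1000));
		/* The same request fits once the hole is large enough to absorb the waste. */
		printf("%d\n", check_free_hole(0x1100, 0x3000, 0x1000, 0x1000));
		return 0;
	}
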
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f1f473ea97d..949326d2a8e 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
/* Fill in HSync values */
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 3778360ecee..fda67468e60 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
break;
}
- if (!agpmem)
+ if (&agpmem->head == &dev->agp->memory)
goto vm_fault_error;
/*
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 0e6c131313d..61b4caf220f 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 5168862c922..671aa18415a 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
};
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index da78f2c0d90..5c8e53458ed 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
+ i915_gem_evict.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
@@ -18,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
+ intel_panel.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 0d6ff640e1c..8c2ad014c47 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -30,20 +30,17 @@
#include "intel_drv.h"
struct intel_dvo_device {
- char *name;
+ const char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
- struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
-
- struct drm_display_mode *panel_fixed_mode;
- bool panel_wants_dither;
+ struct i2c_adapter *i2c_bus;
};
struct intel_dvo_dev_ops {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9214119c015..5e43d707678 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
+#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ unsigned long flags;
+ struct intel_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ const char *pipe = crtc->pipe ? "B" : "A";
+ const char *plane = crtc->plane ? "B" : "A";
+ struct intel_unpin_work *work;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = crtc->unpin_work;
+ if (work == NULL) {
+ seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+ pipe, plane);
+ } else {
+ if (!work->pending) {
+ seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+ pipe, plane);
+ } else {
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+ pipe, plane);
+ }
+ if (work->enable_stall_check)
+ seq_printf(m, "Stall check enabled, ");
+ else
+ seq_printf(m, "Stall check waiting for page flip ioctl, ");
+ seq_printf(m, "%d prepares\n", work->pending);
+
+ if (work->old_fb_obj) {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
+ if(obj_priv)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ }
+ if (work->pending_flip_obj) {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
+ if(obj_priv)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ return 0;
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -467,6 +516,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
+ if (error->overlay)
+ intel_overlay_print_error_state(m, error->overlay);
+
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -774,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f19ffe87af3..9d67b485303 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -499,6 +499,13 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
}
+
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
i915_emit_breadcrumb(dev);
return 0;
@@ -613,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -655,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_batch_free;
+ }
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -669,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_clip_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -878,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
- int ret = 0;
+ int ret;
if (IS_I965G(dev))
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -888,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
- goto out;
- }
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
#endif
/* Get some space for it */
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+ dev_priv->mch_res.name = "i915 MCHBAR";
+ dev_priv->mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+ &dev_priv->mch_res,
MCHBAR_SIZE, MCHBAR_SIZE,
PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
+ 0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
- goto out;
+ return ret;
}
if (IS_I965G(dev))
@@ -912,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
pci_write_config_dword(dev_priv->bridge_dev, reg,
lower_32_bits(dev_priv->mch_res.start));
-out:
- return ret;
+ return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
@@ -2075,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
dev_priv->regs = ioremap(base, size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
@@ -2360,46 +2377,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
- DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5044f653e8e..216deb57978 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .is_i8xx = 1,
+ .gen = 2, .is_i8xx = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .is_i8xx = 1,
+ .gen = 2, .is_i8xx = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+ .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+ .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_g33_info = {
- .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1,
+ .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_g45_info = {
- .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+ .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_pineview_info = {
- .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
- .need_gfx_hws = 1,
- .has_hotplug = 1,
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+ .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1, .is_gen6 = 1,
+ .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1, .is_gen6 = 1,
+ .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -180,7 +175,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 906663b9929..af4a263cf25 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -113,6 +113,9 @@ struct intel_opregion {
int enabled;
};
+struct intel_overlay;
+struct intel_overlay_error_state;
+
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
@@ -166,6 +169,7 @@ struct drm_i915_error_state {
u32 purgeable:1;
} *active_bo;
u32 active_bo_count;
+ struct intel_overlay_error_state *overlay;
};
struct drm_i915_display_funcs {
@@ -186,9 +190,8 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
-struct intel_overlay;
-
struct intel_device_info {
+ u8 gen;
u8 is_mobile : 1;
u8 is_i8xx : 1;
u8 is_i85x : 1;
@@ -204,7 +207,6 @@ struct intel_device_info {
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 is_ironlake : 1;
- u8 is_gen6 : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
@@ -242,6 +244,7 @@ typedef struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
+ uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
void *seqno_page;
@@ -251,6 +254,7 @@ typedef struct drm_i915_private {
drm_local_map_t hws_map;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
+ struct drm_gem_object *renderctx;
struct resource mch_res;
@@ -285,6 +289,9 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
+ u32 flush_rings;
+#define FLUSH_RENDER_RING 0x1
+#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -568,8 +575,6 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
- uint32_t next_gem_seqno;
-
/**
* Waiting sequence number, if any
*/
@@ -610,6 +615,8 @@ typedef struct drm_i915_private {
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
+ /* Panel fitter placement and size for Ironlake+ */
+ u32 pch_pf_pos, pch_pf_size;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -669,6 +676,8 @@ struct drm_i915_gem_object {
struct list_head list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
+ /** This object's place on eviction list */
+ struct list_head evict_list;
/**
* This is set if the object is on the active or flushing lists
@@ -978,6 +987,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
+int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
struct drm_file *file_priv,
@@ -991,7 +1001,9 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj, int id);
+ struct drm_gem_object *obj,
+ int id,
+ int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
@@ -1003,6 +1015,11 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_evict_inactive(struct drm_device *dev);
+
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -1066,6 +1083,10 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+/* overlay */
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
/**
* Lock test for when it's just for synchronization of ring access.
*
@@ -1092,26 +1113,26 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv = dev->dev_private; \
+ drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
+ intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
} while (0)
#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv = dev->dev_private; \
+ drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv->render_ring, x); \
+ intel_ring_emit(dev, &dev_priv__->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv = dev->dev_private; \
+ drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv->render_ring); \
+ dev_priv__->render_ring.tail); \
+ intel_ring_advance(dev, &dev_priv__->render_ring); \
} while(0)
/**
@@ -1141,7 +1162,6 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1160,27 +1180,13 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_GEN3(dev) (IS_I915G(dev) || \
- IS_I915GM(dev) || \
- IS_I945G(dev) || \
- IS_I945GM(dev) || \
- IS_G33(dev) || \
- IS_PINEVIEW(dev))
-#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E42)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0758c7802e6..16fca1d1799 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,7 +34,9 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/intel-gtt.h>
+static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -48,8 +50,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size);
-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
@@ -58,6 +58,14 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
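+/* An object is "inactive" when it is bound into the GTT but is neither in
+ * use by the GPU nor pinned. */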
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+{
+ return obj_priv->gtt_space &&
+ !obj_priv->active &&
+ obj_priv->pin_count == 0;
+}
+
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -128,12 +136,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- drm_gem_object_unreference_unlocked(obj);
- if (ret)
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
return ret;
+ }
- args->handle = handle;
+ /* Sink the floating reference from kref_init(handlecount) */
+ drm_gem_object_handle_unreference_unlocked(obj);
+ args->handle = handle;
return 0;
}
@@ -313,7 +324,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size,
+ i915_gem_get_gtt_alignment(obj));
if (ret)
return ret;
@@ -1036,6 +1048,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
+
+ /* Maintain LRU order of "inactive" objects */
+ if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1137,7 +1154,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
@@ -1155,8 +1172,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unlock;
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unlock;
@@ -1169,6 +1184,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
+ if (i915_gem_object_is_inactive(obj_priv))
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1363,7 +1381,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_mmap_gtt *args = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
@@ -1409,7 +1426,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return ret;
}
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
drm_gem_object_unreference(obj);
@@ -1493,9 +1509,16 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
+	/* Our goal here is to return as much of the memory as
+	 * possible back to the system, as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*. Here we mirror the actions taken
+	 * by shmem_delete_inode() to release the backing store.
+	 */
inode = obj->filp->f_path.dentry->d_inode;
- if (inode->i_op->truncate)
- inode->i_op->truncate (inode);
+ truncate_inode_pages(inode->i_mapping, 0);
+ if (inode->i_op->truncate_range)
+ inode->i_op->truncate_range(inode, 0, (loff_t)-1);
obj_priv->madv = __I915_MADV_PURGED;
}
@@ -1887,19 +1910,6 @@ i915_gem_flush(struct drm_device *dev,
flush_domains);
}
-static void
-i915_gem_flush_ring(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring)
-{
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
- ring->flush(dev, ring,
- invalidate_domains,
- flush_domains);
-}
-
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
@@ -1973,8 +1983,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* cause memory corruption through use-after-free.
*/
- BUG_ON(obj_priv->active);
-
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
@@ -2010,34 +2018,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
-static struct drm_gem_object *
-i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *best = NULL;
- struct drm_gem_object *first = NULL;
-
- /* Try to find the smallest clean object */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = &obj_priv->base;
- if (obj->size >= min_size) {
- if ((!obj_priv->dirty ||
- i915_gem_object_is_purgeable(obj_priv)) &&
- (!best || obj->size < best->size)) {
- best = obj;
- if (best->size == min_size)
- return best;
- }
- if (!first)
- first = obj;
- }
- }
-
- return best ? best : first;
-}
-
-static int
+int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2078,155 +2059,6 @@ i915_gpu_idle(struct drm_device *dev)
return ret;
}
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- bool lists_empty;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- if (lists_empty)
- return -ENOSPC;
-
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev);
- if (ret)
- return ret;
-
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
- ret = i915_gem_evict_from_inactive_list(dev);
- if (ret)
- return ret;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
- BUG_ON(!lists_empty);
-
- return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev, int min_size)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- int ret;
-
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
- struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
- for (;;) {
- i915_gem_retire_requests(dev);
-
- /* If there's an inactive buffer available now, grab it
- * and be done.
- */
- obj = i915_gem_find_inactive_object(dev, min_size);
- if (obj) {
- struct drm_i915_gem_object *obj_priv;
-
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
- obj_priv = to_intel_bo(obj);
- BUG_ON(obj_priv->pin_count != 0);
- BUG_ON(obj_priv->active);
-
- /* Wait on the rendering and unbind the buffer. */
- return i915_gem_object_unbind(obj);
- }
-
- /* If we didn't get anything, but the ring is still processing
- * things, wait for the next to finish and hopefully leave us
- * a buffer to evict.
- */
- if (!list_empty(&render_ring->request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&render_ring->request_list,
- struct drm_i915_gem_request,
- list);
-
- ret = i915_wait_request(dev,
- request->seqno, request->ring);
- if (ret)
- return ret;
-
- continue;
- }
-
- if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&bsd_ring->request_list,
- struct drm_i915_gem_request,
- list);
-
- ret = i915_wait_request(dev,
- request->seqno, request->ring);
- if (ret)
- return ret;
-
- continue;
- }
-
- /* If we didn't have anything on the request list but there
- * are buffers awaiting a flush, emit one and try again.
- * When we wait on it, those buffers waiting for that flush
- * will get moved to inactive.
- */
- if (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
-
- /* Find an object that we can immediately reuse */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = &obj_priv->base;
- if (obj->size >= min_size)
- break;
-
- obj = NULL;
- }
-
- if (obj != NULL) {
- uint32_t seqno;
-
- i915_gem_flush_ring(dev,
- obj->write_domain,
- obj->write_domain,
- obj_priv->ring);
- seqno = i915_add_request(dev, NULL,
- obj->write_domain,
- obj_priv->ring);
- if (seqno == 0)
- return -ENOMEM;
- continue;
- }
- }
-
- /* If we didn't do any of the above, there's no single buffer
- * large enough to swap out for the new one, so just evict
- * everything and start again. (This should be rare.)
- */
- if (!list_empty (&dev_priv->mm.inactive_list))
- return i915_gem_evict_from_inactive_list(dev);
- else
- return i915_gem_evict_everything(dev);
- }
-}
-
int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
@@ -2666,7 +2498,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
@@ -2684,7 +2516,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size,
+ alignment);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2714,7 +2547,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
@@ -2723,6 +2556,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
+	/* keep track of the bound object by adding it to the inactive list */
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
@@ -3117,6 +2953,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
@@ -3179,6 +3016,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
+ if (flush_domains & I915_GEM_GPU_DOMAINS) {
+ if (obj_priv->ring == &dev_priv->render_ring)
+ dev_priv->flush_rings |= FLUSH_RENDER_RING;
+ else if (obj_priv->ring == &dev_priv->bsd_ring)
+ dev_priv->flush_rings |= FLUSH_BSD_RING;
+ }
+
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
#if WATCH_BUF
@@ -3718,7 +3562,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->render_ring;
}
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -3746,6 +3589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret != 0) {
DRM_ERROR("copy %d cliprects failed: %d\n",
args->num_cliprects, ret);
+ ret = -EFAULT;
goto pre_mutex_err;
}
}
@@ -3892,6 +3736,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
+ dev_priv->flush_rings = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -3912,16 +3757,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
+ if (dev_priv->flush_rings & FLUSH_RENDER_RING)
(void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->render_ring);
-
- if (HAS_BSD(dev))
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->bsd_ring);
- }
+ dev->flush_domains,
+ &dev_priv->render_ring);
+ if (dev_priv->flush_rings & FLUSH_BSD_RING)
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->bsd_ring);
}
for (i = 0; i < args->buffer_count; i++) {
@@ -4192,6 +4035,10 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
+ WARN(obj_priv->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x\n",
+ obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
@@ -4213,8 +4060,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
- !list_empty(&obj_priv->list))
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
list_del_init(&obj_priv->list);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4359,22 +4205,34 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- /* Update the active list for the hardware's current position.
- * Otherwise this only updates on a delayed timer or when irqs are
- * actually unmasked, and our working set ends up being larger than
- * required.
- */
- i915_gem_retire_requests(dev);
- obj_priv = to_intel_bo(obj);
- /* Don't count being on the flushing list against the object being
- * done. Otherwise, a buffer left on the flushing list but not getting
- * flushed (because nobody's flushing that domain) won't ever return
- * unbusy and get reused by libdrm's bo cache. The other expected
- * consumer of this interface, OpenGL's occlusion queries, also specs
- * that the objects get unbusy "eventually" without any interference.
+ /* Count all active objects as busy, even if they are currently not used
+ * by the gpu. Users of this interface expect objects to eventually
+ * become non-busy without any further actions, therefore emit any
+ * necessary flushes here.
*/
- args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
+ obj_priv = to_intel_bo(obj);
+ args->busy = obj_priv->active;
+ if (args->busy) {
+ /* Unconditionally flush objects, even when the gpu still uses this
+ * object. Userspace calling this function indicates that it wants to
+	 * use this buffer sooner rather than later, so issuing the required
+ * flush earlier is beneficial.
+ */
+ if (obj->write_domain) {
+ i915_gem_flush(dev, 0, obj->write_domain);
+ (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
+ }
+
+ /* Update the active list for the hardware's current position.
+ * Otherwise this only updates on a delayed timer or when irqs
+ * are actually unmasked, and our working set ends up being
+ * larger than required.
+ */
+ i915_gem_retire_requests_ring(dev, obj_priv->ring);
+
+ args->busy = obj_priv->active;
+ }
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -4514,30 +4372,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_free_object_tail(obj);
}
-/** Unbinds all inactive objects. */
-static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
-
int
i915_gem_idle(struct drm_device *dev)
{
@@ -4562,7 +4396,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_from_inactive_list(dev);
+ ret = i915_gem_evict_inactive(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4680,6 +4514,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
goto cleanup_render_ring;
}
+ dev_priv->next_seqno = 1;
+
return 0;
cleanup_render_ring:
@@ -4841,7 +4677,7 @@ i915_gem_load(struct drm_device *dev)
* e.g. for cursor + overlay regs
*/
int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size)
+ int id, int size, int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4856,7 +4692,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0);
+ phys_obj->handle = drm_pci_alloc(dev, size, align);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
@@ -4938,7 +4774,9 @@ out:
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj, int id)
+ struct drm_gem_object *obj,
+ int id,
+ int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
@@ -4957,11 +4795,10 @@ i915_gem_attach_phys_object(struct drm_device *dev,
i915_gem_detach_phys_object(dev, obj);
}
-
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size);
+ obj->size, align);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 00000000000..72cae3cccad
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+
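+/* Walk the render and BSD active lists in parallel, returning objects in
+ * ascending last_rendering_seqno order so the eviction scan visits the
+ * oldest active objects first (see the XXX below about seqno wrapping).
+ */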
+static struct drm_i915_gem_object *
+i915_gem_next_active_object(struct drm_device *dev,
+ struct list_head **render_iter,
+ struct list_head **bsd_iter)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
+
+ if (*render_iter != &dev_priv->render_ring.active_list)
+ render_obj = list_entry(*render_iter,
+ struct drm_i915_gem_object,
+ list);
+
+ if (HAS_BSD(dev)) {
+ if (*bsd_iter != &dev_priv->bsd_ring.active_list)
+ bsd_obj = list_entry(*bsd_iter,
+ struct drm_i915_gem_object,
+ list);
+
+ if (render_obj == NULL) {
+ *bsd_iter = (*bsd_iter)->next;
+ return bsd_obj;
+ }
+
+ if (bsd_obj == NULL) {
+ *render_iter = (*render_iter)->next;
+ return render_obj;
+ }
+
+ /* XXX can we handle seqno wrapping? */
+ if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
+ *render_iter = (*render_iter)->next;
+ return render_obj;
+ } else {
+ *bsd_iter = (*bsd_iter)->next;
+ return bsd_obj;
+ }
+ } else {
+ *render_iter = (*render_iter)->next;
+ return render_obj;
+ }
+}
+
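+/* Remember the object on the unwind list and add its GTT block to the
+ * drm_mm scan; returns true once the scan spans a hole large enough for
+ * the requested size and alignment.
+ */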
+static bool
+mark_free(struct drm_i915_gem_object *obj_priv,
+ struct list_head *unwind)
+{
+ list_add(&obj_priv->evict_list, unwind);
+ return drm_mm_scan_add_block(obj_priv->gtt_space);
+}
+
+#define i915_for_each_active_object(OBJ, R, B) \
+ *(R) = dev_priv->render_ring.active_list.next; \
+ *(B) = dev_priv->bsd_ring.active_list.next; \
+ while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
+
+int
+i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head eviction_list, unwind_list;
+ struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+ struct list_head *render_iter, *bsd_iter;
+ int ret = 0;
+
+ i915_gem_retire_requests(dev);
+
+ /* Re-check for free space after retiring requests */
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+
+ /*
+ * The goal is to evict objects and amalgamate space in LRU order.
+ * The oldest idle objects reside on the inactive list, which is in
+ * retirement order. The next objects to retire are those on the (per
+ * ring) active list that do not have an outstanding flush. Once the
+ * hardware reports completion (the seqno is updated after the
+ * batchbuffer has been finished) the clean buffer objects would
+ * be retired to the inactive list. Any dirty objects would be added
+ * to the tail of the flushing list. So after processing the clean
+ * active objects we need to emit a MI_FLUSH to retire the flushing
+ * list, hence the retirement order of the flushing list is in
+ * advance of the dirty objects on the active lists.
+ *
+ * The retirement sequence is thus:
+ * 1. Inactive objects (already retired)
+ * 2. Clean active objects
+ * 3. Flushing list
+ * 4. Dirty active objects.
+ *
+ * On each list, the oldest objects lie at the HEAD with the freshest
+ * object on the TAIL.
+ */
+
+ INIT_LIST_HEAD(&unwind_list);
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+
+ /* First see if there is a large enough contiguous idle region... */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+
+ /* Now merge in the soon-to-be-expired objects... */
+ i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ /* Does the object require an outstanding flush? */
+ if (obj_priv->base.write_domain || obj_priv->pin_count)
+ continue;
+
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+
+ /* Finally add anything with a pending flush (in order of retirement) */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ if (obj_priv->pin_count)
+ continue;
+
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+ i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+		if (!obj_priv->base.write_domain || obj_priv->pin_count)
+ continue;
+
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+
+ /* Nothing found, clean up and bail out! */
+ list_for_each_entry(obj_priv, &unwind_list, evict_list) {
+ ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ BUG_ON(ret);
+ }
+
+ /* We expect the caller to unpin, evict all and try again, or give up.
+ * So calling i915_gem_evict_everything() is unnecessary.
+ */
+ return -ENOSPC;
+
+found:
+ INIT_LIST_HEAD(&eviction_list);
+ list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+ &unwind_list, evict_list) {
+ if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
+		/* drm_mm doesn't allow any other operations while
+ * scanning, therefore store to be evicted objects on a
+ * temporary list. */
+ list_move(&obj_priv->evict_list, &eviction_list);
+ }
+ }
+
+ /* Unbinding will emit any required flushes */
+ list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+ &eviction_list, evict_list) {
+#if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+ ret = i915_gem_object_unbind(&obj_priv->base);
+ if (ret)
+ return ret;
+ }
+
+	/* The just-created free hole should be on top of the free stack
+ * maintained by drm_mm, so this BUG_ON actually executes in O(1).
+ * Furthermore all accessed data has just recently been used, so it
+ * should be really fast, too. */
+ BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0));
+
+ return 0;
+}
+
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+ ret = i915_gem_evict_inactive(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
+ return 0;
+}
+
+/** Unbinds all inactive objects. */
+int
+i915_gem_evict_inactive(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ DRM_ERROR("Error unbinding object: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85785a8844e..59457e83b01 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -425,9 +425,11 @@ static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
struct drm_i915_gem_object *src_priv;
int page, page_count;
+ u32 reloc_offset;
if (src == NULL)
return NULL;
@@ -442,18 +444,27 @@ i915_error_object_create(struct drm_device *dev,
if (dst == NULL)
return NULL;
+ reloc_offset = src_priv->gtt_offset;
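+	/* Copy each page through the GTT aperture (mapped at reloc_offset)
+	 * instead of kmapping the backing pages directly. */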
for (page = 0; page < page_count; page++) {
- void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
unsigned long flags;
+ void __iomem *s;
+ void *d;
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
goto unwind;
+
local_irq_save(flags);
- s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s, KM_IRQ0);
+ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_offset,
+ KM_IRQ0);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s, KM_IRQ0);
local_irq_restore(flags);
+
dst->pages[page] = d;
+
+ reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
dst->gtt_offset = src_priv->gtt_offset;
@@ -489,6 +500,7 @@ i915_error_state_free(struct drm_device *dev,
i915_error_object_free(error->batchbuffer[1]);
i915_error_object_free(error->ringbuffer);
kfree(error->active_bo);
+ kfree(error->overlay);
kfree(error);
}
@@ -612,18 +624,57 @@ static void i915_capture_error_state(struct drm_device *dev)
if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size &&
- batchbuffer[0] != obj)
+ error->acthd < obj_priv->gtt_offset + obj->size)
batchbuffer[1] = obj;
count++;
}
+	/* For completeness, scan the other lists to catch these bizarre errors. */
+ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if (batchbuffer[0] == NULL &&
+ bbaddr >= obj_priv->gtt_offset &&
+ bbaddr < obj_priv->gtt_offset + obj->size)
+ batchbuffer[0] = obj;
+
+ if (batchbuffer[1] == NULL &&
+ error->acthd >= obj_priv->gtt_offset &&
+ error->acthd < obj_priv->gtt_offset + obj->size)
+ batchbuffer[1] = obj;
+
+ if (batchbuffer[0] && batchbuffer[1])
+ break;
+ }
+ }
+ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if (batchbuffer[0] == NULL &&
+ bbaddr >= obj_priv->gtt_offset &&
+ bbaddr < obj_priv->gtt_offset + obj->size)
+ batchbuffer[0] = obj;
+
+ if (batchbuffer[1] == NULL &&
+ error->acthd >= obj_priv->gtt_offset &&
+ error->acthd < obj_priv->gtt_offset + obj->size)
+ batchbuffer[1] = obj;
+
+ if (batchbuffer[0] && batchbuffer[1])
+ break;
+ }
+ }
/* We need to copy these to an anonymous buffer as the simplest
	 * method to avoid being overwritten by userspace.
*/
error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
- error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+ if (batchbuffer[1] != batchbuffer[0])
+ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+ else
+ error->batchbuffer[1] = NULL;
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
@@ -667,6 +718,8 @@ static void i915_capture_error_state(struct drm_device *dev)
do_gettimeofday(&error->time);
+ error->overlay = intel_overlay_capture_error_state(dev);
+
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
@@ -834,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
queue_work(dev_priv->wq, &dev_priv->error_work);
}
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj_priv;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ bool stall_detected;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+
+ if (work == NULL || work->pending || !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj_priv = to_intel_bo(work->pending_flip_obj);
+	if (IS_I965G(dev)) {
+ int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ } else {
+ int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+ stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+ crtc->y * crtc->fb->pitch +
+ crtc->x * crtc->fb->bits_per_pixel/8);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (stall_detected) {
+ DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+ intel_prepare_page_flip(dev, intel_crtc->plane);
+ }
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -951,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
- if (!dev_priv->flip_pending_is_done)
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, 0);
intel_finish_page_flip(dev, 0);
+ }
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
- if (!dev_priv->flip_pending_is_done)
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, 1);
intel_finish_page_flip(dev, 1);
+ }
}
if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
@@ -1251,6 +1351,16 @@ void i915_hangcheck_elapsed(unsigned long data)
&dev_priv->render_ring),
i915_get_tail_request(dev)->seqno)) {
dev_priv->hangcheck_count = 0;
+
+ /* Issue a wake-up to catch stuck h/w. */
+ if (dev_priv->render_ring.waiting_gem_seqno |
+ dev_priv->bsd_ring.waiting_gem_seqno) {
+ DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
+ if (dev_priv->render_ring.waiting_gem_seqno)
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ if (dev_priv->bsd_ring.waiting_gem_seqno)
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ }
return;
}
@@ -1318,12 +1428,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* user interrupt should be enabled, but masked initial */
+ /* Gen6 only needs render pipe_control now */
+ if (IS_GEN6(dev))
+ render_mask = GT_PIPE_NOTIFY;
+
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ if (IS_GEN6(dev))
+ I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index d1bf92b9978..ea5d3fea4b6 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -114,10 +114,6 @@ struct opregion_asle {
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAIL (2<<10)
-#define ASLE_BACKLIGHT_FAIL (2<<12)
-#define ASLE_PFIT_FAIL (2<<14)
-#define ASLE_PWM_FREQ_FAIL (2<<16)
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
@@ -155,11 +151,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAIL;
+ return ASLE_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAIL;
+ return ASLE_BACKLIGHT_FAILED;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
@@ -211,7 +207,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
if (!(pfit & ASLE_PFIT_VALID))
- return ASLE_PFIT_FAIL;
+ return ASLE_PFIT_FAILED;
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 281db6e5403..d094e912922 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -170,6 +170,7 @@
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
@@ -180,6 +181,12 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+#define MI_MM_SPACE_GTT (1<<8)
+#define MI_MM_SPACE_PHYSICAL (0<<8)
+#define MI_SAVE_EXT_STATE_EN (1<<3)
+#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -312,6 +319,7 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
+# define MI_FLUSH_ENABLE (1 << 11)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
@@ -1100,6 +1108,11 @@
#define PEG_BAND_GAP_DATA 0x14d68
/*
+ * Logical Context regs
+ */
+#define CCID 0x2180
+#define CCID_EN (1<<0)
+/*
* Overlay regs
*/
@@ -2069,6 +2082,7 @@
#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
+#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
#define PIPEACONF_ENABLE (1<<31)
#define PIPEACONF_DISABLE 0
@@ -2928,6 +2942,7 @@
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+#define TRANS_DP_SYNC_MASK (3<<3)
/* SNB eDP training params */
/* SNB A-stepping */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6e2025274db..2c6b98f2440 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
@@ -395,16 +395,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
- DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- DRM_UDELAY(150);
+ POSTING_READ(DPLL_A_MD);
+ }
+ udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
@@ -413,10 +417,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -460,16 +464,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
- DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- DRM_UDELAY(150);
+ POSTING_READ(DPLL_B_MD);
+ }
+ udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
@@ -478,10 +486,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +554,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +579,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -602,7 +610,7 @@ void i915_save_display(struct drm_device *dev)
/* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
- if (IS_IRONLAKE_M(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
@@ -618,7 +626,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -660,24 +668,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -708,7 +716,7 @@ void i915_restore_display(struct drm_device *dev)
/* only restore FBC info on the platform that supports FBC*/
if (I915_HAS_FBC(dev)) {
- if (IS_IRONLAKE_M(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_disable_fbc(dev);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
@@ -723,14 +731,15 @@ void i915_restore_display(struct drm_device *dev)
}
}
/* VGA state */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
- DRM_UDELAY(150);
+ POSTING_READ(VGA_PD);
+ udelay(150);
i915_restore_vga(dev);
}
@@ -748,7 +757,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
@@ -762,7 +771,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_disable_drps(dev);
/* Cache mode state */
@@ -820,7 +829,7 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -835,7 +844,7 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_drps(dev);
/* Cache mode state */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ee0732b222a..4b7735196cd 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -160,19 +160,20 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa, temp;
bool ret;
+ bool turn_off_dac = false;
temp = adpa = I915_READ(PCH_ADPA);
- if (HAS_PCH_CPT(dev)) {
- /* Disable DAC before force detect */
- I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
- (void)I915_READ(PCH_ADPA);
- } else {
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
- }
+ if (HAS_PCH_SPLIT(dev))
+ turn_off_dac = true;
+
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -185,10 +186,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
- while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
- ;
+ if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000, 1))
+ DRM_ERROR("timed out waiting for FORCE_TRIGGER");
- if (HAS_PCH_CPT(dev)) {
+ if (turn_off_dac) {
I915_WRITE(PCH_ADPA, temp);
(void)I915_READ(PCH_ADPA);
}
@@ -237,17 +239,13 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
for (i = 0; i < tries ; i++) {
- unsigned long timeout;
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- timeout = jiffies + msecs_to_jiffies(1000);
/* wait for FORCE_DETECT to go off */
- do {
- if (!(I915_READ(PORT_HOTPLUG_EN) &
- CRT_HOTPLUG_FORCE_DETECT))
- break;
- msleep(1);
- } while (time_after(timeout, jiffies));
+ if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT) == 0,
+ 1000, 1))
+ DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
}
stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -331,7 +329,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
/* Wait for next Vblank to substitute
* border color for Color info */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
st00 = I915_READ8(VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -508,17 +506,8 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
-static void intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- intel_i2c_destroy(intel_encoder->ddc_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
- .destroy = intel_crt_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
void intel_crt_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5ec10e02341..40cc5da264a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
@@ -976,14 +977,70 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-void
-intel_wait_for_vblank(struct drm_device *dev)
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe. Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- if (in_dbg_master())
- mdelay(20); /* The kernel debugger cannot call msleep() */
- else
- msleep(20);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+
+ /* Clear existing vblank status. Note this will clear any other
+ * sticky status fields as well.
+ *
+ * This races with i915_driver_irq_handler() with the result
+ * that either function could miss a vblank event. Here it is not
+ * fatal, as we will either wait upon the next vblank interrupt or
+ * timeout. Generally speaking intel_wait_for_vblank() is only
+ * called during modeset at which time the GPU should be idle and
+ * should *not* be performing page flips and thus not waiting on
+ * vblanks...
+ * Currently, the result of us stealing a vblank from the irq
+ * handler is that a single frame will be skipped during swapbuffers.
+ */
+ I915_WRITE(pipestat_reg,
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+ /* Wait for vblank interrupt bit to set */
+ if (wait_for((I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS),
+ 50, 0))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/**
+ * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * So this function waits for the display line value to settle (it
+ * usually ends up stopping at the start of the next frame).
+ */
+void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 last_line;
+
+ /* Wait for the display line to settle */
+ do {
+ last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+ mdelay(5);
+ } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+ time_after(timeout, jiffies));
+
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
}
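intel_wait_for_vblank_off() cannot rely on the vblank status bit once the pipe is disabled, so it samples the display-line counter until consecutive reads match or a 100ms deadline passes. A standalone sketch of that settle loop, with read_line() standing in for the PIPExDSL register read:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Sample a moving counter every 5ms and declare it settled once two
 * consecutive reads match; give up after timeout_ms. */
static bool wait_until_settled(uint32_t (*read_line)(void), int timeout_ms)
{
        uint32_t last = read_line();
        int waited = 0;

        while (waited < timeout_ms) {
                usleep(5000);
                waited += 5;

                uint32_t cur = read_line();
                if (cur == last)
                        return true;    /* counter stopped moving */
                last = cur;
        }
        return false;                   /* still changing at the deadline */
}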
/* Parameters have changed, update FBC info */
@@ -1037,7 +1094,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
@@ -1052,16 +1108,11 @@ void i8xx_disable_fbc(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
- if (time_after(jiffies, timeout)) {
- DRM_DEBUG_DRIVER("FBC idle timed out\n");
- break;
- }
- ; /* do nothing */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
}
- intel_wait_for_vblank(dev);
-
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1118,7 +1169,6 @@ void g4x_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1179,7 +1229,6 @@ void ironlake_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1453,7 +1502,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1462,23 +1511,22 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
Start = obj_priv->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
- DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitch);
I915_WRITE(dspstride, fb->pitch);
if (IS_I965G(dev)) {
- I915_WRITE(dspbase, Offset);
- I915_READ(dspbase);
I915_WRITE(dspsurf, Start);
- I915_READ(dspsurf);
I915_WRITE(dsptileoff, (y << 16) | x);
+ I915_WRITE(dspbase, Offset);
} else {
I915_WRITE(dspbase, Start + Offset);
- I915_READ(dspbase);
}
+ POSTING_READ(dspbase);
- if ((IS_I965G(dev) || plane == 0))
+ if (IS_I965G(dev) || plane == 0)
intel_update_fbc(crtc, &crtc->mode);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_increase_pllclock(crtc, true);
return 0;
@@ -1489,7 +1537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
@@ -1497,13 +1544,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_gem_object *obj;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
- int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- u32 dspcntr;
int ret;
/* no fb bound */
@@ -1539,73 +1579,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- dspcntr = I915_READ(dspcntr_reg);
- /* Mask out pixel format bits in case we change it */
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- if (crtc->fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- default:
- DRM_ERROR("Unknown color depth\n");
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+ if (ret) {
i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
- if (IS_I965G(dev)) {
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- }
-
- if (HAS_PCH_SPLIT(dev))
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- I915_WRITE(dspcntr_reg, dspcntr);
-
- Start = obj_priv->gtt_offset;
- Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, crtc->fb->pitch);
- I915_WRITE(dspstride, crtc->fb->pitch);
- if (IS_I965G(dev)) {
- I915_WRITE(dspbase, Offset);
- I915_READ(dspbase);
- I915_WRITE(dspsurf, Start);
- I915_READ(dspsurf);
- I915_WRITE(dsptileoff, (y << 16) | x);
- } else {
- I915_WRITE(dspbase, Start + Offset);
- I915_READ(dspbase);
+ return ret;
}
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
-
- intel_wait_for_vblank(dev);
-
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
obj_priv = to_intel_bo(intel_fb->obj);
i915_gem_object_unpin(intel_fb->obj);
}
- intel_increase_pllclock(crtc, true);
mutex_unlock(&dev->struct_mutex);
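With this hunk, intel_pipe_set_base() pins the new object, delegates register programming to intel_pipe_set_base_atomic(), and unpins on failure so the reference cannot leak. A compact sketch of that acquire/delegate/undo ordering; every name below is a stand-in, only the ordering matters:

static int fb_pin(void)        { return 0; }    /* pretend the pin succeeds */
static void fb_unpin(void)     { }
static int fb_program_hw(void) { return -1; }   /* pretend programming fails */

static int sketch_set_base(void)
{
        int ret = fb_pin();
        if (ret)
                return ret;

        ret = fb_program_hw();
        if (ret) {
                fb_unpin();             /* undo the pin before returning the error */
                return ret;
        }
        return 0;
}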
@@ -1627,54 +1612,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u8 sr1;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
-
- if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
- return;
-
- I915_WRITE8(VGA_SR_INDEX, 1);
- sr1 = I915_READ8(VGA_SR_DATA);
- I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
- udelay(100);
-
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
-}
-
-static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("\n");
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
-}
-
-static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
- udelay(200);
-}
-
-
static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
@@ -1928,9 +1865,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
- int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
- int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
- int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1945,7 +1879,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
- int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1958,7 +1891,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+ DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
@@ -1968,10 +1901,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
- if (HAS_eDP) {
- /* enable eDP PLL */
- ironlake_enable_pll_edp(crtc);
- } else {
+ if (!HAS_eDP) {
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -2003,17 +1933,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Enable panel fitting for LVDS */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || HAS_eDP || intel_pch_has_edp(crtc)) {
- temp = I915_READ(pf_ctl_reg);
- I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
-
- /* currently full aspect */
- I915_WRITE(pf_win_pos, 0);
-
- I915_WRITE(pf_win_size,
- (dev_priv->panel_fixed_mode->hdisplay << 16) |
- (dev_priv->panel_fixed_mode->vdisplay));
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || HAS_eDP || intel_pch_has_edp(crtc))) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+ dev_priv->pch_pf_pos);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+ dev_priv->pch_pf_size);
}
/* Enable CPU pipe */
@@ -2097,9 +2029,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int reg;
reg = I915_READ(trans_dp_ctl);
- reg &= ~TRANS_DP_PORT_SEL_MASK;
- reg = TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING;
+ reg &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK);
+ reg |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2137,18 +2070,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
- while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
- ;
-
+ if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
+ DRM_ERROR("failed to enable transcoder\n");
}
intel_crtc_load_lut(crtc);
intel_update_fbc(crtc, &crtc->mode);
+ break;
- break;
case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
drm_vblank_off(dev, pipe);
/* Disable display plane */
@@ -2164,40 +2096,22 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- i915_disable_vga(dev);
-
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- n = 0;
+
/* wait for cpu pipe off, pipe state */
- while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
- n++;
- if (n < 60) {
- udelay(500);
- continue;
- } else {
- DRM_DEBUG_KMS("pipe %d off delay\n",
- pipe);
- break;
- }
- }
+ if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
+ DRM_ERROR("failed to turn off cpu pipe\n");
} else
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
udelay(100);
/* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
- }
- I915_WRITE(pf_win_size, 0);
- POSTING_READ(pf_win_size);
-
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -2244,20 +2158,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
- I915_READ(transconf_reg);
- n = 0;
+
/* wait for PCH transcoder off, transcoder state */
- while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
- n++;
- if (n < 60) {
- udelay(500);
- continue;
- } else {
- DRM_DEBUG_KMS("transcoder %d off "
- "delay\n", pipe);
- break;
- }
- }
+ if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
+ DRM_ERROR("failed to disable transcoder\n");
}
temp = I915_READ(transconf_reg);
@@ -2294,10 +2198,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(pch_dpll_reg);
- if (HAS_eDP) {
- ironlake_disable_pll_edp(crtc);
- }
-
/* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
@@ -2372,8 +2272,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- intel_update_watermarks(dev);
-
/* Enable the DPLL */
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -2413,8 +2311,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
- intel_update_watermarks(dev);
-
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, false);
drm_vblank_off(dev, pipe);
@@ -2423,9 +2319,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* Disable the VGA plane that we never use */
- i915_disable_vga(dev);
-
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2435,10 +2328,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
+ /* Wait for vblank for the disable to take effect */
+ intel_wait_for_vblank_off(dev, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
@@ -2453,7 +2344,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank_off(dev, pipe);
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2469,9 +2360,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/**
* Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
*/
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
@@ -2482,9 +2370,29 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int pipe = intel_crtc->pipe;
bool enabled;
- dev_priv->display.dpms(crtc, mode);
+ if (intel_crtc->dpms_mode == mode)
+ return;
intel_crtc->dpms_mode = mode;
+ intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
+
+ /* When switching on the display, ensure that SR is disabled
+ * with multiple pipes prior to enabling the new pipe.
+ *
+ * When switching off the display, make sure the cursor is
+ * properly hidden prior to disabling the pipe.
+ */
+ if (mode == DRM_MODE_DPMS_ON)
+ intel_update_watermarks(dev);
+ else
+ intel_crtc_update_cursor(crtc);
+
+ dev_priv->display.dpms(crtc, mode);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ intel_crtc_update_cursor(crtc);
+ else
+ intel_update_watermarks(dev);
if (!dev->primary->master)
return;
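intel_crtc_dpms() now short-circuits repeated calls and orders the watermark and cursor updates around the hardware hook: watermarks first when enabling, cursor first when disabling. A sketch of just that ordering, with placeholder callbacks standing in for the real driver hooks:

#include <stdbool.h>

static void dpms_sketch(bool on,
                        void (*set_pipe)(bool),
                        void (*update_watermarks)(void),
                        void (*update_cursor)(void))
{
        if (on)
                update_watermarks();    /* self-refresh must be sorted out first */
        else
                update_cursor();        /* hide the cursor before the pipe goes down */

        set_pipe(on);

        if (on)
                update_cursor();        /* safe to show once the pipe is running */
        else
                update_watermarks();    /* recompute with one pipe fewer */
}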
@@ -2536,6 +2444,20 @@ void intel_encoder_commit (struct drm_encoder *encoder)
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -2867,7 +2789,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};
-static struct cxsr_latency cxsr_latency_table[] = {
+static const struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
@@ -2905,11 +2827,13 @@ static struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
- int fsb, int mem)
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
{
+ const struct cxsr_latency *latency;
int i;
- struct cxsr_latency *latency;
if (fsb == 0 || mem == 0)
return NULL;
@@ -2930,13 +2854,9 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
/* deactivate cxsr */
- reg = I915_READ(DSPFW3);
- reg &= ~(PINEVIEW_SELF_REFRESH_EN);
- I915_WRITE(DSPFW3, reg);
- DRM_INFO("Big FIFO is disabled\n");
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
@@ -3024,12 +2944,12 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- struct cxsr_latency *latency;
int sr_clock;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
@@ -3075,9 +2995,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
@@ -3354,12 +3273,11 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
int line_count;
int planea_htotal = 0, planeb_htotal = 0;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
/* Need htotal for all active display plane */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- if (crtc->enabled) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
if (intel_crtc->plane == 0)
planea_htotal = crtc->mode.htotal;
else
@@ -3519,7 +3437,6 @@ static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
@@ -3530,8 +3447,8 @@ static void intel_update_watermarks(struct drm_device *dev)
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- if (crtc->enabled) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
@@ -3589,10 +3506,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- bool is_edp = false;
+ struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -3613,12 +3529,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
-
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3642,7 +3558,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- is_edp = true;
+ has_edp_encoder = intel_encoder;
break;
}
@@ -3720,10 +3636,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (is_edp) {
+ if (has_edp_encoder) {
target_clock = mode->clock;
- intel_edp_link_config(intel_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(has_edp_encoder,
+ &lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
instead of link clock */
@@ -3744,7 +3660,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
- } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
+ } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
switch (dev_priv->edp_bpp/3) {
case 8:
temp |= PIPE_8BPC;
@@ -3817,7 +3733,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(200);
- if (is_edp) {
+ if (has_edp_encoder) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3966,9 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll_reg = pch_dpll_reg;
}
- if (is_edp) {
- ironlake_disable_pll_edp(crtc);
- } else if ((dpll & DPLL_VCO_ENABLE)) {
+ if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
@@ -4063,7 +3977,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (!is_edp) {
+ if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
I915_READ(dpll_reg);
@@ -4142,7 +4056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(link_m1_reg, m_n.link_m);
I915_WRITE(link_n1_reg, m_n.link_n);
- if (is_edp) {
+ if (has_edp_encoder) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
@@ -4167,7 +4081,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(pipeconf_reg, pipeconf);
I915_READ(pipeconf_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
@@ -4180,9 +4094,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
-
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
@@ -4216,6 +4127,62 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
}
}
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool visible = base != 0;
+ u32 cntl;
+
+ if (intel_crtc->cursor_visible == visible)
+ return;
+
+ cntl = I915_READ(CURACNTR);
+ if (visible) {
+ /* On these chipsets we can only modify the base whilst
+ * the cursor is disabled.
+ */
+ I915_WRITE(CURABASE, base);
+
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ /* XXX width must be 64, stride 256 => 0x00 << 28 */
+ cntl |= CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB;
+ } else
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ I915_WRITE(CURACNTR, cntl);
+
+ intel_crtc->cursor_visible = visible;
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ if (base) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+}
+
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc)
{
@@ -4225,12 +4192,12 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
- uint32_t base, pos;
+ u32 base, pos;
bool visible;
pos = 0;
- if (crtc->fb) {
+ if (intel_crtc->cursor_on && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
@@ -4259,37 +4226,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
pos |= y << CURSOR_Y_SHIFT;
visible = base != 0;
- if (!visible && !intel_crtc->cursor_visble)
+ if (!visible && !intel_crtc->cursor_visible)
return;
I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
- if (intel_crtc->cursor_visble != visible) {
- uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
- if (base) {
- /* Hooray for CUR*CNTR differences */
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
- cntl |= pipe << 28; /* Connect to correct pipe */
- } else {
- cntl &= ~(CURSOR_FORMAT_MASK);
- cntl |= CURSOR_ENABLE;
- cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
- }
- } else {
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- cntl |= CURSOR_MODE_DISABLE;
- } else {
- cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- }
- }
- I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
-
- intel_crtc->cursor_visble = visible;
- }
- /* and commit changes on next vblank */
- I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+ if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
if (visible)
intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
@@ -4354,8 +4298,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->gtt_offset;
} else {
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, bo,
- (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
+ align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
@@ -4544,7 +4490,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
encoder_funcs->commit(encoder);
}
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
return crtc;
}
@@ -4749,7 +4695,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4793,7 +4739,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
@@ -4916,15 +4862,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
-struct intel_unpin_work {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
- struct drm_pending_vblank_event *event;
- int pending;
-};
-
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
@@ -5012,7 +4949,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- intel_crtc->unpin_work->pending = 1;
+ if ((++intel_crtc->unpin_work->pending) > 1)
+ DRM_ERROR("Prepared flip multiple times\n");
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
@@ -5031,9 +4969,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
- int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
- int ret, pipesrc;
- u32 flip_mask;
+ int pipe = intel_crtc->pipe;
+ u32 pf, pipesrc;
+ int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -5082,34 +5020,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
- if (intel_crtc->plane)
- flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- else
- flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+ if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ u32 flip_mask;
- /* Wait for any previous flip to finish */
- if (IS_GEN3(dev))
- while (I915_READ(ISR) & flip_mask)
- ;
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
+
+ work->enable_stall_check = true;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = obj_priv->gtt_offset;
- offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
BEGIN_LP_RING(4);
- if (IS_I965G(dev)) {
+ switch(INTEL_INFO(dev)->gen) {
+ case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(offset | obj_priv->tiling_mode);
- pipesrc = I915_READ(pipesrc_reg);
- OUT_RING(pipesrc & 0x0fff0fff);
- } else {
+ OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ break;
+
+ case 3:
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(offset);
+ OUT_RING(obj_priv->gtt_offset + offset);
OUT_RING(MI_NOOP);
+ break;
+
+ case 4:
+ case 5:
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ break;
+
+ case 6:
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch | obj_priv->tiling_mode);
+ OUT_RING(obj_priv->gtt_offset);
+
+ pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ break;
}
ADVANCE_LP_RING();
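The flip emission now switches on the hardware generation, since the same logical page flip is encoded with different dwords on gen2/3 versus gen4 and later (with gen6 adding the panel-fitter/pipesrc dword). A simplified sketch of emitting a fixed-size packet per generation; the opcode values below are placeholders, not the real MI_* encodings:

#include <stdint.h>

static int emit_flip_sketch(uint32_t *ring, int gen,
                            uint32_t pitch, uint32_t base, uint32_t offset)
{
        int n = 0;

        switch (gen) {
        case 2:
        case 3:
                ring[n++] = 0x10;               /* placeholder flip opcode */
                ring[n++] = pitch;
                ring[n++] = base + offset;      /* pre-965: full linear address */
                ring[n++] = 0;                  /* padding no-op */
                break;
        default:                                /* gen4 and later */
                ring[n++] = 0x10;
                ring[n++] = pitch;
                ring[n++] = base;               /* 965+: surface base only */
                ring[n++] = 0;                  /* pf | pipesrc in the driver */
                break;
        }
        return n;                               /* dwords written */
}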
@@ -5190,7 +5167,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ intel_crtc->dpms_mode = -1;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
@@ -5432,37 +5409,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
};
static struct drm_gem_object *
-intel_alloc_power_context(struct drm_device *dev)
+intel_alloc_context_page(struct drm_device *dev)
{
- struct drm_gem_object *pwrctx;
+ struct drm_gem_object *ctx;
int ret;
- pwrctx = i915_gem_alloc_object(dev, 4096);
- if (!pwrctx) {
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(pwrctx, 4096);
+ ret = i915_gem_object_pin(ctx, 4096);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
- ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
mutex_unlock(&dev->struct_mutex);
- return pwrctx;
+ return ctx;
err_unpin:
- i915_gem_object_unpin(pwrctx);
+ i915_gem_object_unpin(ctx);
err_unref:
- drm_gem_object_unreference(pwrctx);
+ drm_gem_object_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
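intel_alloc_context_page() keeps the usual goto-based unwind ladder: each acquired resource has a label, and a later failure releases the earlier resources in reverse order. A self-contained sketch of the pattern with stubbed helpers:

struct sketch_ctx { int pinned; };

static struct sketch_ctx *sketch_alloc(void)       { static struct sketch_ctx c; return &c; }
static void sketch_free(struct sketch_ctx *c)      { (void)c; }
static int sketch_pin(struct sketch_ctx *c)        { c->pinned = 1; return 0; }
static void sketch_unpin(struct sketch_ctx *c)     { c->pinned = 0; }
static int sketch_set_domain(struct sketch_ctx *c) { (void)c; return 0; }

static struct sketch_ctx *alloc_context_page_sketch(void)
{
        struct sketch_ctx *ctx = sketch_alloc();
        if (!ctx)
                return 0;

        if (sketch_pin(ctx))
                goto err_free;
        if (sketch_set_domain(ctx))
                goto err_unpin;

        return ctx;                     /* fully initialised */

err_unpin:
        sketch_unpin(ctx);              /* undo in reverse order of acquisition */
err_free:
        sketch_free(ctx);
        return 0;
}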
@@ -5494,7 +5471,6 @@ void ironlake_enable_drps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
- int i = 0;
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
@@ -5538,13 +5514,8 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
- if (i++ > 100) {
- DRM_ERROR("stuck trying to change perf mode\n");
- break;
- }
- msleep(1);
- }
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
+ DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
ironlake_set_drps(dev, fstart);
@@ -5725,7 +5696,8 @@ void intel_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- return;
+ if (IS_GEN6(dev))
+ return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5768,6 +5740,31 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
+ if (IS_IRONLAKE_M(dev)) {
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (dev_priv->renderctx) {
+ struct drm_i915_gem_object *obj_priv;
+ obj_priv = to_intel_bo(dev_priv->renderctx);
+ if (obj_priv) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_SET_CONTEXT);
+ OUT_RING(obj_priv->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_FLUSH);
+ ADVANCE_LP_RING();
+ }
+ } else {
+ DRM_DEBUG_KMS("Failed to allocate render context."
+ "Disable RC6\n");
+ return;
+ }
+ }
+
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_i915_gem_object *obj_priv = NULL;
@@ -5776,7 +5773,7 @@ void intel_init_clock_gating(struct drm_device *dev)
} else {
struct drm_gem_object *pwrctx;
- pwrctx = intel_alloc_power_context(dev);
+ pwrctx = intel_alloc_context_page(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
obj_priv = to_intel_bo(pwrctx);
@@ -5948,6 +5945,29 @@ static void intel_init_quirks(struct drm_device *dev)
}
}
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(1, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1<<5, VGA_SR_DATA);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
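The relocated i915_disable_vga() takes the legacy VGA I/O range from the VGA arbiter and pokes the standard sequencer index/data port pair (SR01 bit 5 is screen-off) before writing VGA_DISP_DISABLE. A userspace, x86-only sketch of that index/data access pattern, assuming ioperm() stands in for the arbiter grant and that the process has raw-I/O privileges:

#include <sys/io.h>

#define SR_INDEX 0x3c4                  /* standard VGA sequencer index port */
#define SR_DATA  0x3c5                  /* standard VGA sequencer data port */

/* Read-modify-write sequencer register 1; bit 5 blanks the screen. */
static void vga_screen_off_sketch(void)
{
        if (ioperm(0x3c0, 0x20, 1))     /* needs CAP_SYS_RAWIO / root */
                return;

        outb(0x01, SR_INDEX);           /* select SR01, clocking mode */
        unsigned char sr1 = inb(SR_DATA);
        outb(sr1 | (1 << 5), SR_DATA);  /* set the screen-off bit */
}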
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5996,6 +6016,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
+ /* Just disable it once at startup */
+ i915_disable_vga(dev);
+
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
@@ -6034,6 +6057,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ if (dev_priv->renderctx) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = to_intel_bo(dev_priv->renderctx);
+ I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
+ I915_READ(CCID);
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(dev_priv->renderctx);
+ }
+
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 40be1fa65be..51d142939a2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,10 +42,11 @@
#define DP_LINK_CONFIGURATION_SIZE 9
-#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
+#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
+#define IS_PCH_eDP(i) ((i)->is_pch_edp)
-struct intel_dp_priv {
+struct intel_dp {
+ struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
@@ -54,40 +55,39 @@ struct intel_dp_priv {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
- struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
};
-static void
-intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+}
-static void
-intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
+static void intel_dp_link_train(struct intel_dp *intel_dp);
+static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config (struct intel_encoder *intel_encoder,
- int *lane_num, int *link_bw)
+ int *lane_num, int *link_bw)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- *lane_num = dp_priv->lane_count;
- if (dp_priv->link_bw == DP_LINK_BW_1_62)
+ *lane_num = intel_dp->lane_count;
+ if (intel_dp->link_bw == DP_LINK_BW_1_62)
*link_bw = 162000;
- else if (dp_priv->link_bw == DP_LINK_BW_2_7)
+ else if (intel_dp->link_bw == DP_LINK_BW_2_7)
*link_bw = 270000;
}
static int
-intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
+intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_lane_count = 4;
- if (dp_priv->dpcd[0] >= 0x11) {
- max_lane_count = dp_priv->dpcd[2] & 0x1f;
+ if (intel_dp->dpcd[0] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[2] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
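struct intel_dp now embeds its intel_encoder as .base, and enc_to_intel_dp() recovers the containing structure with container_of() instead of chasing a dev_priv pointer. A standalone sketch of that embedding pattern with simplified stand-in types:

#include <stddef.h>

#define sketch_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sketch_encoder {
        int type;
};

struct sketch_dp {
        struct sketch_encoder base;     /* embedded, so the upcast is just pointer math */
        unsigned int output_reg;
};

static struct sketch_dp *enc_to_sketch_dp(struct sketch_encoder *enc)
{
        return sketch_container_of(enc, struct sketch_dp, base);
}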
@@ -99,10 +99,9 @@ intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
}
static int
-intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- int max_link_bw = dp_priv->dpcd[1];
+ int max_link_bw = intel_dp->dpcd[1];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
@@ -126,13 +125,11 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
-intel_dp_link_required(struct drm_device *dev,
- struct intel_encoder *intel_encoder, int pixel_clock)
+intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
@@ -149,14 +146,13 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
- int max_lanes = intel_dp_max_lane_count(intel_encoder);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_lanes = intel_dp_max_lane_count(intel_dp);
- if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+ if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;
@@ -167,8 +163,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
- if (!IS_eDP(intel_encoder) &&
- (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
+ if (!IS_eDP(intel_dp) &&
+ (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
@@ -232,19 +228,17 @@ intel_hrawclk(struct drm_device *dev)
}
static int
-intel_dp_aux_ch(struct intel_encoder *intel_encoder,
+intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_encoder->enc.dev;
+ uint32_t output_reg = intel_dp->output_reg;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
int i;
int recv_bytes;
- uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
@@ -253,7 +247,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
@@ -268,41 +262,43 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
else
precharge = 5;
+ if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
+ return -EBUSY;
+ }
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4) {
- uint32_t d = pack_aux(send + i, send_bytes - i);
-
- I915_WRITE(ch_data + i, d);
- }
-
- ctl = (DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl, ctl);
- (void) I915_READ(ch_ctl);
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
for (;;) {
- udelay(100);
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
+ udelay(100);
}
/* Clear done status and any errors */
- I915_WRITE(ch_ctl, (status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR));
- (void) I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ if (status & DP_AUX_CH_CTL_DONE)
break;
}
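The reworked intel_dp_aux_ch() refuses to start while SEND_BUSY is set, then retries the whole transaction up to five times (the DP spec asks for at least three), polling for the busy bit to clear and accepting the result only when DONE is reported. A sketch of that control flow with register access stubbed behind function pointers; unlike the driver, this sketch does not bound or pace the inner poll:

#include <stdint.h>

#define SKETCH_BUSY (1u << 31)
#define SKETCH_DONE (1u << 30)

static int aux_xfer_sketch(uint32_t (*rd)(void), void (*wr)(uint32_t), uint32_t cmd)
{
        if (rd() & SKETCH_BUSY)
                return -1;              /* a previous transaction was left running */

        for (int try = 0; try < 5; try++) {
                uint32_t status;

                wr(cmd | SKETCH_BUSY);  /* kick off the transaction */

                do {
                        status = rd();  /* the driver also delays ~100us per poll */
                } while (status & SKETCH_BUSY);

                wr(status | SKETCH_DONE);       /* write back to clear sticky status bits */
                if (status & SKETCH_DONE)
                        return 0;       /* transaction completed */
        }
        return -1;                      /* every attempt failed */
}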
@@ -329,22 +325,19 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
if (recv_bytes > recv_size)
recv_bytes = recv_size;
- for (i = 0; i < recv_bytes; i += 4) {
- uint32_t d = I915_READ(ch_data + i);
-
- unpack_aux(d, recv + i, recv_bytes - i);
- }
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
return recv_bytes;
}
/* Write data to the aux channel in native mode */
static int
-intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -361,7 +354,7 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
- ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -376,15 +369,15 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
/* Write a single byte to the aux channel in native mode */
static int
-intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
uint16_t address, uint8_t byte)
{
- return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
+ return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
-intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -403,7 +396,7 @@ intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
reply_bytes = recv_bytes + 1;
for (;;) {
- ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
@@ -426,10 +419,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct intel_dp_priv *dp_priv = container_of(adapter,
- struct intel_dp_priv,
- adapter);
- struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
+ struct intel_dp *intel_dp = container_of(adapter,
+ struct intel_dp,
+ adapter);
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -468,7 +460,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
for (;;) {
- ret = intel_dp_aux_ch(intel_encoder,
+ ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
@@ -496,57 +488,42 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
DRM_DEBUG_KMS("i2c_init %s\n", name);
- dp_priv->algo.running = false;
- dp_priv->algo.address = 0;
- dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
-
- memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
- dp_priv->adapter.owner = THIS_MODULE;
- dp_priv->adapter.class = I2C_CLASS_DDC;
- strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
- dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
- dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
-
- return i2c_dp_aux_add_bus(&dp_priv->adapter);
+ intel_dp->algo.running = false;
+ intel_dp->algo.address = 0;
+ intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+ intel_dp->adapter.owner = THIS_MODULE;
+ intel_dp->adapter.class = I2C_CLASS_DDC;
+ strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ intel_dp->adapter.algo_data = &intel_dp->algo;
+ intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+
+ return i2c_dp_aux_add_bus(&intel_dp->adapter);
}
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_encoder);
- int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+ if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
dev_priv->panel_fixed_mode) {
- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
-
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
-
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
-
- adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-
+ intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ mode, adjusted_mode);
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
@@ -558,31 +535,33 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
+ if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
<= link_avail) {
- dp_priv->link_bw = bws[clock];
- dp_priv->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
- dp_priv->link_bw, dp_priv->lane_count,
+ intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
}
}
- if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
/* okay we failed just pick the highest */
- dp_priv->lane_count = max_lane_count;
- dp_priv->link_bw = bws[max_clock];
- adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
- dp_priv->link_bw, dp_priv->lane_count,
+ intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
+
return true;
}
+
return false;
}
@@ -626,17 +605,14 @@ bool intel_pch_has_edp(struct drm_crtc *crtc)
struct drm_encoder *encoder;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder;
- struct intel_dp_priv *dp_priv;
+ struct intel_dp *intel_dp;
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- dp_priv = intel_encoder->dev_priv;
-
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
- return dp_priv->is_pch_edp;
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+ return intel_dp->is_pch_edp;
}
return false;
}
@@ -657,18 +633,15 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder;
- struct intel_dp_priv *dp_priv;
+ struct intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- dp_priv = intel_encoder->dev_priv;
-
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
- lane_count = dp_priv->lane_count;
- if (IS_PCH_eDP(dp_priv))
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = intel_dp->lane_count;
+ if (IS_PCH_eDP(intel_dp))
bpp = dev_priv->edp_bpp;
break;
}
@@ -724,107 +697,114 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- struct drm_crtc *crtc = intel_encoder->enc.crtc;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_crtc *crtc = intel_dp->base.enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- dp_priv->DP = (DP_VOLTAGE_0_4 |
+ intel_dp->DP = (DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0);
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- dp_priv->DP |= DP_SYNC_HS_HIGH;
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- dp_priv->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
- dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
- dp_priv->DP |= DP_LINK_TRAIN_OFF;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
- switch (dp_priv->lane_count) {
+ switch (intel_dp->lane_count) {
case 1:
- dp_priv->DP |= DP_PORT_WIDTH_1;
+ intel_dp->DP |= DP_PORT_WIDTH_1;
break;
case 2:
- dp_priv->DP |= DP_PORT_WIDTH_2;
+ intel_dp->DP |= DP_PORT_WIDTH_2;
break;
case 4:
- dp_priv->DP |= DP_PORT_WIDTH_4;
+ intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
- if (dp_priv->has_audio)
- dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ if (intel_dp->has_audio)
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- dp_priv->link_configuration[0] = dp_priv->link_bw;
- dp_priv->link_configuration[1] = dp_priv->lane_count;
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
- if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
- dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- dp_priv->DP |= DP_ENHANCED_FRAMING;
+ if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
- dp_priv->DP |= DP_PIPEB_SELECT;
+ intel_dp->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp)) {
/* don't miss out required setting for eDP */
- dp_priv->DP |= DP_PLL_ENABLE;
+ intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
- dp_priv->DP |= DP_PLL_FREQ_160MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
- dp_priv->DP |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
}
static void ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- u32 pp, pp_status;
+ u32 pp;
- pp_status = I915_READ(PCH_PP_STATUS);
- if (pp_status & PP_ON)
+ if (I915_READ(PCH_PP_STATUS) & PP_ON)
return;
pp = I915_READ(PCH_PP_CONTROL);
+
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
- do {
- pp_status = I915_READ(PCH_PP_STATUS);
- } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+ if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+ DRM_ERROR("panel on wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- u32 pp, pp_status;
+ u32 pp;
pp = I915_READ(PCH_PP_CONTROL);
+
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
- do {
- pp_status = I915_READ(PCH_PP_STATUS);
- } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("panel off wait timed out\n");
+ if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
+ DRM_ERROR("panel off wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
/* Make sure VDD is enabled so DP AUX will work */
- pp |= EDP_FORCE_VDD;
+ pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -849,33 +829,87 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, pp);
}
+static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+}
+
+static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ udelay(200);
+}
+
+static void intel_dp_prepare(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (IS_eDP(intel_dp)) {
+ ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_on(dev);
+ ironlake_edp_pll_on(encoder);
+ }
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_dp);
+}
+
+static void intel_dp_commit(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (!(dp_reg & DP_PORT_EN)) {
+ intel_dp_link_train(intel_dp);
+ }
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ ironlake_edp_backlight_on(dev);
+}
+
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
- if (dp_reg & DP_PORT_EN) {
- intel_dp_link_down(intel_encoder, dp_priv->DP);
- if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
- ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_off(dev);
- }
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_off(dev);
}
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_dp);
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ ironlake_edp_pll_off(encoder);
} else {
if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
- if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_on(dev);
+ intel_dp_link_train(intel_dp);
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
- }
}
}
- dp_priv->dpms_mode = mode;
+ intel_dp->dpms_mode = mode;
}
/*
@@ -883,12 +917,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_encoder *intel_encoder,
+intel_dp_get_link_status(struct intel_dp *intel_dp,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
- ret = intel_dp_aux_native_read(intel_encoder,
+ ret = intel_dp_aux_native_read(intel_dp,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
@@ -965,7 +999,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_encoder *intel_encoder,
+intel_get_adjust_train(struct intel_dp *intel_dp,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
@@ -1101,27 +1135,27 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
-intel_dp_set_link_train(struct intel_encoder *intel_encoder,
+intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
int ret;
- I915_WRITE(dp_priv->output_reg, dp_reg_value);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ POSTING_READ(intel_dp->output_reg);
if (first)
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_dp_aux_native_write_1(intel_encoder,
+ intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_encoder,
+ ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
@@ -1130,12 +1164,10 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
}
static void
-intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+intel_dp_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
@@ -1145,13 +1177,15 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
bool first = true;
int tries;
u32 reg;
+ uint32_t DP = intel_dp->DP;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
- link_configuration, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1162,39 +1196,39 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
- if (!intel_dp_set_link_train(intel_encoder, reg,
+ if (!intel_dp_set_link_train(intel_dp, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_encoder, link_status))
+ if (!intel_dp_get_link_status(intel_dp, link_status))
break;
- if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
+ if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
- for (i = 0; i < dp_priv->lane_count; i++)
+ for (i = 0; i < intel_dp->lane_count; i++)
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == dp_priv->lane_count)
+ if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
@@ -1207,7 +1241,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
}
/* channel equalization */
@@ -1217,30 +1251,30 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_encoder, reg,
+ if (!intel_dp_set_link_train(intel_dp, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_encoder, link_status))
+ if (!intel_dp_get_link_status(intel_dp, link_status))
break;
- if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+ if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
@@ -1250,53 +1284,53 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
break;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
++tries;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
- I915_WRITE(dp_priv->output_reg, reg);
- POSTING_READ(dp_priv->output_reg);
- intel_dp_aux_native_write_1(intel_encoder,
+ I915_WRITE(intel_dp->output_reg, reg);
+ POSTING_READ(intel_dp->output_reg);
+ intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
static void
-intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
+intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ uint32_t DP = intel_dp->DP;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp)) {
DP &= ~DP_PLL_ENABLE;
- I915_WRITE(dp_priv->output_reg, DP);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(intel_dp->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(intel_dp->output_reg);
}
udelay(17000);
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
- I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
}
/*
@@ -1309,41 +1343,39 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
*/
static void
-intel_dp_check_link_status(struct intel_encoder *intel_encoder)
+intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
- if (!intel_encoder->enc.crtc)
+ if (!intel_dp->base.enc.crtc)
return;
- if (!intel_dp_get_link_status(intel_encoder, link_status)) {
- intel_dp_link_down(intel_encoder, dp_priv->DP);
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ intel_dp_link_down(intel_dp);
return;
}
- if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
- intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+ if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
+ intel_dp_link_train(intel_dp);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum drm_connector_status status;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_encoder,
- 0x000, dp_priv->dpcd,
- sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+ if (intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
- if (dp_priv->dpcd[0] != 0)
+ if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
- DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
- dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
+ intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
return status;
}
@@ -1357,19 +1389,18 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
- dp_priv->has_audio = false;
+ intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- switch (dp_priv->output_reg) {
+ switch (intel_dp->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
break;
@@ -1389,11 +1420,11 @@ intel_dp_detect(struct drm_connector *connector)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_encoder,
- 0x000, dp_priv->dpcd,
- sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+ if (intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
- if (dp_priv->dpcd[0] != 0)
+ if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
@@ -1402,18 +1433,17 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
if (ret) {
- if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+ if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
!dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
@@ -1430,7 +1460,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1452,9 +1482,9 @@ intel_dp_destroy (struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_encoder_prepare,
+ .prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_encoder_commit,
+ .commit = intel_dp_commit,
};
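/*
 * The CRTC helper invokes these hooks in prepare -> mode_set -> commit order
 * during a mode set, so intel_dp_prepare() above can turn the eDP backlight
 * off, power the panel and PLL, and take the link down before the new mode is
 * programmed, while intel_dp_commit() trains the link and, for eDP, turns the
 * backlight back on afterwards.
 */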
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -1470,27 +1500,17 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
-static void intel_dp_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_dp_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_encoder);
+ if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_dp);
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -1500,18 +1520,18 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_dp *intel_dp;
+
if (encoder->crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- return dp_priv->output_reg;
- }
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+ return intel_dp->output_reg;
}
+
return -1;
}
@@ -1540,30 +1560,28 @@ intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
+ struct intel_dp *intel_dp;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_dp_priv *dp_priv;
const char *name = NULL;
int type;
- intel_encoder = kcalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
- if (!intel_encoder)
+ intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
+ if (!intel_dp)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_dp);
return;
}
+ intel_encoder = &intel_dp->base;
- dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
-
- if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
+ if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
if (intel_dpd_is_edp(dev))
- dp_priv->is_pch_edp = true;
+ intel_dp->is_pch_edp = true;
- if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
+ if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
@@ -1584,18 +1602,16 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_dp))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- dp_priv->intel_encoder = intel_encoder;
- dp_priv->output_reg = output_reg;
- dp_priv->has_audio = false;
- dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
- intel_encoder->dev_priv = dp_priv;
+ intel_dp->output_reg = output_reg;
+ intel_dp->has_audio = false;
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -1630,12 +1646,12 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_encoder, intel_connector, name);
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
- intel_encoder->ddc_bus = &dp_priv->adapter;
+ intel_encoder->ddc_bus = &intel_dp->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
- if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
+ if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b2190148703..ad312ca6b3e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,6 +32,20 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+
+#define wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W) msleep(W); \
+ } \
+ ret__; \
+})
+
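+/*
+ * Illustrative use only, mirroring the eDP panel power wait in intel_dp.c:
+ * poll PCH_PP_STATUS for up to 5000 ms, sleeping 10 ms between reads, and
+ * treat a non-zero (-ETIMEDOUT) return as failure:
+ *
+ *	if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+ *		DRM_ERROR("panel on wait timed out\n");
+ */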
/*
* Display related stuff
*/
@@ -102,7 +116,6 @@ struct intel_encoder {
struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
- void *dev_priv;
void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
@@ -110,7 +123,6 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
- void *dev_priv;
};
struct intel_crtc;
@@ -156,7 +168,7 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
- bool cursor_visble;
+ bool cursor_visible, cursor_on;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -164,6 +176,16 @@ struct intel_crtc {
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
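+/*
+ * Bookkeeping for an in-flight page flip: the work item unpins old_fb_obj
+ * once the flip to pending_flip_obj has completed and the vblank event has
+ * been delivered; enable_stall_check gates the hung-flip detection.
+ */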
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+ bool enable_stall_check;
+};
+
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
@@ -188,10 +210,18 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
+extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
@@ -199,7 +229,8 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_wait_for_vblank(struct drm_device *dev);
+extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 227feca7cf8..a399f4b2c1c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -38,7 +38,7 @@
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
-static struct intel_dvo_device intel_dvo_devices[] = {
+static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
@@ -77,20 +77,33 @@ static struct intel_dvo_device intel_dvo_devices[] = {
}
};
+struct intel_dvo {
+ struct intel_encoder base;
+
+ struct intel_dvo_device dev;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+}
+
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- u32 dvo_reg = dvo->dvo_reg;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
if (mode == DRM_MODE_DPMS_ON) {
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
- dvo->dev_ops->dpms(dvo, mode);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
} else {
- dvo->dev_ops->dpms(dvo, mode);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
@@ -100,38 +113,36 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
- if (dvo->panel_fixed_mode) {
- if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
+ if (intel_dvo->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
- return dvo->dev_ops->mode_valid(dvo, mode);
+ return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (dvo->panel_fixed_mode != NULL) {
-#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
+ if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
@@ -145,8 +156,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
#undef C
}
- if (dvo->dev_ops->mode_fixup)
- return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
+ if (intel_dvo->dev.dev_ops->mode_fixup)
+ return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
return true;
}
@@ -158,11 +169,10 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
int pipe = intel_crtc->pipe;
u32 dvo_val;
- u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
+ u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
switch (dvo_reg) {
@@ -178,7 +188,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
- dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@@ -214,40 +224,38 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- return dvo->dev_ops->detect(dvo);
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
-
- if (dvo->panel_fixed_mode != NULL) {
+ if (intel_dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
+ mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
+
return 0;
}
-static void intel_dvo_destroy (struct drm_connector *connector)
+static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -277,28 +285,20 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- }
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+ if (intel_dvo->dev.dev_ops->destroy)
+ intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+ kfree(intel_dvo->panel_fixed_mode);
+
+ intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
@@ -306,15 +306,13 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
-intel_dvo_get_current_mode (struct drm_connector *connector)
+intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- uint32_t dvo_reg = dvo->dvo_reg;
- uint32_t dvo_val = I915_READ(dvo_reg);
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
@@ -327,7 +325,6 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
crtc = intel_get_crtc_from_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
-
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -337,28 +334,32 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
}
}
}
+
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
- struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
- intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
- if (!intel_encoder)
+
+ intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+ if (!intel_dvo)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_dvo);
return;
}
+ intel_encoder = &intel_dvo->base;
+
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -367,10 +368,9 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
int gpio;
- dvo = &intel_dvo_devices[i];
-
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
@@ -393,11 +393,8 @@ void intel_dvo_init(struct drm_device *dev)
continue;
}
- if (dvo->dev_ops!= NULL)
- ret = dvo->dev_ops->init(dvo, i2cbus);
- else
- ret = false;
-
+ intel_dvo->dev = *dvo;
+ ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
if (!ret)
continue;
@@ -429,9 +426,6 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- intel_encoder->dev_priv = dvo;
- intel_encoder->i2c_bus = i2cbus;
-
drm_encoder_init(dev, &intel_encoder->enc,
&intel_dvo_enc_funcs, encoder_type);
drm_encoder_helper_add(&intel_encoder->enc,
@@ -447,9 +441,9 @@ void intel_dvo_init(struct drm_device *dev)
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
- dvo->panel_fixed_mode =
+ intel_dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
- dvo->panel_wants_dither = true;
+ intel_dvo->panel_wants_dither = true;
}
drm_sysfs_connector_add(connector);
@@ -461,6 +455,6 @@ void intel_dvo_init(struct drm_device *dev)
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
- kfree(intel_encoder);
+ kfree(intel_dvo);
kfree(intel_connector);
}
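The enc_to_intel_dvo() helper above, like enc_to_intel_hdmi() and enc_to_intel_lvds() below and the intel_dp conversion earlier in this patch, replaces the old void *dev_priv pointer with an embedded struct intel_encoder ("base") that is recovered via container_of(). A minimal, self-contained illustration of the idiom (generic names, not driver code):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder {
	int type;
};

struct dp_encoder {
	struct base_encoder base;	/* embedded base object */
	unsigned int output_reg;
};

static struct dp_encoder *to_dp_encoder(struct base_encoder *enc)
{
	/* Recover the containing object from a pointer to its embedded base. */
	return container_of(enc, struct dp_encoder, base);
}

int main(void)
{
	struct dp_encoder dp = { .base.type = 1, .output_reg = 0x64000 };
	struct base_encoder *enc = &dp.base;	/* only the base pointer is passed around */

	printf("output_reg = 0x%x\n", to_dp_encoder(enc)->output_reg);
	return 0;
}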
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 197887ed182..ccd4c97e652 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,11 +37,17 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi_priv {
+struct intel_hdmi {
+ struct intel_encoder base;
u32 sdvox_reg;
bool has_hdmi_sink;
};
+static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+}
+
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -50,8 +56,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
@@ -60,7 +65,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- if (hdmi_priv->has_hdmi_sink) {
+ if (intel_hdmi->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
if (HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
@@ -73,26 +78,25 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox |= SDVO_PIPE_B_SELECT;
}
- I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(hdmi_priv->sdvox_reg);
+ temp = I915_READ(intel_hdmi->sdvox_reg);
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
if (mode != DRM_MODE_DPMS_ON) {
@@ -101,15 +105,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp |= SDVO_ENABLE;
}
- I915_WRITE(hdmi_priv->sdvox_reg, temp);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(hdmi_priv->sdvox_reg, temp);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
}
@@ -138,19 +142,17 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(connector,
- intel_encoder->ddc_bus);
+ intel_hdmi->has_hdmi_sink = false;
+ edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
@@ -162,13 +164,13 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -199,18 +201,8 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
-static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
- .destroy = intel_hdmi_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
@@ -219,21 +211,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_hdmi_priv *hdmi_priv;
+ struct intel_hdmi *intel_hdmi;
- intel_encoder = kcalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
- if (!intel_encoder)
+ intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
+ if (!intel_hdmi)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_hdmi);
return;
}
- hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
-
+ intel_encoder = &intel_hdmi->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -274,8 +264,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!intel_encoder->ddc_bus)
goto err_connector;
- hdmi_priv->sdvox_reg = sdvox_reg;
- intel_encoder->dev_priv = hdmi_priv;
+ intel_hdmi->sdvox_reg = sdvox_reg;
drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -298,7 +287,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(intel_hdmi);
kfree(intel_connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0a2e60059fb..b819c108114 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -41,12 +41,18 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds_priv {
+struct intel_lvds {
+ struct intel_encoder base;
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
};
+static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
+}
+
/**
* Sets the backlight level.
*
@@ -90,7 +96,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_status, ctl_reg, status_reg, lvds_reg;
+ u32 ctl_reg, status_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
@@ -108,9 +114,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
- do {
- pp_status = I915_READ(status_reg);
- } while ((pp_status & PP_ON) == 0);
+ if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
+		DRM_ERROR("timed out waiting to enable LVDS pipe\n");
intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
} else {
@@ -118,9 +123,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
~POWER_TARGET_ON);
- do {
- pp_status = I915_READ(status_reg);
- } while (pp_status & PP_ON);
+ if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
+		DRM_ERROR("timed out waiting for LVDS pipe to turn off\n");
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
@@ -219,9 +223,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
@@ -241,26 +244,20 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* If we don't have a panel mode, there is nothing we can do */
if (dev_priv->panel_fixed_mode == NULL)
return true;
+
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
- adjusted_mode->hsync_start =
- dev_priv->panel_fixed_mode->hsync_start;
- adjusted_mode->hsync_end =
- dev_priv->panel_fixed_mode->hsync_end;
- adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
- adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
- adjusted_mode->vsync_start =
- dev_priv->panel_fixed_mode->vsync_start;
- adjusted_mode->vsync_end =
- dev_priv->panel_fixed_mode->vsync_end;
- adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
- adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+ mode, adjusted_mode);
+ return true;
+ }
/* Make sure pre-965s set dither correctly */
if (!IS_I965G(dev)) {
@@ -273,10 +270,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
- /* full screen scale for now */
- if (HAS_PCH_SPLIT(dev))
- goto out;
-
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -288,12 +281,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
- }
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
- switch (lvds_priv->fitting_mode) {
+ switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -378,8 +369,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
- lvds_priv->pfit_control = pfit_control;
- lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
+ intel_lvds->pfit_control = pfit_control;
+ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
dev_priv->lvds_border_bits = border;
/*
@@ -427,8 +418,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+ struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
/*
* The LVDS pin pair will already have been turned on in the
@@ -444,8 +434,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
}
/**
@@ -600,18 +590,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct drm_encoder *encoder = connector->encoder;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+ struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
}
- if (lvds_priv->fitting_mode == value) {
+ if (intel_lvds->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- lvds_priv->fitting_mode = value;
+ intel_lvds->fitting_mode = value;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -647,19 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.destroy = intel_lvds_destroy,
};
-
-static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
- .destroy = intel_lvds_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
@@ -843,13 +821,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds *intel_lvds;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
- struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -872,20 +850,20 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_encoder = kzalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_lvds_priv), GFP_KERNEL);
- if (!intel_encoder) {
+ intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
+ if (!intel_lvds) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_lvds);
return;
}
- connector = &intel_connector->base;
+ intel_encoder = &intel_lvds->base;
encoder = &intel_encoder->enc;
+ connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -905,8 +883,6 @@ void intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
- intel_encoder->dev_priv = lvds_priv;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
@@ -916,7 +892,7 @@ void intel_lvds_init(struct drm_device *dev)
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1024,6 +1000,6 @@ failed:
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ kfree(intel_lvds);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d39aea24eab..1d306a458be 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
+
+#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -1367,7 +1369,8 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->flip_addr = overlay->reg_bo->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, reg_bo,
- I915_GEM_PHYS_OVERLAY_REGS);
+ I915_GEM_PHYS_OVERLAY_REGS,
+ 0);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
@@ -1416,3 +1419,99 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
}
+
+struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+ u32 dovsta;
+ u32 isr;
+};
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay_error_state *error;
+ struct overlay_registers __iomem *regs;
+
+ if (!overlay || !overlay->active)
+ return NULL;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ error->dovsta = I915_READ(DOVSTA);
+ error->isr = I915_READ(ISR);
+ if (OVERLAY_NONPHYSICAL(overlay->dev))
+ error->base = (long) overlay->reg_bo->gtt_offset;
+ else
+ error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto err;
+
+ memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ return error;
+
+err:
+ kfree(error);
+ return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
+{
+ seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ seq_printf(m, " Register file at 0x%08lx:\n",
+ error->base);
+
+#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
+ P(OBUF_0Y);
+ P(OBUF_1Y);
+ P(OBUF_0U);
+ P(OBUF_0V);
+ P(OBUF_1U);
+ P(OBUF_1V);
+ P(OSTRIDE);
+ P(YRGB_VPH);
+ P(UV_VPH);
+ P(HORZ_PH);
+ P(INIT_PHS);
+ P(DWINPOS);
+ P(DWINSZ);
+ P(SWIDTH);
+ P(SWIDTHSW);
+ P(SHEIGHT);
+ P(YRGBSCALE);
+ P(UVSCALE);
+ P(OCLRC0);
+ P(OCLRC1);
+ P(DCLRKV);
+ P(DCLRKM);
+ P(SCLRKVH);
+ P(SCLRKVL);
+ P(SCLRKEN);
+ P(OCONFIG);
+ P(OCMD);
+ P(OSTART_0Y);
+ P(OSTART_1Y);
+ P(OSTART_0U);
+ P(OSTART_0V);
+ P(OSTART_1U);
+ P(OSTART_1V);
+ P(OTILEOFF_0Y);
+ P(OTILEOFF_1Y);
+ P(OTILEOFF_0U);
+ P(OTILEOFF_0V);
+ P(OTILEOFF_1U);
+ P(OTILEOFF_1V);
+ P(FASTHSCALE);
+ P(UVSCALEV);
+#undef P
+}
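
The new overlay error-state helpers snapshot the register file with memcpy_fromio() under GFP_ATOMIC and print every field through a short-lived P() macro that stringifies the field name. A standalone sketch of that stringify-and-print macro idiom, with a made-up fake_regs block standing in for struct overlay_registers:

/* Illustrative sketch only -- not part of the patch. */
#include <inttypes.h>
#include <stdio.h>

struct fake_regs {			/* made-up register block */
	uint32_t OBUF_0Y;
	uint32_t OSTRIDE;
	uint32_t OCMD;
};

static void dump_regs(const struct fake_regs *regs)
{
#define P(x) printf("  " #x ": 0x%08" PRIx32 "\n", regs->x)
	P(OBUF_0Y);
	P(OSTRIDE);
	P(OCMD);
#undef P
}

int main(void)
{
	struct fake_regs regs = { 0x1000, 0x500, 0x84000000 };

	dump_regs(&regs);
	return 0;
}
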
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
new file mode 100644
index 00000000000..e7f5299d9d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "intel_drv.h"
+
+void
+intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int x, y, width, height;
+
+ x = y = width = height = 0;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto done;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+ x = (adjusted_mode->hdisplay - width + 1)/2;
+ y = (adjusted_mode->vdisplay - height + 1)/2;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / mode->vdisplay;
+ x = (adjusted_mode->hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / mode->hdisplay;
+ y = (adjusted_mode->vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ }
+ }
+ break;
+
+ default:
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ break;
+ }
+
+done:
+ dev_priv->pch_pf_pos = (x << 16) | y;
+ dev_priv->pch_pf_size = (width << 16) | height;
+}
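
In the DRM_MODE_SCALE_ASPECT branch above, the cross-multiplied products decide between pillarboxing and letterboxing, and the +1 before the division rounds the centring offset. A quick standalone check of that arithmetic for a hypothetical 1024x768 mode on a 1920x1200 panel (numbers chosen purely for illustration):

/* Sketch: the aspect-preserving branch lifted out as a plain function. */
#include <stdio.h>

struct box { int x, y, width, height; };

static struct box fit_aspect(int panel_w, int panel_h, int mode_w, int mode_h)
{
	struct box b = { 0, 0, panel_w, panel_h };
	unsigned int scaled_width  = (unsigned int)panel_w * mode_h;
	unsigned int scaled_height = (unsigned int)mode_w * panel_h;

	if (scaled_width > scaled_height) {		/* pillarbox */
		b.width = scaled_height / mode_h;
		b.x = (panel_w - b.width + 1) / 2;
	} else if (scaled_width < scaled_height) {	/* letterbox */
		b.height = scaled_width / mode_w;
		b.y = (panel_h - b.height + 1) / 2;
	}
	return b;
}

int main(void)
{
	/* 1024x768 (4:3) on a 1920x1200 (16:10) panel => 1600x1200 centred
	 * with 160-pixel pillars on each side. */
	struct box b = fit_aspect(1920, 1200, 1024, 768);

	printf("pos %dx%d size %dx%d\n", b.x, b.y, b.width, b.height);
	return 0;
}
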
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 26362f8495a..cb3508f78bc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,18 +33,35 @@
#include "i915_drm.h"
#include "i915_trace.h"
+static u32 i915_gem_get_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 seqno;
+
+ seqno = dev_priv->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++dev_priv->next_seqno == 0)
+ dev_priv->next_seqno = 1;
+
+ return seqno;
+}
+
static void
render_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 cmd;
+
#if WATCH_EXEC
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
- u32 cmd;
- trace_i915_gem_request_flush(dev, ring->next_seqno,
+
+ trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
invalidate_domains, flush_domains);
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
@@ -203,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = init_ring_common(dev, ring);
+ int mode;
+
if (IS_I9XX(dev) && !IS_GEN3(dev)) {
- I915_WRITE(MI_MODE,
- (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ if (IS_GEN6(dev))
+ mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+ I915_WRITE(MI_MODE, mode);
}
return ret;
}
@@ -233,9 +254,10 @@ render_ring_add_request(struct drm_device *dev,
struct drm_file *file_priv,
u32 flush_domains)
{
- u32 seqno;
drm_i915_private_t *dev_priv = dev->dev_private;
- seqno = intel_ring_get_seqno(dev, ring);
+ u32 seqno;
+
+ seqno = i915_gem_get_seqno(dev);
if (IS_GEN6(dev)) {
BEGIN_LP_RING(6);
@@ -405,7 +427,9 @@ bsd_ring_add_request(struct drm_device *dev,
u32 flush_domains)
{
u32 seqno;
- seqno = intel_ring_get_seqno(dev, ring);
+
+ seqno = i915_gem_get_seqno(dev);
+
intel_ring_begin(dev, ring, 4);
intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(dev, ring,
@@ -479,7 +503,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+ trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
count = nbox ? nbox : 1;
@@ -515,7 +539,16 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_FLUSH |
+ MI_NO_WRITE_FLUSH |
+ MI_INVALIDATE_ISP);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+ }
/* XXX breadcrumb */
+
return 0;
}
@@ -588,9 +621,10 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- int ret;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
+ int ret;
+
ring->dev = dev;
if (I915_NEED_GFX_HWS(dev)) {
@@ -603,16 +637,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
- goto cleanup;
+ goto err_hws;
}
ring->gem_object = obj;
ret = i915_gem_object_pin(obj, ring->alignment);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- goto cleanup;
- }
+ if (ret)
+ goto err_unref;
obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
@@ -624,18 +656,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
drm_core_ioremap_wc(&ring->map, dev);
if (ring->map.handle == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
ret = -EINVAL;
- goto cleanup;
+ goto err_unpin;
}
ring->virtual_start = ring->map.handle;
ret = ring->init(dev, ring);
- if (ret != 0) {
- intel_cleanup_ring_buffer(dev, ring);
- return ret;
- }
+ if (ret)
+ goto err_unmap;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
@@ -649,7 +677,15 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
return ret;
-cleanup:
+
+err_unmap:
+ drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+ ring->gem_object = NULL;
+err_hws:
cleanup_status_page(dev, ring);
return ret;
}
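
The intel_init_ring_buffer() hunk above converts ad-hoc cleanup into the usual goto-unwind ladder: each failure jumps to a label that releases only what was already acquired, in reverse order. A tiny self-contained sketch of that pattern, with invented acquire()/release() helpers in place of the GEM and ioremap calls:

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

static int setup(int fail_late)
{
	void *a, *b;
	int ret;

	a = acquire("object");
	if (!a)
		return -1;

	b = acquire("mapping");
	if (!b) {
		ret = -1;
		goto err_a;
	}

	if (fail_late) {		/* simulate a late init failure */
		ret = -1;
		goto err_b;
	}
	return 0;

err_b:
	release("mapping", b);
err_a:
	release("object", a);
	return ret;
}

int main(void)
{
	return setup(1) == -1 ? 0 : 1;	/* expect the unwind path to run */
}
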
@@ -682,9 +718,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 4;
- while (rem--)
+ rem /= 8;
+ while (rem--) {
+ *virt++ = MI_NOOP;
*virt++ = MI_NOOP;
+ }
ring->tail = 0;
ring->space = ring->head - 8;
@@ -729,21 +767,14 @@ void intel_ring_begin(struct drm_device *dev,
intel_wrap_ring_buffer(dev, ring);
if (unlikely(ring->space < n))
intel_wait_ring_buffer(dev, ring, n);
-}
-void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring, unsigned int data)
-{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
- ring->tail += 4;
- ring->tail &= ring->size - 1;
- ring->space -= 4;
+ ring->space -= n;
}
void intel_ring_advance(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ ring->tail &= ring->size - 1;
ring->advance_ring(dev, ring);
}
@@ -762,18 +793,6 @@ void intel_fill_struct(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
-u32 intel_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- u32 seqno;
- seqno = ring->next_seqno;
-
- /* reserve 0 for non-seqno */
- if (++ring->next_seqno == 0)
- ring->next_seqno = 1;
- return seqno;
-}
-
struct intel_ring_buffer render_ring = {
.name = "render ring",
.regs = {
@@ -791,7 +810,6 @@ struct intel_ring_buffer render_ring = {
.head = 0,
.tail = 0,
.space = 0,
- .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
@@ -830,7 +848,6 @@ struct intel_ring_buffer bsd_ring = {
.head = 0,
.tail = 0,
.space = 0,
- .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
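
The ring-buffer changes retire the per-ring next_seqno field in favour of a single dev_priv->next_seqno, with 0 reserved to mean "no seqno". A minimal sketch of that wrap-around rule, using an arbitrary starting value:

/* Illustrative sketch only -- not part of the patch. */
#include <inttypes.h>
#include <stdio.h>

static uint32_t next_seqno = 0xfffffffe;	/* arbitrary starting point */

static uint32_t get_seqno(void)
{
	uint32_t seqno = next_seqno;

	/* reserve 0 for "no seqno": skip it on wrap-around */
	if (++next_seqno == 0)
		next_seqno = 1;
	return seqno;
}

int main(void)
{
	/* prints fffffffe, ffffffff, 1 -- never 0 */
	for (int i = 0; i < 3; i++)
		printf("%" PRIx32 "\n", get_seqno());
	return 0;
}
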
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d5568d3766d..525e7d3edda 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -26,7 +26,6 @@ struct intel_ring_buffer {
unsigned int head;
unsigned int tail;
unsigned int space;
- u32 next_seqno;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seqno seen at irq time */
@@ -106,8 +105,16 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring);
void intel_ring_begin(struct drm_device *dev,
struct intel_ring_buffer *ring, int n);
-void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring, u32 data);
+
+static inline void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ unsigned int data)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ *virt = data;
+ ring->tail += 4;
+}
+
void intel_fill_struct(struct drm_device *dev,
struct intel_ring_buffer *ring,
void *data,
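
With the header change above, intel_ring_emit() shrinks to an inline that stores one dword at the tail and bumps it by four; space accounting moves into intel_ring_begin() and the tail wrap into intel_ring_advance(). A simplified, purely illustrative mini_ring sketch of that begin/emit/advance split (not the driver's structure or exact accounting):

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 64			/* must be a power of two */

struct mini_ring {
	uint32_t buf[RING_SIZE / 4];
	unsigned int tail;		/* byte offset into buf */
	unsigned int space;		/* free bytes */
};

static void ring_begin(struct mini_ring *ring, unsigned int bytes)
{
	/* a real implementation would wait or wrap when space runs out */
	ring->space -= bytes;
}

static void ring_emit(struct mini_ring *ring, uint32_t data)
{
	ring->buf[ring->tail / 4] = data;
	ring->tail += 4;
}

static void ring_advance(struct mini_ring *ring)
{
	ring->tail &= RING_SIZE - 1;	/* wrap the byte offset */
}

int main(void)
{
	struct mini_ring ring = { .space = RING_SIZE };

	ring_begin(&ring, 8);
	ring_emit(&ring, 0x02000000);	/* arbitrary command dword */
	ring_emit(&ring, 0);		/* arbitrary no-op */
	ring_advance(&ring);

	printf("tail=%u space=%u\n", ring.tail, ring.space);
	return 0;
}
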
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d9d4d51aa89..e3b7a7ee39c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -31,8 +31,8 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
-#include "intel_drv.h"
#include "drm_edid.h"
+#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
@@ -47,9 +47,10 @@
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
-static char *tv_format_names[] = {
+static const char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
"PAL_H" , "PAL_I" , "PAL_M" ,
@@ -61,7 +62,9 @@ static char *tv_format_names[] = {
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-struct intel_sdvo_priv {
+struct intel_sdvo {
+ struct intel_encoder base;
+
u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
@@ -95,7 +98,7 @@ struct intel_sdvo_priv {
bool is_tv;
/* This is for current tv format name */
- char *tv_format_name;
+ int tv_format_index;
/**
* This is set if we treat the device as HDMI, instead of DVI.
@@ -132,37 +135,40 @@ struct intel_sdvo_priv {
};
struct intel_sdvo_connector {
+ struct intel_connector base;
+
/* Mark the type of connector */
uint16_t output_flag;
/* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
+ u8 tv_format_supported[TV_FORMAT_NUM];
int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+ struct drm_property *tv_format;
/* add the property for the SDVO-TV */
- struct drm_property *left_property;
- struct drm_property *right_property;
- struct drm_property *top_property;
- struct drm_property *bottom_property;
- struct drm_property *hpos_property;
- struct drm_property *vpos_property;
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
/* add the property for the SDVO-TV/LVDS */
- struct drm_property *brightness_property;
- struct drm_property *contrast_property;
- struct drm_property *saturation_property;
- struct drm_property *hue_property;
+ struct drm_property *brightness;
/* Add variable to record current setting for the above property */
u32 left_margin, right_margin, top_margin, bottom_margin;
+
/* this is to get the range of margin.*/
u32 max_hscan, max_vscan;
u32 max_hpos, cur_hpos;
@@ -171,36 +177,54 @@ struct intel_sdvo_connector {
u32 cur_contrast, max_contrast;
u32 cur_saturation, max_saturation;
u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
};
+static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
- uint16_t flags);
-static void
-intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
-static void
-intel_sdvo_create_enhance_property(struct drm_connector *connector);
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
-static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
- if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
- I915_WRITE(sdvo_priv->sdvo_reg, val);
- I915_READ(sdvo_priv->sdvo_reg);
+ if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(intel_sdvo->sdvo_reg, val);
+ I915_READ(intel_sdvo->sdvo_reg);
return;
}
- if (sdvo_priv->sdvo_reg == SDVOB) {
+ if (intel_sdvo->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
bval = I915_READ(SDVOB);
@@ -219,33 +243,27 @@ static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
}
}
-static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
- u8 *ch)
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- u8 out_buf[2];
+ u8 out_buf[2] = { addr, 0 };
u8 buf[2];
- int ret;
-
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
+ int ret;
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -255,35 +273,26 @@ static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
return false;
}
-static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
- u8 ch)
+static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- u8 out_buf[2];
+ u8 out_buf[2] = { addr, ch };
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
}
};
- out_buf[0] = addr;
- out_buf[1] = ch;
-
- if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
- {
- return true;
- }
- return false;
+ return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
}
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
u8 cmd;
- char *name;
+ const char *name;
} sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -328,13 +337,14 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
/* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
@@ -353,6 +363,27 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
/* HDMI op code */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -377,17 +408,15 @@ static const struct _sdvo_cmd_name {
};
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
-static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
- void *args, int args_len)
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(sdvo_priv), cmd);
+ SDVO_NAME(intel_sdvo), cmd);
for (i = 0; i < args_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
@@ -403,19 +432,20 @@ static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
DRM_LOG_KMS("\n");
}
-static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
- void *args, int args_len)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
int i;
- intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]);
+ if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
+ ((u8*)args)[i]))
+ return false;
}
- intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
+ return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
}
static const char *cmd_status_names[] = {
@@ -428,14 +458,13 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
+static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len,
u8 status)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
for (i = 0; i < response_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
for (; i < 8; i++)
@@ -447,8 +476,8 @@ static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
DRM_LOG_KMS("\n");
}
-static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
- void *response, int response_len)
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
{
int i;
u8 status;
@@ -457,24 +486,26 @@ static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
while (retry--) {
/* Read the command response */
for (i = 0; i < response_len; i++) {
- intel_sdvo_read_byte(intel_encoder,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]);
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ return false;
}
/* read the return status */
- intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
- &status);
+ if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
+ &status))
+ return false;
- intel_sdvo_debug_response(intel_encoder, response, response_len,
+ intel_sdvo_debug_response(intel_sdvo, response, response_len,
status);
if (status != SDVO_CMD_STATUS_PENDING)
- return status;
+ break;
mdelay(50);
}
- return status;
+ return status == SDVO_CMD_STATUS_SUCCESS;
}
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -494,37 +525,36 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
* another I2C transaction after issuing the DDC bus switch, it will be
* switched to the internal SDVO register.
*/
-static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
+static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
u8 target)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
},
/* the following two are to read the response */
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = cmd_buf,
},
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = ret_value,
},
};
- intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&target, 1);
/* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
+ intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
out_buf[0] = SDVO_I2C_OPCODE;
out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -533,7 +563,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode
ret_value[0] = 0;
ret_value[1] = 0;
- ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
+ ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
if (ret != 3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -547,23 +577,29 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode
return;
}
-static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- struct intel_sdvo_set_target_input_args targets = {0};
- u8 status;
-
- if (target_0 && target_1)
- return SDVO_CMD_STATUS_NOTSUPP;
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
- if (target_1)
- targets.target_1 = 1;
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
- sizeof(targets));
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+ return false;
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ return intel_sdvo_read_response(intel_sdvo, value, len);
+}
- return (status == SDVO_CMD_STATUS_SUCCESS);
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
}
/**
@@ -572,14 +608,12 @@ static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, boo
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
-static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
{
struct intel_sdvo_get_trained_inputs_response response;
- u8 status;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
return false;
*input_1 = response.input0_trained;
@@ -587,21 +621,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
return true;
}
-static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
u16 outputs)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
- sizeof(outputs));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
}
-static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
- u8 status, state = SDVO_ENCODER_STATE_ON;
+ u8 state = SDVO_ENCODER_STATE_ON;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -618,88 +649,63 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encod
break;
}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
- sizeof(state));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
}
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
int *clock_min,
int *clock_max)
{
struct intel_sdvo_pixel_clock_range clocks;
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
- NULL, 0);
-
- status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
return false;
/* Convert the values from units of 10 kHz to kHz. */
*clock_min = clocks.min * 10;
*clock_max = clocks.max * 10;
-
return true;
}
-static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
u16 outputs)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
- sizeof(outputs));
-
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
}
-static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
-static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_encoder,
+ return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_encoder,
+ return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
static bool
-intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
uint16_t clock,
uint16_t width,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- uint8_t status;
memset(&args, 0, sizeof(args));
args.clock = clock;
@@ -707,59 +713,32 @@ intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
args.height = height;
args.interlace = 0;
- if (sdvo_priv->is_lvds &&
- (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
- sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
+ if (intel_sdvo->is_lvds &&
+ (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
- &args, sizeof(args));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
}
-static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- bool status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
- NULL, 0);
-
- status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
- NULL, 0);
-
- status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return false;
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
}
-static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
}
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
@@ -808,7 +787,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
}
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
- struct intel_sdvo_dtd *dtd)
+ const struct intel_sdvo_dtd *dtd)
{
mode->hdisplay = dtd->part1.h_active;
mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
@@ -840,45 +819,33 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_encode *encode)
{
- uint8_t status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
- if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
- memset(encode, 0, sizeof(*encode));
- return false;
- }
+ if (intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ encode, sizeof(*encode)))
+ return true;
- return true;
+ /* non-support means DVI */
+ memset(encode, 0, sizeof(*encode));
+ return false;
}
-static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
uint8_t mode)
{
- uint8_t status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
}
-static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
uint8_t mode)
{
- uint8_t status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
#if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
int i, j;
uint8_t set_buf_index[2];
@@ -887,8 +854,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
uint8_t buf[48];
uint8_t *pos;
- intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
- intel_sdvo_read_response(encoder, &av_split, 1);
+ intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
for (i = 0; i <= av_split; i++) {
set_buf_index[0] = i; set_buf_index[1] = 0;
@@ -908,7 +874,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
}
#endif
-static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
int index,
uint8_t *data, int8_t size, uint8_t tx_rate)
{
@@ -917,15 +883,18 @@ static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
set_buf_index[0] = index;
set_buf_index[1] = 0;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
for (; size > 0; size -= 8) {
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
+ return false;
+
data += 8;
}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+ return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
}
static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -1000,7 +969,7 @@ struct dip_infoframe {
} __attribute__ ((packed)) u;
} __attribute__((packed));
-static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct drm_display_mode * mode)
{
struct dip_infoframe avi_if = {
@@ -1011,133 +980,107 @@ static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
4 + avi_if.len);
- intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
- 4 + avi_if.len,
- SDVO_HBUF_TX_VSYNC);
+ return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
+ 4 + avi_if.len,
+ SDVO_HBUF_TX_VSYNC);
}
-static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
{
-
struct intel_sdvo_tv_format format;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- uint32_t format_map, i;
- uint8_t status;
-
- for (i = 0; i < TV_FORMAT_NUM; i++)
- if (tv_format_names[i] == sdvo_priv->tv_format_name)
- break;
+ uint32_t format_map;
- format_map = 1 << i;
+ format_map = 1 << intel_sdvo->tv_format_index;
memset(&format, 0, sizeof(format));
- memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
- sizeof(format) : sizeof(format_map));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
- sizeof(format));
-
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- DRM_DEBUG_KMS("%s: Failed to set TV format\n",
- SDVO_NAME(sdvo_priv));
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
}
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+ struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_dtd output_dtd;
- if (dev_priv->is_tv) {
- struct intel_sdvo_dtd output_dtd;
- bool success;
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return false;
- /* We need to construct preferred input timings based on our
- * output timings. To do that, we have to set the output
- * timings, even though this isn't really the right place in
- * the sequence to do it. Oh well.
- */
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ return false;
+ return true;
+}
- /* Set output timings */
- intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- intel_sdvo_set_target_output(intel_encoder,
- dev_priv->attached_output);
- intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
+static bool
+intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo_dtd input_dtd;
- /* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
+ if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
- success = intel_sdvo_create_preferred_input_timing(intel_encoder,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay);
- if (success) {
- struct intel_sdvo_dtd input_dtd;
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+ &input_dtd))
+ return false;
- intel_sdvo_get_preferred_input_timing(intel_encoder,
- &input_dtd);
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ mode->clock = adjusted_mode->clock;
+ return true;
+}
- mode->clock = adjusted_mode->clock;
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- adjusted_mode->clock *=
- intel_sdvo_get_pixel_multiplier(mode);
- } else {
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
- }
- } else if (dev_priv->is_lvds) {
- struct intel_sdvo_dtd output_dtd;
- bool success;
-
- drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
- /* Set output timings */
- intel_sdvo_get_dtd_from_mode(&output_dtd,
- dev_priv->sdvo_lvds_fixed_mode);
-
- intel_sdvo_set_target_output(intel_encoder,
- dev_priv->attached_output);
- intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
-
- /* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
-
-
- success = intel_sdvo_create_preferred_input_timing(
- intel_encoder,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay);
-
- if (success) {
- struct intel_sdvo_dtd input_dtd;
-
- intel_sdvo_get_preferred_input_timing(intel_encoder,
- &input_dtd);
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (intel_sdvo->is_lvds) {
+ drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
- mode->clock = adjusted_mode->clock;
-
- adjusted_mode->clock *=
- intel_sdvo_get_pixel_multiplier(mode);
- } else {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+ intel_sdvo->sdvo_lvds_fixed_mode))
return false;
- }
- } else {
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
}
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will be told of the multiplier during mode_set.
+ */
+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+
return true;
}
@@ -1149,13 +1092,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
u32 sdvox = 0;
- int sdvo_pixel_multiply;
+ int sdvo_pixel_multiply, rate;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
- u8 status;
if (!mode)
return;
@@ -1166,41 +1107,46 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->attached_output;
+ in_out.in0 = intel_sdvo->attached_output;
in_out.in1 = 0;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (sdvo_priv->is_hdmi) {
- intel_sdvo_set_avi_infoframe(intel_encoder, mode);
+ if (intel_sdvo->is_hdmi) {
+ if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+ return;
+
sdvox |= SDVO_AUDIO_ENABLE;
}
/* We have tried to get input timing in mode_fixup, and filled into
adjusted_mode */
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
- } else
- intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
/* If it's a TV, we already set the output timing in mode_fixup.
* Otherwise, the output timing is equal to the input timing.
*/
- if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
+ if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
/* Set the output timing to the screen */
- intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->attached_output);
- intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
+
+ (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
}
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
- if (sdvo_priv->is_tv)
- intel_sdvo_set_tv_format(intel_encoder);
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_tv_format(intel_sdvo))
+ return;
+ }
/* We would like to use intel_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
@@ -1217,23 +1163,17 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_input_timing(encoder, &input_dtd);
}
#else
- intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
+ (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
#endif
- switch (intel_sdvo_get_pixel_multiplier(mode)) {
- case 1:
- intel_sdvo_set_clock_rate_mult(intel_encoder,
- SDVO_CLOCK_RATE_MULT_1X);
- break;
- case 2:
- intel_sdvo_set_clock_rate_mult(intel_encoder,
- SDVO_CLOCK_RATE_MULT_2X);
- break;
- case 4:
- intel_sdvo_set_clock_rate_mult(intel_encoder,
- SDVO_CLOCK_RATE_MULT_4X);
- break;
+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+ switch (sdvo_pixel_multiply) {
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
}
+ if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+ return;
/* Set the SDVO control regs. */
if (IS_I965G(dev)) {
@@ -1243,8 +1183,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(sdvo_priv->sdvo_reg);
- switch (sdvo_priv->sdvo_reg) {
+ sdvox |= I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
@@ -1257,7 +1197,6 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
if (IS_I965G(dev)) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
@@ -1266,28 +1205,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
- intel_sdvo_write_sdvox(intel_encoder, sdvox);
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_encoder, 0);
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
- intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(sdvo_priv->sdvo_reg);
+ temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
} else {
@@ -1295,28 +1234,25 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
int i;
u8 status;
- temp = I915_READ(sdvo_priv->sdvo_reg);
+ temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
-
- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
- &input2);
-
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but it's
* a given that if the status is a success, we succeeded.
*/
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
+ "sync\n", SDVO_NAME(intel_sdvo));
}
if (0)
- intel_sdvo_set_encoder_power_state(intel_encoder, mode);
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
return;
}
@@ -1325,42 +1261,31 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (sdvo_priv->pixel_clock_min > mode->clock)
+ if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
- if (sdvo_priv->pixel_clock_max < mode->clock)
+ if (intel_sdvo->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
- if (sdvo_priv->is_lvds == true) {
- if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
+ if (intel_sdvo->is_lvds) {
+ if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
- return MODE_PANEL;
-
- if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
-static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
}
/* No use! */
@@ -1368,12 +1293,12 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
- struct intel_encoder *iout = NULL;
- struct intel_sdvo_priv *sdvo;
+ struct intel_sdvo *iout = NULL;
+ struct intel_sdvo *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_encoder(connector);
+ iout = to_intel_sdvo(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
@@ -1395,75 +1320,69 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
- struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
- intel_encoder = to_intel_encoder(connector);
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 2);
-
- if (response[0] !=0)
- return 1;
+ intel_sdvo = to_intel_sdvo(connector);
- return 0;
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
}
void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
u8 response[2];
u8 status;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_encoder, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &response, 2);
if (on) {
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_sdvo, &response, 2);
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_encoder, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &response, 2);
}
#endif
static bool
-intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int caps = 0;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
caps++;
@@ -1475,11 +1394,11 @@ intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_encoder = enc_to_intel_encoder(encoder);
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ intel_sdvo = enc_to_intel_sdvo(encoder);
+ if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (encoder == intel_attached_encoder(connector))
return connector;
@@ -1493,8 +1412,8 @@ static int
intel_analog_is_connected(struct drm_device *dev)
{
struct drm_connector *analog_connector;
- analog_connector = intel_find_analog_connector(dev);
+ analog_connector = intel_find_analog_connector(dev);
if (!analog_connector)
return false;
@@ -1509,54 +1428,52 @@ enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(connector, intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
uint8_t saved_ddc, temp_ddc;
- saved_ddc = sdvo_priv->ddc_bus;
- temp_ddc = sdvo_priv->ddc_bus >> 1;
+ saved_ddc = intel_sdvo->ddc_bus;
+ temp_ddc = intel_sdvo->ddc_bus >> 1;
 /*
 * Don't use DDC bus 1 as the argument of the DDC bus switch to get
 * the EDID; it is reserved for the SDVO SPD ROM.
 */
while(temp_ddc > 1) {
- sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(connector, intel_encoder->ddc_bus);
+ intel_sdvo->ddc_bus = temp_ddc;
+ edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
* correct DDC bus. Update it.
*/
- sdvo_priv->ddc_bus = temp_ddc;
+ intel_sdvo->ddc_bus = temp_ddc;
break;
}
temp_ddc >>= 1;
}
if (edid == NULL)
- sdvo_priv->ddc_bus = saved_ddc;
+ intel_sdvo->ddc_bus = saved_ddc;
}
 /* When there is no EDID and no monitor is connected to the VGA
 * port, try to use the CRT DDC to read the EDID for the DVI connector.
 */
- if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+ if (edid == NULL && intel_sdvo->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev))
- edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+ edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
if (edid != NULL) {
bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
+ bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
/* DDC bus is shared, match EDID to connector type */
if (is_digital && need_digital)
- sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
else if (is_digital != need_digital)
status = connector_status_disconnected;
@@ -1572,33 +1489,29 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
{
uint16_t response;
- u8 status;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
- if (sdvo_priv->is_tv) {
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+ if (intel_sdvo->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
mdelay(30);
}
- status = intel_sdvo_read_response(intel_encoder, &response, 2);
+ if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return connector_status_unknown;
-
if (response == 0)
return connector_status_disconnected;
- sdvo_priv->attached_output = response;
+ intel_sdvo->attached_output = response;
- if ((sdvo_connector->output_flag & response) == 0)
+ if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (response & SDVO_TMDS_MASK)
ret = intel_sdvo_hdmi_sink_detect(connector);
@@ -1607,16 +1520,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 /* May update encoder flags, e.g. the TV clock requirement for SDVO TV, etc. */
if (ret == connector_status_connected) {
- sdvo_priv->is_tv = false;
- sdvo_priv->is_lvds = false;
- intel_encoder->needs_tv_clock = false;
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+ intel_sdvo->base.needs_tv_clock = false;
if (response & SDVO_TV_MASK) {
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
}
if (response & SDVO_LVDS_MASK)
- sdvo_priv->is_lvds = true;
+ intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
}
return ret;
@@ -1625,12 +1538,11 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1639,11 +1551,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* which case we'll look there for the digital DDC data.
*/
if (num_modes == 0 &&
- sdvo_priv->analog_ddc_bus &&
+ intel_sdvo->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
+ (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
}
}
@@ -1715,52 +1627,43 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
- uint8_t status;
-
/* Read the list of supported input resolutions for the selected TV
* format.
*/
- for (i = 0; i < TV_FORMAT_NUM; i++)
- if (tv_format_names[i] == sdvo_priv->tv_format_name)
- break;
-
- format_map = (1 << i);
+ format_map = 1 << intel_sdvo->tv_format_index;
memcpy(&tv_res, &format_map,
- sizeof(struct intel_sdvo_sdtv_resolution_request) >
- sizeof(format_map) ? sizeof(format_map) :
- sizeof(struct intel_sdvo_sdtv_resolution_request));
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
- intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
+ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+ return;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
- &tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(intel_encoder, &reply, 3);
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
return;
for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
- &sdvo_tv_modes[i]);
+ &sdvo_tv_modes[i]);
if (nmode)
drm_mode_probed_add(connector, nmode);
}
-
}
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
/*
@@ -1768,7 +1671,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1787,8 +1690,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
end:
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- sdvo_priv->sdvo_lvds_fixed_mode =
+ intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
+ intel_sdvo->is_lvds = true;
break;
}
}
@@ -1797,66 +1701,67 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (IS_TV(sdvo_connector))
+ if (IS_TV(intel_sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (IS_LVDS(sdvo_connector))
+ else if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
- if (list_empty(&connector->probed_modes))
- return 0;
- return 1;
+ return !list_empty(&connector->probed_modes);
}
-static
-void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+static void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct drm_device *dev = connector->dev;
- if (IS_TV(sdvo_priv)) {
- if (sdvo_priv->left_property)
- drm_property_destroy(dev, sdvo_priv->left_property);
- if (sdvo_priv->right_property)
- drm_property_destroy(dev, sdvo_priv->right_property);
- if (sdvo_priv->top_property)
- drm_property_destroy(dev, sdvo_priv->top_property);
- if (sdvo_priv->bottom_property)
- drm_property_destroy(dev, sdvo_priv->bottom_property);
- if (sdvo_priv->hpos_property)
- drm_property_destroy(dev, sdvo_priv->hpos_property);
- if (sdvo_priv->vpos_property)
- drm_property_destroy(dev, sdvo_priv->vpos_property);
- if (sdvo_priv->saturation_property)
- drm_property_destroy(dev,
- sdvo_priv->saturation_property);
- if (sdvo_priv->contrast_property)
- drm_property_destroy(dev,
- sdvo_priv->contrast_property);
- if (sdvo_priv->hue_property)
- drm_property_destroy(dev, sdvo_priv->hue_property);
- }
- if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
- if (sdvo_priv->brightness_property)
- drm_property_destroy(dev,
- sdvo_priv->brightness_property);
- }
- return;
+ if (intel_sdvo_connector->left)
+ drm_property_destroy(dev, intel_sdvo_connector->left);
+ if (intel_sdvo_connector->right)
+ drm_property_destroy(dev, intel_sdvo_connector->right);
+ if (intel_sdvo_connector->top)
+ drm_property_destroy(dev, intel_sdvo_connector->top);
+ if (intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, intel_sdvo_connector->bottom);
+ if (intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, intel_sdvo_connector->hpos);
+ if (intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, intel_sdvo_connector->vpos);
+ if (intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, intel_sdvo_connector->saturation);
+ if (intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, intel_sdvo_connector->contrast);
+ if (intel_sdvo_connector->hue)
+ drm_property_destroy(dev, intel_sdvo_connector->hue);
+ if (intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+ if (intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+ if (intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+ if (intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+ if (intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+ if (intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+ if (intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+ if (intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, intel_sdvo_connector->brightness);
}
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (sdvo_connector->tv_format_property)
+ if (intel_sdvo_connector->tv_format)
drm_property_destroy(connector->dev,
- sdvo_connector->tv_format_property);
+ intel_sdvo_connector->tv_format);
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
@@ -1870,132 +1775,118 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- struct drm_crtc *crtc = encoder->crtc;
- int ret = 0;
- bool changed = false;
- uint8_t cmd, status;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
ret = drm_connector_property_set_value(connector, property, val);
- if (ret < 0)
- goto out;
+ if (ret)
+ return ret;
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (intel_sdvo_connector->name == property) { \
+ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
- if (property == sdvo_connector->tv_format_property) {
- if (val >= TV_FORMAT_NUM) {
- ret = -EINVAL;
- goto out;
- }
- if (sdvo_priv->tv_format_name ==
- sdvo_connector->tv_format_supported[val])
- goto out;
+ if (property == intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
- sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
- changed = true;
- }
+ if (intel_sdvo->tv_format_index ==
+ intel_sdvo_connector->tv_format_supported[val])
+ return 0;
- if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
- cmd = 0;
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
- if (sdvo_connector->left_property == property) {
+ if (intel_sdvo_connector->left == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->right_property, val);
- if (sdvo_connector->left_margin == temp_value)
- goto out;
-
- sdvo_connector->left_margin = temp_value;
- sdvo_connector->right_margin = temp_value;
- temp_value = sdvo_connector->max_hscan -
- sdvo_connector->left_margin;
+ intel_sdvo_connector->right, val);
+ if (intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_connector->right_property == property) {
+ goto set_value;
+ } else if (intel_sdvo_connector->right == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->left_property, val);
- if (sdvo_connector->right_margin == temp_value)
- goto out;
-
- sdvo_connector->left_margin = temp_value;
- sdvo_connector->right_margin = temp_value;
- temp_value = sdvo_connector->max_hscan -
- sdvo_connector->left_margin;
+ intel_sdvo_connector->left, val);
+ if (intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_connector->top_property == property) {
+ goto set_value;
+ } else if (intel_sdvo_connector->top == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->bottom_property, val);
- if (sdvo_connector->top_margin == temp_value)
- goto out;
-
- sdvo_connector->top_margin = temp_value;
- sdvo_connector->bottom_margin = temp_value;
- temp_value = sdvo_connector->max_vscan -
- sdvo_connector->top_margin;
+ intel_sdvo_connector->bottom, val);
+ if (intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_connector->bottom_property == property) {
+ goto set_value;
+ } else if (intel_sdvo_connector->bottom == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->top_property, val);
- if (sdvo_connector->bottom_margin == temp_value)
- goto out;
- sdvo_connector->top_margin = temp_value;
- sdvo_connector->bottom_margin = temp_value;
- temp_value = sdvo_connector->max_vscan -
- sdvo_connector->top_margin;
+ intel_sdvo_connector->top, val);
+ if (intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_connector->hpos_property == property) {
- if (sdvo_connector->cur_hpos == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_connector->cur_hpos = temp_value;
- } else if (sdvo_connector->vpos_property == property) {
- if (sdvo_connector->cur_vpos == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_connector->cur_vpos = temp_value;
- } else if (sdvo_connector->saturation_property == property) {
- if (sdvo_connector->cur_saturation == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_SATURATION;
- sdvo_connector->cur_saturation = temp_value;
- } else if (sdvo_connector->contrast_property == property) {
- if (sdvo_connector->cur_contrast == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_connector->cur_contrast = temp_value;
- } else if (sdvo_connector->hue_property == property) {
- if (sdvo_connector->cur_hue == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_HUE;
- sdvo_connector->cur_hue = temp_value;
- } else if (sdvo_connector->brightness_property == property) {
- if (sdvo_connector->cur_brightness == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_connector->cur_brightness = temp_value;
- }
- if (cmd) {
- intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
- status = intel_sdvo_read_response(intel_encoder,
- NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO command \n");
- return -EINVAL;
- }
- changed = true;
+ goto set_value;
}
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
}
- if (changed && crtc)
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (encoder->crtc) {
+ struct drm_crtc *crtc = encoder->crtc;
+
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
-out:
- return ret;
+ crtc->y, crtc->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
}
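The CHECK_PROPERTY() macro above uses token pasting so that each adjustable control costs one line instead of an open-coded compare/validate/dispatch block. For illustration, CHECK_PROPERTY(hue, HUE) expands to roughly:

	if (intel_sdvo_connector->hue == property) {
		if (intel_sdvo_connector->cur_hue == temp_value)
			return 0;
		if (intel_sdvo_connector->max_hue < temp_value)
			return -EINVAL;
		cmd = SDVO_CMD_SET_HUE;
		intel_sdvo_connector->cur_hue = temp_value;
		goto set_value;
	}

so every property funnels into the single intel_sdvo_set_value() call at the set_value label.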
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
@@ -2022,28 +1913,57 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+ if (intel_sdvo->analog_ddc_bus)
+ intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
+ intel_sdvo->sdvo_lvds_fixed_mode);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
.destroy = intel_sdvo_enc_destroy,
};
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find our position in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+}
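A worked example of the heuristic, using a hypothetical capability set: suppose caps.output_flags advertises only SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 and controlled_output is SDVO_OUTPUT_TMDS0. Then:

	mask  = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB1 | SDVO_OUTPUT_RGB0; /* from the fall-through switch */
	mask &= sdvo->caps.output_flags;   /* leaves TMDS0 | RGB0 */
	num_bits = hweight16(mask);        /* = 2 */
	sdvo->ddc_bus = 1 << num_bits;     /* = 1 << 2 */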
/**
* Choose the appropriate DDC bus for control bus switch command for this
@@ -2054,7 +1974,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
*/
static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
- struct intel_sdvo_priv *sdvo, u32 reg)
+ struct intel_sdvo *sdvo, u32 reg)
{
struct sdvo_device_mapping *mapping;
@@ -2063,61 +1983,53 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
else
mapping = &(dev_priv->sdvo_mappings[1]);
- sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
}
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
+intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
{
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- uint8_t status;
-
- if (device == 0)
- intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
- else
- intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
-
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
- return true;
+ return intel_sdvo_set_target_output(intel_sdvo,
+ device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+ &intel_sdvo->is_hdmi, 1);
}
-static struct intel_encoder *
-intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
+static struct intel_sdvo *
+intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_encoder = enc_to_intel_encoder(encoder);
- if (intel_encoder->ddc_bus == &chan->adapter)
- break;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ if (intel_sdvo->base.ddc_bus == &chan->adapter)
+ return intel_sdvo;
}
- return intel_encoder;
+
+ return NULL;
}
static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
- struct intel_encoder *intel_encoder;
- struct intel_sdvo_priv *sdvo_priv;
+ struct intel_sdvo *intel_sdvo;
struct i2c_algo_bit_data *algo_data;
const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_encoder =
- intel_sdvo_chan_to_intel_encoder(
- (struct intel_i2c_chan *)(algo_data->data));
- if (intel_encoder == NULL)
+ intel_sdvo =
+ intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
+ (algo_data->data));
+ if (intel_sdvo == NULL)
return -EINVAL;
- sdvo_priv = intel_encoder->dev_priv;
- algo = intel_encoder->i2c_bus->algo;
+ algo = intel_sdvo->base.i2c_bus->algo;
- intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
+ intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
}
@@ -2162,27 +2074,9 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
return 0x72;
}
-static bool
-intel_sdvo_connector_alloc (struct intel_connector **ret)
-{
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
-
- *ret = kzalloc(sizeof(*intel_connector) +
- sizeof(*sdvo_connector), GFP_KERNEL);
- if (!*ret)
- return false;
-
- intel_connector = *ret;
- sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
- intel_connector->dev_priv = sdvo_connector;
-
- return true;
-}
-
static void
-intel_sdvo_connector_create (struct drm_encoder *encoder,
- struct drm_connector *connector)
+intel_sdvo_connector_init(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
connector->connector_type);
@@ -2198,582 +2092,470 @@ intel_sdvo_connector_create (struct drm_encoder *encoder,
}
static bool
-intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
return false;
- sdvo_connector = intel_connector->dev_priv;
-
if (device == 0) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
- sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
} else if (device == 1) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
- sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
}
+ intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
- && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
- && sdvo_priv->is_hdmi) {
+ if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
+ && intel_sdvo->is_hdmi) {
/* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_encoder,
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
}
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_connector_init(encoder, connector);
return true;
}
static bool
-intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
- return false;
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+ intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_connector = intel_connector->dev_priv;
- sdvo_priv->controlled_output |= type;
- sdvo_connector->output_flag = type;
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_connector_init(encoder, connector);
- intel_sdvo_tv_create_property(connector, type);
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
- intel_sdvo_create_enhance_property(connector);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
return true;
+
+err:
+ intel_sdvo_destroy_enhance_property(connector);
+ kfree(intel_sdvo_connector);
+ return false;
}
static bool
-intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
- return false;
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+ intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- sdvo_connector = intel_connector->dev_priv;
if (device == 0) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
- sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
} else if (device == 1) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
- sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_connector_init(encoder, connector);
return true;
}
static bool
-intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
- return false;
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
- connector = &intel_connector->base;
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_connector = intel_connector->dev_priv;
-
- sdvo_priv->is_lvds = true;
if (device == 0) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
- sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
} else if (device == 1) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
- sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
- intel_sdvo_connector_create(encoder, connector);
- intel_sdvo_create_enhance_property(connector);
- return true;
+ intel_sdvo_connector_init(encoder, connector);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy_enhance_property(connector);
+ kfree(intel_sdvo_connector);
+ return false;
}
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
- sdvo_priv->is_tv = false;
- intel_encoder->needs_tv_clock = false;
- sdvo_priv->is_lvds = false;
+ intel_sdvo->is_tv = false;
+ intel_sdvo->base.needs_tv_clock = false;
+ intel_sdvo->is_lvds = false;
 /* SDVO requires that an XXX1 function block may not exist unless the corresponding XXX0 function block does. */
if (flags & SDVO_OUTPUT_TMDS0)
- if (!intel_sdvo_dvi_init(intel_encoder, 0))
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
- if (!intel_sdvo_dvi_init(intel_encoder, 1))
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
return false;
/* TV has no XXX1 function block */
if (flags & SDVO_OUTPUT_SVID0)
- if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
return false;
if (flags & SDVO_OUTPUT_CVBS0)
- if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
if (flags & SDVO_OUTPUT_RGB0)
- if (!intel_sdvo_analog_init(intel_encoder, 0))
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
- if (!intel_sdvo_analog_init(intel_encoder, 1))
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
return false;
if (flags & SDVO_OUTPUT_LVDS0)
- if (!intel_sdvo_lvds_init(intel_encoder, 0))
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
- if (!intel_sdvo_lvds_init(intel_encoder, 1))
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
return false;
if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
- sdvo_priv->controlled_output = 0;
- memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+ intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(sdvo_priv),
+ SDVO_NAME(intel_sdvo),
bytes[0], bytes[1]);
return false;
}
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
return true;
}
-static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
- uint8_t status;
- intel_sdvo_set_target_output(intel_encoder, type);
+ if (!intel_sdvo_set_target_output(intel_sdvo, type))
+ return false;
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &format, sizeof(format));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return;
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
- memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
- sizeof(format_map) : sizeof(format));
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
if (format_map == 0)
- return;
+ return false;
- sdvo_connector->format_supported_num = 0;
+ intel_sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
- if (format_map & (1 << i)) {
- sdvo_connector->tv_format_supported
- [sdvo_connector->format_supported_num++] =
- tv_format_names[i];
- }
+ if (format_map & (1 << i))
+ intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
- sdvo_connector->tv_format_property =
- drm_property_create(
- connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_connector->format_supported_num);
+ intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
+ if (!intel_sdvo_connector->tv_format)
+ return false;
- for (i = 0; i < sdvo_connector->format_supported_num; i++)
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_connector->tv_format_property, i,
- i, sdvo_connector->tv_format_supported[i]);
+ intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
- sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(
- connector, sdvo_connector->tv_format_property, 0);
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(&intel_sdvo_connector->base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
}
-static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ intel_sdvo_connector->max_##name = data_value[0]; \
+ intel_sdvo_connector->cur_##name = response; \
+ intel_sdvo_connector->name = \
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ if (!intel_sdvo_connector->name) return false; \
+ intel_sdvo_connector->name->values[0] = 0; \
+ intel_sdvo_connector->name->values[1] = data_value[0]; \
+ drm_connector_attach_property(connector, \
+ intel_sdvo_connector->name, \
+ intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while(0)
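The ENHANCEMENT() macro replaces the long open-coded per-control blocks removed further down: each invocation reads the maximum and current value from the device, creates a bounded DRM range property, and attaches it to the connector. For illustration, ENHANCEMENT(hue, HUE) expands to approximately:

	if (enhancements.hue) {
		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_HUE, &data_value, 4) ||
		    !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HUE, &response, 2))
			return false;
		intel_sdvo_connector->max_hue = data_value[0];
		intel_sdvo_connector->cur_hue = response;
		intel_sdvo_connector->hue =
			drm_property_create(dev, DRM_MODE_PROP_RANGE, "hue", 2);
		if (!intel_sdvo_connector->hue)
			return false;
		intel_sdvo_connector->hue->values[0] = 0;
		intel_sdvo_connector->hue->values[1] = data_value[0];
		drm_connector_attach_property(connector, intel_sdvo_connector->hue,
					      intel_sdvo_connector->cur_hue);
		DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}

which mirrors the hand-written hue block that the rest of this hunk deletes.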
+
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
- struct intel_sdvo_enhancements_reply sdvo_data;
- struct drm_device *dev = connector->dev;
- uint8_t status;
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
- sizeof(sdvo_data));
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS(" incorrect response is returned\n");
- return;
+ /* When horizontal overscan is supported, add the left/right properties */
+ if (enhancements.overscan_h) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_hscan = data_value[0];
+ intel_sdvo_connector->left_margin = data_value[0] - response;
+ intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+ intel_sdvo_connector->left =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ if (!intel_sdvo_connector->left)
+ return false;
+
+ intel_sdvo_connector->left->values[0] = 0;
+ intel_sdvo_connector->left->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->left,
+ intel_sdvo_connector->left_margin);
+
+ intel_sdvo_connector->right =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ if (!intel_sdvo_connector->right)
+ return false;
+
+ intel_sdvo_connector->right->values[0] = 0;
+ intel_sdvo_connector->right->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->right,
+ intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
- response = *((uint16_t *)&sdvo_data);
- if (!response) {
- DRM_DEBUG_KMS("No enhancement is supported\n");
- return;
+
+ if (enhancements.overscan_v) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_vscan = data_value[0];
+ intel_sdvo_connector->top_margin = data_value[0] - response;
+ intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+ intel_sdvo_connector->top =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ if (!intel_sdvo_connector->top)
+ return false;
+
+ intel_sdvo_connector->top->values[0] = 0;
+ intel_sdvo_connector->top->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->top,
+ intel_sdvo_connector->top_margin);
+
+ intel_sdvo_connector->bottom =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ if (!intel_sdvo_connector->bottom)
+ return false;
+
+ intel_sdvo_connector->bottom->values[0] = 0;
+ intel_sdvo_connector->bottom->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->bottom,
+ intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
- if (IS_TV(sdvo_priv)) {
- /* when horizontal overscan is supported, Add the left/right
- * property
- */
- if (sdvo_data.overscan_h) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO max "
- "h_overscan\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
- return;
- }
- sdvo_priv->max_hscan = data_value[0];
- sdvo_priv->left_margin = data_value[0] - response;
- sdvo_priv->right_margin = sdvo_priv->left_margin;
- sdvo_priv->left_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
- sdvo_priv->left_property->values[0] = 0;
- sdvo_priv->left_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->left_property,
- sdvo_priv->left_margin);
- sdvo_priv->right_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
- sdvo_priv->right_property->values[0] = 0;
- sdvo_priv->right_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->right_property,
- sdvo_priv->right_margin);
- DRM_DEBUG_KMS("h_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.overscan_v) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO max "
- "v_overscan\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
- return;
- }
- sdvo_priv->max_vscan = data_value[0];
- sdvo_priv->top_margin = data_value[0] - response;
- sdvo_priv->bottom_margin = sdvo_priv->top_margin;
- sdvo_priv->top_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
- sdvo_priv->top_property->values[0] = 0;
- sdvo_priv->top_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->top_property,
- sdvo_priv->top_margin);
- sdvo_priv->bottom_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
- sdvo_priv->bottom_property->values[0] = 0;
- sdvo_priv->bottom_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->bottom_property,
- sdvo_priv->bottom_margin);
- DRM_DEBUG_KMS("v_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.position_h) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
- return;
- }
- sdvo_priv->max_hpos = data_value[0];
- sdvo_priv->cur_hpos = response;
- sdvo_priv->hpos_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hpos", 2);
- sdvo_priv->hpos_property->values[0] = 0;
- sdvo_priv->hpos_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->hpos_property,
- sdvo_priv->cur_hpos);
- DRM_DEBUG_KMS("h_position: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.position_v) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
- return;
- }
- sdvo_priv->max_vpos = data_value[0];
- sdvo_priv->cur_vpos = response;
- sdvo_priv->vpos_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "vpos", 2);
- sdvo_priv->vpos_property->values[0] = 0;
- sdvo_priv->vpos_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->vpos_property,
- sdvo_priv->cur_vpos);
- DRM_DEBUG_KMS("v_position: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.saturation) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
- return;
- }
- sdvo_priv->max_saturation = data_value[0];
- sdvo_priv->cur_saturation = response;
- sdvo_priv->saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- sdvo_priv->saturation_property->values[0] = 0;
- sdvo_priv->saturation_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->saturation_property,
- sdvo_priv->cur_saturation);
- DRM_DEBUG_KMS("saturation: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.contrast) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
- return;
- }
- sdvo_priv->max_contrast = data_value[0];
- sdvo_priv->cur_contrast = response;
- sdvo_priv->contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- sdvo_priv->contrast_property->values[0] = 0;
- sdvo_priv->contrast_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->contrast_property,
- sdvo_priv->cur_contrast);
- DRM_DEBUG_KMS("contrast: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.hue) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
- return;
- }
- sdvo_priv->max_hue = data_value[0];
- sdvo_priv->cur_hue = response;
- sdvo_priv->hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- sdvo_priv->hue_property->values[0] = 0;
- sdvo_priv->hue_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->hue_property,
- sdvo_priv->cur_hue);
- DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_dot_crawl = 1;
+ intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ intel_sdvo_connector->dot_crawl =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ if (!intel_sdvo_connector->dot_crawl)
+ return false;
+
+ intel_sdvo_connector->dot_crawl->values[0] = 0;
+ intel_sdvo_connector->dot_crawl->values[1] = 1;
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->dot_crawl,
+ intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
}
- if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
- if (sdvo_data.brightness) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
- return;
- }
- sdvo_priv->max_brightness = data_value[0];
- sdvo_priv->cur_brightness = response;
- sdvo_priv->brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- sdvo_priv->brightness_property->values[0] = 0;
- sdvo_priv->brightness_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->brightness_property,
- sdvo_priv->cur_brightness);
- DRM_DEBUG_KMS("brightness: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
+
+ return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector)
+{
+ union {
+ struct intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements)))
+ return false;
+
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
}
- return;
+
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else if(IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+
}
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
- struct intel_sdvo_priv *sdvo_priv;
+ struct intel_sdvo *intel_sdvo;
u8 ch[0x40];
int i;
u32 i2c_reg, ddc_reg, analog_ddc_reg;
- intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
- if (!intel_encoder) {
+ intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+ if (!intel_sdvo)
return false;
- }
- sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
- sdvo_priv->sdvo_reg = sdvo_reg;
+ intel_sdvo->sdvo_reg = sdvo_reg;
- intel_encoder->dev_priv = sdvo_priv;
+ intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
if (HAS_PCH_SPLIT(dev)) {
@@ -2795,14 +2577,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
- sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
/* Save the bit-banging i2c functionality for use by the DDC wrapper */
intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
@@ -2812,17 +2594,16 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* setup the DDC bus. */
if (IS_SDVOB(sdvo_reg)) {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
-
- if (intel_encoder->ddc_bus == NULL)
+ if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
goto err_i2c;
/* Wrap with our custom algo which switches to DDC mode */
@@ -2833,53 +2614,56 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
- intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err_enc;
- if (intel_sdvo_output_setup(intel_encoder,
- sdvo_priv->caps.output_flags) != true) {
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_i2c;
+ goto err_enc;
}
- intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
-
- intel_sdvo_get_input_pixel_clock_range(intel_encoder,
- &sdvo_priv->pixel_clock_min,
- &sdvo_priv->pixel_clock_max);
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err_enc;
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+ goto err_enc;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
- SDVO_NAME(sdvo_priv),
- sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
- sdvo_priv->caps.device_rev_id,
- sdvo_priv->pixel_clock_min / 1000,
- sdvo_priv->pixel_clock_max / 1000,
- (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
- (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
/* check currently supported outputs */
- sdvo_priv->caps.output_flags &
+ intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
- sdvo_priv->caps.output_flags &
+ intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
-
return true;
+err_enc:
+ drm_encoder_cleanup(&intel_encoder->enc);
err_i2c:
- if (sdvo_priv->analog_ddc_bus != NULL)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+ if (intel_sdvo->analog_ddc_bus != NULL)
+ intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
if (intel_encoder->ddc_bus != NULL)
intel_i2c_destroy(intel_encoder->ddc_bus);
if (intel_encoder->i2c_bus != NULL)
intel_i2c_destroy(intel_encoder->i2c_bus);
err_inteloutput:
- kfree(intel_encoder);
+ kfree(intel_sdvo);
return false;
}
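
The rewritten TV/LVDS enhancement setup above collapses the per-property copy-and-paste into one ENHANCEMENT(name, NAME) invocation per control; the macro's actual body is defined earlier in the patch and is not visible in this hunk. As a rough, self-contained sketch of the token-pasting technique only (the struct, command strings and get_value() below are made up for illustration, not the driver's), one macro line expands into the repeated query-and-store sequence:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins: a device query and a struct of per-property fields. */
    struct props {
            uint16_t max_contrast, cur_contrast;
            uint16_t max_hue, cur_hue;
    };

    static bool get_value(const char *cmd, uint16_t *out)
    {
            printf("query %s\n", cmd);
            *out = 42;
            return true;
    }

    /*
     * One invocation expands to the whole "query max, query current, store both"
     * sequence; #NAME stringifies the command suffix, ##name pastes the field names.
     */
    #define ENHANCEMENT(name, NAME)                                          \
            do {                                                             \
                    if (!get_value("GET_MAX_" #NAME, &p->max_##name) ||      \
                        !get_value("GET_" #NAME, &p->cur_##name))            \
                            return false;                                    \
            } while (0)

    static bool setup_enhancements(struct props *p)
    {
            ENHANCEMENT(contrast, CONTRAST);
            ENHANCEMENT(hue, HUE);
            return true;
    }
    #undef ENHANCEMENT

    int main(void)
    {
            struct props p = { 0 };
            if (setup_enhancements(&p))
                    printf("contrast max %u cur %u\n", p.max_contrast, p.cur_contrast);
            return 0;
    }

The real macro additionally creates and attaches a DRM range property per control, which is why the patch can also centralise the error handling (return false) that the old open-coded version spelled out for every property.
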
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index ba5cdf8ae40..a386b022e53 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 5 bytes of bit flags for TV formats shared by all TV format functions */
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
@@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply {
unsigned int overscan_h:1;
unsigned int overscan_v:1;
- unsigned int position_h:1;
- unsigned int position_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
unsigned int sharpness:1;
unsigned int dot_crawl:1;
unsigned int dither:1;
- unsigned int max_tv_chroma_filter:1;
- unsigned int max_tv_luma_filter:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
} __attribute__((packed));
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
*/
-#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
-#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
-#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
#define SDVO_CMD_GET_MAX_SATURATION 0x55
#define SDVO_CMD_GET_MAX_HUE 0x58
#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
-#define SDVO_CMD_GET_MAX_POSITION_H 0x67
-#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
-#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
-#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
-#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
@@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
-#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
-#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
-#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
-#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
#define SDVO_CMD_GET_SATURATION 0x56
#define SDVO_CMD_SET_SATURATION 0x57
#define SDVO_CMD_GET_HUE 0x59
@@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_OVERSCAN_H 0x63
#define SDVO_CMD_GET_OVERSCAN_V 0x65
#define SDVO_CMD_SET_OVERSCAN_V 0x66
-#define SDVO_CMD_GET_POSITION_H 0x68
-#define SDVO_CMD_SET_POSITION_H 0x69
-#define SDVO_CMD_GET_POSITION_V 0x6b
-#define SDVO_CMD_SET_POSITION_V 0x6c
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
#define SDVO_CMD_GET_SHARPNESS 0x6e
#define SDVO_CMD_SET_SHARPNESS 0x6f
-#define SDVO_CMD_GET_TV_CHROMA 0x75
-#define SDVO_CMD_SET_TV_CHROMA 0x76
-#define SDVO_CMD_GET_TV_LUMA 0x78
-#define SDVO_CMD_SET_TV_LUMA 0x79
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
}__attribute__((packed));
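
The header changes above mostly rename commands and flag bits, but they rest on the layout trick used throughout intel_sdvo_regs.h: replies are declared as packed one-bit bitfields so the struct overlays the bytes the device returns. A standalone illustration of that layout (generic field names, not the real header; note that bit order within a byte is compiler/ABI dependent, which is acceptable for the little-endian targets this driver runs on):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Two bytes of capability flags, one bit per feature, in wire order. */
    struct caps_reply {
            unsigned int brightness:1;
            unsigned int contrast:1;
            unsigned int hue:1;
            unsigned int saturation:1;
            unsigned int pad:12;
    } __attribute__((packed));

    int main(void)
    {
            uint8_t wire[2] = { 0x05, 0x00 };  /* bits 0 and 2 set: brightness + hue */
            struct caps_reply r;

            memcpy(&r, wire, sizeof(r));
            printf("sizeof(reply) = %zu\n", sizeof(r));  /* 2 bytes */
            printf("brightness=%u contrast=%u hue=%u saturation=%u\n",
                   r.brightness, r.contrast, r.hue, r.saturation);
            return 0;
    }
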
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index cc3726a4a1c..c671f60ce80 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -44,7 +44,9 @@ enum tv_margin {
};
/** Private structure for the integrated TV support */
-struct intel_tv_priv {
+struct intel_tv {
+ struct intel_encoder base;
+
int type;
char *tv_format;
int margin[4];
@@ -896,6 +898,11 @@ static const struct tv_mode tv_modes[] = {
},
};
+static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+}
+
static void
intel_tv_dpms(struct drm_encoder *encoder, int mode)
{
@@ -929,19 +936,17 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_encoder *intel_encoder)
+intel_tv_mode_find (struct intel_tv *intel_tv)
{
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-
- return intel_tv_mode_lookup(tv_priv->tv_format);
+ return intel_tv_mode_lookup(intel_tv->tv_format);
}
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -957,8 +962,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
struct drm_encoder *other_encoder;
if (!tv_mode)
@@ -983,9 +988,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1001,7 +1005,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl = I915_READ(TV_CTL);
tv_ctl &= TV_CTL_SAVE;
- switch (tv_priv->type) {
+ switch (intel_tv->type) {
default:
case DRM_MODE_CONNECTOR_Unknown:
case DRM_MODE_CONNECTOR_Composite:
@@ -1154,11 +1158,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Wait for vblank for the disable to take effect */
if (!IS_I9XX(dev))
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1168,12 +1172,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
ysize = 2*tv_mode->nbr_end + 1;
- xpos += tv_priv->margin[TV_MARGIN_LEFT];
- ypos += tv_priv->margin[TV_MARGIN_TOP];
- xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
- tv_priv->margin[TV_MARGIN_RIGHT]);
- ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
- tv_priv->margin[TV_MARGIN_BOTTOM]);
+ xpos += intel_tv->margin[TV_MARGIN_LEFT];
+ ypos += intel_tv->margin[TV_MARGIN_TOP];
+ xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
@@ -1222,9 +1226,9 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_tv_detect_type (struct intel_tv *intel_tv)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_tv->base.enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1263,11 +1267,15 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
DAC_C_0_7_V);
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev);
+ POSTING_READ(TV_DAC);
+ msleep(20);
+
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev);
+ POSTING_READ(TV_CTL);
+ msleep(20);
+
/*
* A B C
* 0 1 1 Composite
@@ -1304,12 +1312,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static void intel_tv_find_better_format(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int i;
- if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
return;
@@ -1317,12 +1324,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
tv_mode = tv_modes + i;
- if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
break;
}
- tv_priv->tv_format = tv_mode->name;
+ intel_tv->tv_format = tv_mode->name;
drm_connector_property_set_value(connector,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1336,31 +1343,31 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector)
{
- struct drm_crtc *crtc;
struct drm_display_mode mode;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- int dpms_mode;
- int type = tv_priv->type;
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ int type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc && encoder->crtc->enabled) {
- type = intel_tv_detect_type(encoder->crtc, intel_encoder);
+ type = intel_tv_detect_type(intel_tv);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ struct drm_crtc *crtc;
+ int dpms_mode;
+
+ crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, connector,
+ type = intel_tv_detect_type(intel_tv);
+ intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
type = -1;
}
- tv_priv->type = type;
+ intel_tv->type = type;
if (type < 0)
return connector_status_disconnected;
@@ -1391,8 +1398,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1417,8 +1424,8 @@ intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
@@ -1483,8 +1490,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1494,30 +1500,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
goto out;
if (property == dev->mode_config.tv_left_margin_property &&
- tv_priv->margin[TV_MARGIN_LEFT] != val) {
- tv_priv->margin[TV_MARGIN_LEFT] = val;
+ intel_tv->margin[TV_MARGIN_LEFT] != val) {
+ intel_tv->margin[TV_MARGIN_LEFT] = val;
changed = true;
} else if (property == dev->mode_config.tv_right_margin_property &&
- tv_priv->margin[TV_MARGIN_RIGHT] != val) {
- tv_priv->margin[TV_MARGIN_RIGHT] = val;
+ intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+ intel_tv->margin[TV_MARGIN_RIGHT] = val;
changed = true;
} else if (property == dev->mode_config.tv_top_margin_property &&
- tv_priv->margin[TV_MARGIN_TOP] != val) {
- tv_priv->margin[TV_MARGIN_TOP] = val;
+ intel_tv->margin[TV_MARGIN_TOP] != val) {
+ intel_tv->margin[TV_MARGIN_TOP] = val;
changed = true;
} else if (property == dev->mode_config.tv_bottom_margin_property &&
- tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
- tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+ intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+ intel_tv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
- if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
+ if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
goto out;
- tv_priv->tv_format = tv_modes[val].name;
+ intel_tv->tv_format = tv_modes[val].name;
changed = true;
} else {
ret = -EINVAL;
@@ -1553,16 +1559,8 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
-static void intel_tv_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
- .destroy = intel_tv_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
/*
@@ -1606,9 +1604,9 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
+ struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
int i, initial_mode = 0;
@@ -1647,18 +1645,18 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_encoder = kzalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_tv_priv), GFP_KERNEL);
- if (!intel_encoder) {
+ intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+ if (!intel_tv) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_tv);
return;
}
+ intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1668,22 +1666,20 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
- tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- intel_encoder->dev_priv = tv_priv;
- tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
- tv_priv->margin[TV_MARGIN_LEFT] = 54;
- tv_priv->margin[TV_MARGIN_TOP] = 36;
- tv_priv->margin[TV_MARGIN_RIGHT] = 46;
- tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
+ intel_tv->margin[TV_MARGIN_LEFT] = 54;
+ intel_tv->margin[TV_MARGIN_TOP] = 36;
+ intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
- tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
@@ -1703,16 +1699,16 @@ intel_tv_init(struct drm_device *dev)
initial_mode);
drm_connector_attach_property(connector,
dev->mode_config.tv_left_margin_property,
- tv_priv->margin[TV_MARGIN_LEFT]);
+ intel_tv->margin[TV_MARGIN_LEFT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_top_margin_property,
- tv_priv->margin[TV_MARGIN_TOP]);
+ intel_tv->margin[TV_MARGIN_TOP]);
drm_connector_attach_property(connector,
dev->mode_config.tv_right_margin_property,
- tv_priv->margin[TV_MARGIN_RIGHT]);
+ intel_tv->margin[TV_MARGIN_RIGHT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
- tv_priv->margin[TV_MARGIN_BOTTOM]);
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
out:
drm_sysfs_connector_add(connector);
}
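
The intel_tv.c rework above drops the old "allocate intel_encoder plus a trailing private struct and point dev_priv at (intel_encoder + 1)" pattern in favour of embedding the generic encoder as struct intel_tv::base and recovering the outer object with container_of(), as enc_to_intel_tv() shows. The idiom in isolation, as a small runnable sketch with placeholder types:

    #include <stddef.h>
    #include <stdio.h>

    #ifndef container_of
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #endif

    struct encoder { int type; };

    struct tv_encoder {
            struct encoder base;   /* embedded generic object */
            int margin[4];
    };

    /* Downcast from the embedded member back to the containing object. */
    static struct tv_encoder *to_tv(struct encoder *enc)
    {
            return container_of(enc, struct tv_encoder, base);
    }

    int main(void)
    {
            struct tv_encoder tv = {
                    .base = { .type = 7 },
                    .margin = { 54, 36, 46, 37 },
            };
            struct encoder *enc = &tv.base;   /* what generic code passes around */

            printf("left margin via base pointer: %d\n", to_tv(enc)->margin[0]);
            return 0;
    }

One kzalloc() of the containing struct then covers both objects, and freeing the outer struct (here via the shared intel_encoder_destroy) frees everything, which is what lets the bespoke intel_tv_enc_destroy() go away.
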
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index fff82045c42..9ce2827f8c0 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1085,19 +1085,19 @@ file_priv)
}
struct drm_ioctl_desc mga_ioctls[] = {
- DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
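
The mga ioctl table above (and the nouveau and r128 tables further down) switches from DRM_IOCTL_DEF with fully spelled-out DRM_MGA_* names to a driver-relative DRM_IOCTL_DEF_DRV(MGA_*, ...) macro; the macro's real definition lives in the DRM core and is not part of this diff. The general idea, in which each row lists only the varying tokens and the macro pastes the common prefix back on, looks roughly like this purely illustrative sketch:

    #include <stdio.h>

    typedef int (*ioctl_fn)(void *data);

    struct ioctl_desc { const char *name; ioctl_fn func; unsigned flags; };

    static int drv_init(void *d)  { (void)d; return 0; }
    static int drv_flush(void *d) { (void)d; return 0; }

    #define FLAG_AUTH   0x1
    #define FLAG_MASTER 0x2

    /* The macro re-attaches the driver prefix, so each row lists only what differs. */
    #define IOCTL_DEF_DRV(op, fn, fl) { "DRV_" #op, fn, fl }

    static const struct ioctl_desc ioctls[] = {
            IOCTL_DEF_DRV(INIT,  drv_init,  FLAG_AUTH | FLAG_MASTER),
            IOCTL_DEF_DRV(FLUSH, drv_flush, FLAG_AUTH),
    };

    int main(void)
    {
            for (unsigned i = 0; i < sizeof(ioctls) / sizeof(ioctls[0]); i++)
                    printf("%-10s flags=0x%x\n", ioctls[i].name, ioctls[i].flags);
            return 0;
    }

Centralising the prefix in one macro also lets the core change how table slots are computed without touching every driver again, which is presumably the point of the conversion.
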
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0b69a9628c9..974b0f8ae04 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
uint32_t val = 0;
if (off < pci_resource_len(dev->pdev, 1)) {
- uint32_t __iomem *p =
+ uint8_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
uint32_t off, uint32_t val)
{
if (off < pci_resource_len(dev->pdev, 1)) {
- uint32_t __iomem *p =
+ uint8_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
iowrite32(val, p + (off & ~PAGE_MASK));
@@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
}
#ifdef __powerpc__
/* Powerbook specific quirks */
- if ((dev->pci_device & 0xffff) == 0x0179 ||
- (dev->pci_device & 0xffff) == 0x0189 ||
- (dev->pci_device & 0xffff) == 0x0329) {
- if (script == LVDS_RESET) {
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
-
- } else if (script == LVDS_PANEL_ON) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
- bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
- | (1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT,
- bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
-
- } else if (script == LVDS_PANEL_OFF) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
- bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
- & ~(1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT,
- bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
- }
- }
+ if (script == LVDS_RESET &&
+ (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+ dev->pci_device == 0x0329))
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
return 0;
@@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
*
* For the moment, a quirk will do :)
*/
- if ((dev->pdev->device == 0x01d7) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x01c2)) {
+ if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
bios->fp.duallink_transition_clk = 80000;
- }
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -4587,7 +4567,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
@@ -4597,7 +4577,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
@@ -4610,7 +4590,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
@@ -4622,7 +4602,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
@@ -4634,7 +4614,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
}
@@ -5357,19 +5337,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
}
tmdstableptr = ROM16(bios->data[bitentry->offset]);
-
- if (tmdstableptr == 0x0) {
+ if (!tmdstableptr) {
NV_ERROR(dev, "Pointer to TMDS table invalid\n");
return -EINVAL;
}
+ NV_INFO(dev, "TMDS table version %d.%d\n",
+ bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
/* nv50+ has v2.0, but we don't parse it atm */
- if (bios->data[tmdstableptr] != 0x11) {
- NV_WARN(dev,
- "TMDS table revision %d.%d not currently supported\n",
- bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+ if (bios->data[tmdstableptr] != 0x11)
return -ENOSYS;
- }
/*
* These two scripts are odd: they don't seem to get run even when
@@ -5809,6 +5787,20 @@ parse_dcb_gpio_table(struct nvbios *bios)
gpio->line = tvdac_gpio[1] >> 4;
gpio->invert = tvdac_gpio[0] & 2;
}
+ } else {
+ /*
+ * No systematic way to store GPIO info on pre-v2.2
+ * DCBs, try to match the PCI device IDs.
+ */
+
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+ gpio->tag = DCB_GPIO_TVDAC0;
+ gpio->line = 4;
+ }
+
}
if (!gpio_table_ptr)
@@ -5884,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
struct drm_device *dev = bios->dev;
/* Gigabyte NX85T */
- if ((dev->pdev->device == 0x0421) &&
- (dev->pdev->subsystem_vendor == 0x1458) &&
- (dev->pdev->subsystem_device == 0x344c)) {
+ if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
if (cte->type == DCB_CONNECTOR_HDMI_1)
cte->type = DCB_CONNECTOR_DVI_I;
}
@@ -6139,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
break;
- case 0xe:
+ case OUTPUT_EOL:
/* weird g80 mobile type that "nv" treats as a terminator */
dcb->entries--;
return false;
@@ -6176,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->type = OUTPUT_TV;
break;
case 2:
- case 3:
- entry->type = OUTPUT_LVDS;
- break;
case 4:
- switch ((conn & 0x000000f0) >> 4) {
- case 0:
- entry->type = OUTPUT_TMDS;
- break;
- case 1:
+ if (conn & 0x10)
entry->type = OUTPUT_LVDS;
- break;
- default:
- NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
- (conn & 0x000000f0) >> 4);
- return false;
- }
+ else
+ entry->type = OUTPUT_TMDS;
+ break;
+ case 3:
+ entry->type = OUTPUT_LVDS;
break;
default:
NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -6307,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
* nasty problems until this is sorted (assuming it's not a
* VBIOS bug).
*/
- if ((dev->pdev->device == 0x040d) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x019b)) {
+ if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
if (*conn == 0x02026312 && *conf == 0x00000020)
return false;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd14dfd3d78..c1de2f3fcb0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -95,6 +95,7 @@ enum dcb_type {
OUTPUT_TMDS = 2,
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
+ OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
OUTPUT_ANY = -1
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 84f85183d04..f6f44779d82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,21 @@
#include <linux/log2.h>
#include <linux/slab.h>
+int
+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
+{
+ struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+ int ret;
+
+ if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
+ return 0;
+
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ return ret;
+}
+
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 90fdcda332b..0480f064f2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index b1b22baf142..a1473fff06a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_chan *i2c;
+ struct nouveau_i2c_chan *i2c = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
if (!obj)
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+ if (nv_encoder->dcb->i2c_index < 0xf)
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
*pnv_encoder = nv_encoder;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e424bf74d70..b1be617373b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
/* nouveau_fence.c */
struct nouveau_fence;
@@ -1388,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
return false;
}
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+ unsigned sub_vendor, unsigned sub_device)
+{
+ return dev->pdev->device == device &&
+ dev->pdev->subsystem_vendor == sub_vendor &&
+ dev->pdev->subsystem_device == sub_device;
+}
+
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ffafa8..87ac21ec23d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
struct nouveau_fence *fence;
uint32_t sequence;
+ spin_lock(&chan->fence.lock);
+
if (USE_REFCNT)
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
if (chan->fence.sequence_ack == sequence)
- return;
+ goto out;
chan->fence.sequence_ack = sequence;
- spin_lock(&chan->fence.lock);
list_for_each_safe(entry, tmp, &chan->fence.pending) {
fence = list_entry(entry, struct nouveau_fence, entry);
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
if (sequence == chan->fence.sequence_ack)
break;
}
+out:
spin_unlock(&chan->fence.lock);
}
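
The nouveau_fence_update() hunk above widens chan->fence.lock so that reading the hardware sequence, comparing it to fence.sequence_ack, and updating sequence_ack all happen inside the same critical section, instead of only the list walk; with the old placement two concurrent updaters could both pass the comparison and then race on the ack value and the pending list. A reduced illustration of the widened critical section, with a pthread mutex standing in for the spinlock and generic names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned hw_sequence;    /* what the GPU would report */
    static unsigned sequence_ack;   /* last value we retired up to */

    static void fence_update(void)
    {
            pthread_mutex_lock(&lock);

            unsigned seq = hw_sequence;   /* read ...                         */
            if (seq == sequence_ack)      /* ... compare ...                  */
                    goto out;
            sequence_ack = seq;           /* ... and update, under one lock   */

            /* retire pending fences up to seq here */
    out:
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            hw_sequence = 3;
            fence_update();
            printf("acked up to %u\n", sequence_ack);
            return 0;
    }
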
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0f417ac1b69..ead7b8fc53f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_unreference(nvbo->gem);
+ drm_gem_object_unreference_unlocked(nvbo->gem);
}
}
@@ -300,7 +300,7 @@ retry:
validate_fini(op, NULL);
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
if (ret) {
NV_ERROR(dev, "fail reserve\n");
return ret;
@@ -337,7 +337,9 @@ retry:
return -EINVAL;
}
+ mutex_unlock(&drm_global_mutex);
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+ mutex_lock(&drm_global_mutex);
if (ret) {
NV_ERROR(dev, "fail wait_cpu\n");
return ret;
@@ -361,16 +363,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
- if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
- if (unlikely(ret)) {
- NV_ERROR(dev, "fail wait other chan\n");
- return ret;
- }
+ ret = nouveau_bo_sync_gpu(nvbo, chan);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail pre-validate sync\n");
+ return ret;
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +378,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- nvbo->channel = chan;
+ nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false, false);
nvbo->channel = NULL;
@@ -390,6 +387,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
+ ret = nouveau_bo_sync_gpu(nvbo, chan);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail post-validate sync\n");
+ return ret;
+ }
+
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -613,7 +616,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return PTR_ERR(bo);
}
- mutex_lock(&dev->struct_mutex);
+ /* Mark push buffers as being used on PFIFO, the validation code
+ * will then make sure that if the pushbuf bo moves, the move
+ * happens on the kernel channel, which will in turn cause a sync
+ * to happen before we try and submit the push buffer.
+ */
+ for (i = 0; i < req->nr_push; i++) {
+ if (push[i].bo_index >= req->nr_buffers) {
+ NV_ERROR(dev, "push %d buffer not in list\n", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bo[push[i].bo_index].read_domains |= (1 << 31);
+ }
/* Validate buffer list */
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -647,7 +663,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (dev_priv->card_type >= NV_20) {
+ if (dev_priv->chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -713,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
nouveau_fence_unref((void**)&fence);
- mutex_unlock(&dev->struct_mutex);
kfree(bo);
kfree(push);
@@ -722,7 +737,7 @@ out_next:
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (dev_priv->card_type >= NV_20) {
+ if (dev_priv->chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
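
The pushbuf path above introduces a small in-band flag protocol: before validation, every buffer referenced by a push entry gets bit 31 set in its read_domains, and validate_list() (earlier in this file's diff) checks that bit to validate such buffers with nvbo->channel = NULL, so any move is synchronized before the push buffer is submitted. The two halves of the protocol, condensed into one self-contained sketch (the flag name and structs are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define BO_FLAG_PUSHBUF (1u << 31)   /* high bit of read_domains used as a marker */

    struct bo   { uint32_t read_domains; };
    struct push { unsigned bo_index; };

    int main(void)
    {
            struct bo   bos[3]    = { { 0x1 }, { 0x2 }, { 0x1 } };
            struct push pushes[1] = { { .bo_index = 1 } };

            /* Submission path: mark every buffer a push entry points at. */
            for (unsigned i = 0; i < 1; i++)
                    bos[pushes[i].bo_index].read_domains |= BO_FLAG_PUSHBUF;

            /* Validation path: marked buffers get the special handling. */
            for (unsigned i = 0; i < 3; i++)
                    printf("bo %u: %s\n", i,
                           (bos[i].read_domains & BO_FLAG_PUSHBUF) ?
                           "validate without channel (sync before submit)" :
                           "validate on submitting channel");
            return 0;
    }
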
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 0bd407ca3d4..84614858728 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
if (entry->chan)
return -EEXIST;
- if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) {
+ if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 491767fe4fc..6b9187d7f67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -214,6 +214,7 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
@@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_page =
alloc_page(GFP_KERNEL|__GFP_DMA32);
+ if (!dev_priv->gart_info.sg_dummy_page) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+
set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -EFAULT;
+ }
if (dev_priv->card_type < NV_50) {
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
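
nouveau_sgdma_init() above stops assuming that alloc_page() and pci_map_page() always succeed and unwinds (including deleting the gpuobj) on failure. The core of that check-and-unwind pattern, as a kernel-context sketch using the same era's PCI DMA API — this is not the driver's actual function, which also has the gpuobj to clean up:

    #include <linux/pci.h>
    #include <linux/gfp.h>

    /* Allocate a dummy page and DMA-map it, checking and unwinding each step. */
    static int map_dummy_page(struct pci_dev *pdev, struct page **page,
                              dma_addr_t *bus)
    {
            *page = alloc_page(GFP_KERNEL | __GFP_DMA32);
            if (!*page)
                    return -ENOMEM;

            *bus = pci_map_page(pdev, *page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
            if (pci_dma_mapping_error(pdev, *bus)) {
                    __free_page(*page);
                    *page = NULL;
                    return -EFAULT;
            }

            return 0;
    }

Checking pci_dma_mapping_error() matters because a mapping can fail (or return a poisoned address) even when the page allocation succeeded, for example when an IOMMU runs out of space.
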
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index a5dcf768580..0d3206a7046 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct dcb_entry *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
+ struct drm_encoder *slave_encoder;
if (dcbe->type == OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
/* Init external transmitters */
- if (get_tmds_slave(encoder))
- get_slave_funcs(get_tmds_slave(encoder))->mode_set(
- encoder, &nv_encoder->mode, &nv_encoder->mode);
+ slave_encoder = get_tmds_slave(encoder);
+ if (slave_encoder)
+ get_slave_funcs(slave_encoder)->mode_set(
+ slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
+static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+{
+#ifdef __powerpc__
+ struct drm_device *dev = encoder->dev;
+
+ /* BIOS scripts usually take care of the backlight, thanks
+ * Apple for your consistency.
+ */
+ if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+ dev->pci_device == 0x0329) {
+ if (mode == DRM_MODE_DPMS_ON) {
+ nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+ nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
+ } else {
+ nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+ nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
+ }
+ }
+#endif
+}
+
static inline bool is_powersaving_dpms(int mode)
{
return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
LVDS_PANEL_OFF, 0);
}
+ nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
mode, nv_encoder->dcb->index);
+ nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 44fefb0c708..13cdc05b7c2 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -121,10 +121,14 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
/* Zotac FX5200 */
- if (dev->pdev->device == 0x0322 &&
- dev->pdev->subsystem_vendor == 0x19da &&
- (dev->pdev->subsystem_device == 0x1035 ||
- dev->pdev->subsystem_device == 0x2035)) {
+ if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
+ nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+ *pin_mask = 0xc;
+ return false;
+ }
+
+ /* MSI nForce2 IGP */
+ if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
*pin_mask = 0xc;
return false;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 37c7b48ab24..91ef93cf1f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
chan->file_priv = (struct drm_file *)-2;
dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
/* Channel's PRAMIN object + heap */
ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
NULL, &chan->ramin);
@@ -278,7 +280,7 @@ nv50_instmem_init(struct drm_device *dev)
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 3ab3cdc4217..6b451f86478 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -142,14 +142,16 @@ int
nvc0_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 *buf;
int i;
dev_priv->susres.ramin_copy = vmalloc(65536);
if (!dev_priv->susres.ramin_copy)
return -ENOMEM;
+ buf = dev_priv->susres.ramin_copy;
- for (i = 0x700000; i < 0x710000; i += 4)
- dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
+ for (i = 0; i < 65536; i += 4)
+ buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
return 0;
}
@@ -157,14 +159,15 @@ void
nvc0_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 *buf = dev_priv->susres.ramin_copy;
u64 chan;
int i;
chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
nv_wr32(dev, 0x001700, chan >> 16);
- for (i = 0x700000; i < 0x710000; i += 4)
- nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+ for (i = 0; i < 65536; i += 4)
+ nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
vfree(dev_priv->susres.ramin_copy);
dev_priv->susres.ramin_copy = NULL;
@@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev)
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
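
Two details in the instmem hunks above are easy to sanity-check by arithmetic. The suspend/resume copy now steps i from 0 to 65532 in 4-byte increments relative to NV04_PRAMIN, so buf[i/4] stays within the 65536-byte (16384-slot) vmalloc'd buffer; the old loop used the absolute register offsets 0x700000..0x70fffc divided by 4 as the array index, which lands far outside that allocation. And ramht_size is now expressed in bytes rather than entries: with ramht_bits = 9 the hash table has 1 << 9 = 512 slots, and at 8 bytes per entry (two 32-bit words, presumably handle and context) that is the 4096 bytes the "* 8" accounts for.
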
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 077af1f2f9b..a9e33ce6591 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
r128_do_cleanup_pageflip(dev);
}
}
-
void r128_driver_lastclose(struct drm_device *dev)
{
r128_do_cleanup_cce(dev);
}
struct drm_ioctl_desc r128_ioctls[] = {
- DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
};
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 12ad512bd3d..464a81a1990 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+ args.ucOverscanRight = radeon_crtc->h_border;
+ args.ucOverscanLeft = radeon_crtc->h_border;
+ args.ucOverscanBottom = radeon_crtc->v_border;
+ args.ucOverscanTop = radeon_crtc->v_border;
+
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -471,6 +476,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
int encoder_mode = 0;
+ u32 dp_clock = mode->clock;
+ int bpc = 8;
/* reset the pll flags */
pll->flags = 0;
@@ -513,6 +520,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ }
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -521,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->algo = PLL_ALGO_LEGACY;
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
}
+ /* There is some evidence (often anecdotal) that RV515 LVDS
+ * (on some boards at least) prefers the legacy algo. I'm not
+ * sure whether this should be handled generically or on a
+ * case-by-case quirk basis. Both algos should work fine in the
+ * majority of cases.
+ */
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ (rdev->family == CHIP_RV515)) {
+ /* allow the user to override just in case */
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -555,6 +587,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP eventually */
+ /* args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+ } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+ args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+ }
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
@@ -568,10 +608,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- else {
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else {
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ /* deep color support */
+ args.v3.sInput.usPixelClock =
+ cpu_to_le16((mode->clock * bpc / 8) / 10);
+ }
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -580,13 +630,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
DISPPLL_CONFIG_DUAL_LINK;
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
- if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- else {
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+ /* want to enable SS on LVDS eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
+ } else {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
@@ -1019,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) {
- WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
- WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
} else {
- WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
- WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
}
}
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1160,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ bool is_tvcv = false;
- /* TODO color tiling */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ /* find tv std */
+ if (encoder->crtc == crtc) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ is_tvcv = true;
+ }
+ }
atombios_disable_ss(crtc);
/* always set DCPLL */
@@ -1170,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_enable_ss(crtc);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- else {
+ else if (ASIC_IS_AVIVO(rdev)) {
+ if (is_tvcv)
+ atombios_crtc_set_timing(crtc, adjusted_mode);
+ else
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ } else {
atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 36e0d4b545e..4e7778d44b8 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder,
enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
- if (dig_connector->linkb)
+ if (dig->linkb)
enc_id |= ATOM_DP_CONFIG_LINK_B;
else
enc_id |= ATOM_DP_CONFIG_LINK_A;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 957d5067ad9..b8b7f010b25 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+ int r;
+ uint32_t cp_me;
+
+ r = radeon_ring_lock(rdev, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
+
+ cp_me = 0xff;
+ WREG32(CP_ME_CNTL, cp_me);
+
+ r = radeon_ring_lock(rdev, 4);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ /* init some VGT regs */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, 0xe);
+ radeon_ring_write(rdev, 0x10);
+ radeon_ring_unlock_commit(rdev);
+
+ return 0;
+}
+
int evergreen_cp_resume(struct radeon_device *rdev)
{
u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
- r600_cp_start(rdev);
+ evergreen_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
if (r) {
@@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = evergreen_startup(rdev);
if (r) {
@@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d0ebae9dde2..afc18d87fdc 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family >= CHIP_CEDAR) {
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
- } else if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = r600_startup(rdev);
if (r) {
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
WREG32(HDP_DEBUG1, 0);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3dfcfa3ca42..a168d644bf9 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
+ struct radeon_bo *robj;
+ volatile uint32_t *ptr;
+};
/*
* Core structure, functions and helpers.
@@ -1079,6 +1084,7 @@ struct radeon_device {
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
+ struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
-extern int radeon_clocks_init(struct radeon_device *rdev);
-extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index f40dfb77f9b..bd2f33e5c91 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev)
}
mode.mode = info.mode;
- agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ /* chips with the AGP to PCIe bridge don't have the AGP_STATUS register.
+ * Just use whatever mode the host sets up.
+ */
+ if (rdev->family <= CHIP_RV350)
+ agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ else
+ agp_status = mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
if (is_v3) {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 646f96f97c7..25e1dd19779 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
@@ -857,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
return 0;
}
-/*
- * Wrapper around modesetting bits. Move to radeon_clocks.c?
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_static_clocks_init(rdev->ddev);
- if (r) {
- return r;
- }
- DRM_INFO("Clocks initialized !\n");
- return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6d30868744e..ebae14c4b76 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -32,11 +32,11 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
/* from radeon_connector.c */
@@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb, uint32_t igp_lane_info,
+ uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
struct radeon_router *router);
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
union atom_supported_devices {
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.valid = false;
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -226,6 +253,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
struct radeon_hpd hpd;
u32 reg;
+ memset(&hpd, 0, sizeof(struct radeon_hpd));
+
if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
@@ -477,7 +506,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
int i, j, k, path_size, device_support;
int connector_type;
u16 igp_lane_info, conn_id, connector_object_id;
- bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
struct radeon_router router;
struct radeon_gpio_rec gpio;
@@ -510,7 +538,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
addr += path_size;
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
- linkb = false;
+
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@@ -601,13 +629,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- if (grph_obj_num == 2)
- linkb = true;
- else
- linkb = false;
+ u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
radeon_add_atom_encoder(dev,
- grph_obj_id,
+ encoder_obj,
le16_to_cpu
(path->
usDeviceTag));
@@ -744,7 +769,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
- linkb, igp_lane_info,
+ igp_lane_info,
connector_object_id,
&hpd,
&router);
@@ -933,13 +958,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
radeon_add_atom_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
else
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
@@ -996,7 +1021,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
- false, 0,
+ 0,
connector_object_id,
&bios_connectors[i].hpd,
&router);
@@ -1183,7 +1208,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
return true;
break;
case 2:
- if (igp_info->info_2.ucMemoryType & 0x0f)
+ if (igp_info->info_2.ulBootUpSidePortClock)
return true;
break;
default:
@@ -1305,6 +1330,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
+ int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -1368,6 +1394,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
}
encoder->native_mode = lvds->native_mode;
+
+ if (encoder_enum == 2)
+ lvds->linkb = true;
+ else
+ lvds->linkb = false;
+
}
return lvds;
}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 14448a740ba..5249af8931e 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
mpll->max_feedback_div = 0xff;
mpll->best_vco = 0;
+ if (!rdev->clock.default_sclk)
+ rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+ if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+ rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+
}
/* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
}
}
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
- uint32_t tmp;
-
- /* XXX make sure engine is idle */
-
- if (rdev->family < CHIP_RS600) {
- tmp = RREG32_PLL(RADEON_SCLK_CNTL);
- if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
- tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
- if ((rdev->family == CHIP_RV250)
- || (rdev->family == CHIP_RV280))
- tmp |=
- RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
- if ((rdev->family == CHIP_RV350)
- || (rdev->family == CHIP_RV380))
- tmp |= R300_SCLK_FORCE_VAP;
- if (rdev->family == CHIP_R420)
- tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
- WREG32_PLL(RADEON_SCLK_CNTL, tmp);
- } else if (rdev->family < CHIP_R600) {
- tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
- tmp |= AVIVO_CP_FORCEON;
- WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
- tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
- tmp |= AVIVO_E2_FORCEON;
- WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
- tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
- tmp |= AVIVO_IDCT_FORCEON;
- WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
- }
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- /* XXX make sure engine is idle */
-
- if (radeon_dynclks != -1) {
- if (radeon_dynclks) {
- if (rdev->asic->set_clock_gating)
- radeon_set_clock_gating(rdev, 1);
- }
- }
- radeon_apply_clock_quirks(rdev);
- return 0;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 885dcfac183..bd74e428bd1 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -39,8 +39,8 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
/* from radeon_connector.c */
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
/* old legacy ATI BIOS routines */
@@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
else
devices = ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tmp & 0x1) {
devices = ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices = ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tmp & 0x1) {
devices |= ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices |= ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
@@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices |= ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
@@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
}
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
@@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
if (crt_info) {
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
COMBIOS_LCD_DDC_INFO_TABLE);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
hpd.hpd = RADEON_HPD_NONE;
ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47c4b276d30..a9dd7847d96 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -977,27 +977,29 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
- u8 sink_type;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- sink_type = radeon_dp_getsinktype(radeon_connector);
- if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- if (radeon_dp_getdpcd(radeon_connector)) {
- radeon_dig_connector->dp_sink_type = sink_type;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ /* eDP is always DP */
+ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
- }
} else {
- if (radeon_ddc_probe(radeon_connector)) {
- radeon_dig_connector->dp_sink_type = sink_type;
- ret = connector_status_connected;
+ radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ } else {
+ if (radeon_ddc_probe(radeon_connector))
+ ret = connector_status_connected;
}
}
+ radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
@@ -1037,7 +1039,6 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb,
uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
@@ -1050,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
+ /* if the user selected tv=0, don't try to add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1128,7 +1135,6 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1158,7 +1164,6 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1182,7 +1187,6 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1211,25 +1215,22 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- if (radeon_tv == 1) {
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- 1);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_atombios_get_tv_info(rdev));
- /* no HPD on analog connectors */
- radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- }
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_atombios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1275,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
+ /* if the user selected tv=0, don't try to add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1350,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- if (radeon_tv == 1) {
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- /* RS400,RC410,RS480 chipset seems to report a lot
- * of false positive on load detect, we haven't yet
- * found a way to make load detect reliable on those
- * chipset, thus just disable it for TV.
- */
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
- radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_combios_get_tv_info(rdev));
- /* no HPD on analog connectors */
- radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- }
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ /* The RS400, RC410 and RS480 chipsets seem to report a lot
+ * of false positives on load detect; we haven't yet
+ * found a way to make load detect reliable on those
+ * chipsets, so just disable it for TV.
+ */
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+ radeon_connector->dac_load_detect = false;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ radeon_connector->dac_load_detect);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_combios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4f7a170d156..256d204a6d2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
- u32 sclk, mclk;
+ u32 sclk = rdev->pm.current_sclk;
+ u32 mclk = rdev->pm.current_mclk;
- if (rdev->flags & RADEON_IS_IGP) {
- sclk = radeon_get_engine_clock(rdev);
- mclk = rdev->clock.default_mclk;
-
- a.full = dfixed_const(100);
- rdev->pm.sclk.full = dfixed_const(sclk);
- rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = dfixed_const(mclk);
- rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+ /* sclk/mclk in MHz */
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+ if (rdev->flags & RADEON_IS_IGP) {
a.full = dfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
- } else {
- sclk = radeon_get_engine_clock(rdev);
- mclk = radeon_get_memory_clock(rdev);
-
- a.full = dfixed_const(100);
- rdev->pm.sclk.full = dfixed_const(sclk);
- rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = dfixed_const(mclk);
- rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5764f4d3b4f..6dd434ad242 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1094,6 +1094,18 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_i2c_fini(rdev);
}
+static bool is_hdtv_mode(struct drm_display_mode *mode)
+{
+ /* try to guess whether this is a TV or a monitor */
+ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+ (mode->vdisplay == 576) || /* 576p */
+ (mode->vdisplay == 720) || /* 720p */
+ (mode->vdisplay == 1080)) /* 1080p */
+ return true;
+ else
+ return false;
+}
+
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -1141,7 +1153,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (ASIC_IS_AVIVO(rdev) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector->edid)))) {
+ drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ is_hdtv_mode(mode)))) {
radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
radeon_crtc->rmx_type = RMX_FULL;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 263c8098d7d..2c293e8304d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev)
}
uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t ret = 0;
@@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
break;
case 2: /* dac b */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
else {
/*if (rdev->family == CHIP_R200)
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else*/
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
}
break;
case 3: /* external dac */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
}
break;
case ATOM_DEVICE_LCD1_SUPPORT:
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
break;
case ATOM_DEVICE_DFP1_SUPPORT:
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
break;
case ATOM_DEVICE_LCD2_SUPPORT:
case ATOM_DEVICE_DFP2_SUPPORT:
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+ ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
case ATOM_DEVICE_DFP3_SUPPORT:
- ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
break;
}
@@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-static struct radeon_connector_atom_dig *
-radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- if (!rdev->is_atom_bios)
- return NULL;
-
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return NULL;
-
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_connector->con_priv)
- return NULL;
-
- dig_connector = radeon_connector->con_priv;
-
- return dig_connector;
-}
-
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
@@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
- if (!dig || !dig_connector)
+ if (!dig)
return;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else if (radeon_connector->use_digital)
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = 0;
uint8_t frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
- if (!dig || !dig_connector)
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
return;
memset(&args, 0, sizeof(args));
@@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dig_connector->dp_clock == 270000)
+ if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.v1.ucLaneNum = dig_connector->dp_lane_count;
+ args.v1.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
@@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ int igp_lane_info = 0;
- if (!dig || !dig_connector)
- return;
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
- connector = radeon_get_connector_for_encoder(encoder);
- radeon_connector = to_radeon_connector(connector);
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ igp_lane_info = dig_connector->igp_lane_info;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
+ return;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
- break;
- }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+ break;
}
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = radeon_connector->connector_object_id;
+ args.v1.usInitInfo = connector_object_id;
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock =
- cpu_to_le16(dig_connector->dp_clock / 10);
+ cpu_to_le16(dp_clock / 10);
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
if (ASIC_IS_DCE4(rdev)) {
if (is_dp)
- args.v3.ucLaneNum = dig_connector->dp_lane_count;
+ args.v3.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
- if (dig_connector->linkb) {
+ if (dig->linkb) {
args.v3.acConfig.ucLinkSel = 1;
args.v3.acConfig.ucEncoderSel = 1;
}
@@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
- if (dig_connector->igp_lane_info & 0x1)
+ if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (dig_connector->igp_lane_info & 0x2)
+ else if (igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (dig_connector->igp_lane_info & 0x4)
+ else if (igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (dig_connector->igp_lane_info & 0x8)
+ else if (igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
- if (dig_connector->igp_lane_info & 0x3)
+ if (igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (dig_connector->igp_lane_info & 0xc)
+ else if (igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- if (!ASIC_IS_DCE4(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (!ASIC_IS_DCE4(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
uint32_t dig_enc_in_use = 0;
if (ASIC_IS_DCE4(rdev)) {
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
-
+ dig = radeon_encoder->enc_priv;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 5;
else
return 4;
@@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
+ int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
if (!dig)
@@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
dig->coherent_mode = true;
dig->dig_encoder = -1;
+ if (encoder_enum == 2)
+ dig->linkb = true;
+ else
+ dig->linkb = false;
+
return dig;
}
void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_id == encoder_id) {
+ if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index dbf86962bdd..c74a8b20d94 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
+ false, true,
&gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index bfd2ce5f537..6a13ee38a5b 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
}
}
+ /* switch the pads to ddc mode */
+ if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+ temp = RREG32(rec->mask_clk_reg);
+ temp &= ~(1 << 16);
+ WREG32(rec->mask_clk_reg, temp);
+ }
+
/* clear the output pin values */
temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
WREG32(rec->a_clk_reg, temp);
@@ -206,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
{
- u32 sclk = radeon_get_engine_clock(rdev);
+ u32 sclk = rdev->pm.current_sclk;
u32 prescale = 0;
u32 nm;
u8 n, m, loop;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 059bfa4098d..a108c7ed14f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (!(rdev->flags & RADEON_IS_IGP))) {
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!(rdev->flags & RADEON_IS_AGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
- DRM_INFO("radeon: using MSI.\n");
+ dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
rdev->irq.installed = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index b1c8ace5f08..5eee3c41d12 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
return -EINVAL;
}
+ break;
case RADEON_INFO_WANT_HYPERZ:
/* The "value" here is both an input and output parameter.
* If the input value is 1, filp requests hyper-z access.
@@ -323,45 +324,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms)
struct drm_ioctl_desc radeon_ioctls_kms[] = {
- DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 989df519a1e..305049afde1 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
if (!ref_div)
return 1;
- vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
+ vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
/*
* This is horribly crude: the VCO frequency range is divided into
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b8149cbc0c7..0b8397000f4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
}
void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_id == encoder_id) {
+ if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5bbc086b926..efbe975312d 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -342,6 +342,7 @@ struct radeon_atom_ss {
};
struct radeon_encoder_atom_dig {
+ bool linkb;
/* atom dig */
bool coherent_mode;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -360,6 +361,7 @@ struct radeon_encoder_atom_dac {
struct radeon_encoder {
struct drm_encoder base;
+ uint32_t encoder_enum;
uint32_t encoder_id;
uint32_t devices;
uint32_t active_device;
@@ -378,7 +380,6 @@ struct radeon_encoder {
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
- bool linkb;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[8];
@@ -599,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
-extern int radeon_static_clocks_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 58038f5cab3..f87efec7623 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
int i;
+ /* no need to take locks, etc. if nothing's going to change */
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+ return;
+
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
@@ -632,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
}
radeon_hwmon_fini(rdev);
- if (rdev->pm.i2c_bus)
- radeon_i2c_destroy(rdev->pm.i2c_bus);
}
void radeon_pm_compute_clocks(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index b3ba44c0a81..4ae5a3d1074 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc radeon_ioctls[] = {
- DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f1c79681011..bfa59db374d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+ int r;
+ u64 gpu_addr;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+ true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vram_scratch.robj);
+ if (r) {
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->vram_scratch.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->vram_scratch.robj,
+ (void **)&rdev->vram_scratch.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+ return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ return;
+ }
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->vram_scratch.robj);
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ }
+ radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
}
+ r = rv770_vram_scratch_init(rdev);
+ if (r)
+ return r;
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = rv770_startup(rdev);
if (r) {
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ rv770_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 976dc8d2528..bf5f83ea14f 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc savage_ioctls[] = {
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 07d0f2979ca..7fe2b63412c 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
}
struct drm_ioctl_desc sis_ioctls[] = {
- DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 68dda74a50a..cc0ffa9abd0 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
}
struct drm_ioctl_desc via_ioctls[] = {
- DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9dd395b9021..72ec2e2b6e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -99,47 +99,47 @@
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
/**
* Ioctl definitions.
*/
static struct drm_ioctl_desc vmw_ioctls[] = {
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+ VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+ VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+ VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+ VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+ VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+ VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+ VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+ VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e635199a0cd..0c52899be96 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1299,6 +1299,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index f44bdc084cb..8ca7f65cf2f 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -159,6 +159,13 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field,
{
struct egalax_data *td = hid_get_drvdata(hid);
+ /* Note, eGalax has two product lines: the first is resistive and
+ * uses a standard parallel multitouch protocol (product ID ==
+ * 48xx). The second is capacitive and uses an unusual "serial"
+ * protocol with a different message for each multitouch finger
+ * (product ID == 72xx). We do not yet generate a correct event
+ * sequence for the capacitive/serial protocol.
+ */
if (hid->claimed & HID_CLAIMED_INPUT) {
struct input_dev *input = field->hidinput->input;
@@ -246,6 +253,8 @@ static void egalax_remove(struct hid_device *hdev)
static const struct hid_device_id egalax_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
{ }
};
MODULE_DEVICE_TABLE(hid, egalax_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d3fc13ae094..85c6d13c9ff 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -188,6 +188,7 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 346f0e34987..bc2e0774062 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info)
ref_cnt--;
mutex_lock(&info->lock);
(*ref_cnt)--;
- may_release = !ref_cnt;
+ may_release = !*ref_cnt;
mutex_unlock(&info->lock);
if (may_release) {
- framebuffer_release(info);
vfree((u8 *)info->fix.smem_start);
+ framebuffer_release(info);
}
}
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 254a003af04..0a29c51114a 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -266,13 +266,15 @@ static int hiddev_open(struct inode *inode, struct file *file)
{
struct hiddev_list *list;
struct usb_interface *intf;
+ struct hid_device *hid;
struct hiddev *hiddev;
int res;
intf = usb_find_interface(&hiddev_driver, iminor(inode));
if (!intf)
return -ENODEV;
- hiddev = usb_get_intfdata(intf);
+ hid = usb_get_intfdata(intf);
+ hiddev = hid->hiddev;
if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))
return -ENOMEM;
@@ -587,7 +589,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct hiddev_list *list = file->private_data;
struct hiddev *hiddev = list->hiddev;
struct hid_device *hid = hiddev->hid;
- struct usb_device *dev = hid_to_usb_dev(hid);
+ struct usb_device *dev;
struct hiddev_collection_info cinfo;
struct hiddev_report_info rinfo;
struct hiddev_field_info finfo;
@@ -601,9 +603,11 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Called without BKL by compat methods so no BKL taken */
/* FIXME: Who or what stop this racing with a disconnect ?? */
- if (!hiddev->exist)
+ if (!hiddev->exist || !hid)
return -EIO;
+ dev = hid_to_usb_dev(hid);
+
switch (cmd) {
case HIDIOCGVERSION:
@@ -888,7 +892,6 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hid->hiddev = hiddev;
hiddev->hid = hid;
hiddev->exist = 1;
- usb_set_intfdata(usbhid->intf, usbhid);
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
err_hid("Not able to get a minor for this device.");
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0fba8294312..4d4d09bdec0 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -332,11 +332,11 @@ config SENSORS_F71805F
will be called f71805f.
config SENSORS_F71882FG
- tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000"
+ tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
depends on EXPERIMENTAL
help
- If you say yes here you get support for hardware monitoring features
- of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
+ If you say yes here you get support for hardware monitoring
+ features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
F71889FG and F8000 Super-I/O chips.
This driver can also be built as a module. If so, the module
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index b300a2048af..52319340e18 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = {
static int __devinit ads7871_probe(struct spi_device *spi)
{
- int status, ret, err = 0;
+ int ret, err;
uint8_t val;
struct ads7871_data *pdata;
dev_dbg(&spi->dev, "probe\n");
- pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
- if (!pdata) {
- err = -ENOMEM;
- goto exit;
- }
-
- status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
- if (status < 0)
- goto error_free;
-
- pdata->hwmon_dev = hwmon_device_register(&spi->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
- goto error_remove;
- }
-
- spi_set_drvdata(spi, pdata);
-
/* Configure the SPI bus */
spi->mode = (SPI_MODE_0);
spi->bits_per_word = 8;
@@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi)
we need to make sure we really have a chip*/
if (val != ret) {
err = -ENODEV;
+ goto exit;
+ }
+
+ pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+ if (err < 0)
+ goto error_free;
+
+ spi_set_drvdata(spi, pdata);
+
+ pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
goto error_remove;
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c070c9714cb..de8111114f4 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -518,7 +518,6 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
static int __init coretemp_init(void)
{
int i, err = -ENODEV;
- struct pdev_entry *p, *n;
/* quick check if we run Intel */
if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6207120dcd4..537841ef44b 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -45,7 +45,6 @@
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
-#define SIO_F71808_ID 0x0901 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
@@ -97,10 +96,9 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
+enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
static const char *f71882fg_names[] = {
- "f71808fg",
"f71858fg",
"f71862fg",
"f71882fg",
@@ -308,8 +306,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
-/* In attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
+/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
+static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
@@ -319,22 +317,6 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
-};
-
-/* In attr for the f71808fg */
-static struct sensor_device_attribute_2 f71808_in_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
- SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
- SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
- SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
- SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7),
- SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8),
-};
-
-/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
@@ -373,10 +355,6 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
store_temp_beep, 0, 6),
SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
-};
-
-/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 f71862_temp_attr[] = {
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 3),
@@ -1011,11 +989,6 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->temp_type[1] = 6;
break;
}
- } else if (data->type == f71808fg) {
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- data->temp_type[2] = (reg & 0x04) ? 2 : 4;
-
} else {
reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
if ((reg2 & 0x03) == 0x01)
@@ -1898,8 +1871,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
val /= 1000;
- if (data->type == f71889fg
- || data->type == f71808fg)
+ if (data->type == f71889fg)
val = SENSORS_LIMIT(val, -128, 127);
else
val = SENSORS_LIMIT(val, 0, 127);
@@ -2002,28 +1974,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
/* fall through! */
case f71862fg:
err = f71882fg_create_sysfs_files(pdev,
- f71862_temp_attr,
- ARRAY_SIZE(f71862_temp_attr));
- if (err)
- goto exit_unregister_sysfs;
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in_attr,
- ARRAY_SIZE(fxxxx_in_attr));
- if (err)
- goto exit_unregister_sysfs;
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_temp_attr,
- ARRAY_SIZE(fxxxx_temp_attr));
- break;
- case f71808fg:
- err = f71882fg_create_sysfs_files(pdev,
- f71808_in_attr,
- ARRAY_SIZE(f71808_in_attr));
- if (err)
- goto exit_unregister_sysfs;
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_temp_attr,
- ARRAY_SIZE(fxxxx_temp_attr));
+ fxxxx_in_temp_attr,
+ ARRAY_SIZE(fxxxx_in_temp_attr));
break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
@@ -2050,7 +2002,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71862fg:
err = (data->pwm_enable & 0x15) != 0x15;
break;
- case f71808fg:
case f71882fg:
case f71889fg:
err = 0;
@@ -2096,7 +2047,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
- case f71808fg:
case f71889fg:
for (i = 0; i < nr_fans; i++) {
data->pwm_auto_point_mapping[i] =
@@ -2176,22 +2126,8 @@ static int f71882fg_remove(struct platform_device *pdev)
/* fall through! */
case f71862fg:
f71882fg_remove_sysfs_files(pdev,
- f71862_temp_attr,
- ARRAY_SIZE(f71862_temp_attr));
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in_attr,
- ARRAY_SIZE(fxxxx_in_attr));
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_temp_attr,
- ARRAY_SIZE(fxxxx_temp_attr));
- break;
- case f71808fg:
- f71882fg_remove_sysfs_files(pdev,
- f71808_in_attr,
- ARRAY_SIZE(f71808_in_attr));
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_temp_attr,
- ARRAY_SIZE(fxxxx_temp_attr));
+ fxxxx_in_temp_attr,
+ ARRAY_SIZE(fxxxx_in_temp_attr));
break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
@@ -2259,9 +2195,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
switch (devid) {
- case SIO_F71808_ID:
- sio_data->type = f71808fg;
- break;
case SIO_F71858_ID:
sio_data->type = f71858fg;
break;
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index b9bb3e0ca53..39ead2a4d3c 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = {
MODULE_DEVICE_TABLE(pci, k8temp_ids);
+static int __devinit is_rev_g_desktop(u8 model)
+{
+ u32 brandidx;
+
+ if (model < 0x69)
+ return 0;
+
+ if (model == 0xc1 || model == 0x6c || model == 0x7c)
+ return 0;
+
+ /*
+ * Differentiate between AM2 and ASB1.
+ * See "Constructing the processor Name String" in "Revision
+ * Guide for AMD NPT Family 0Fh Processors" (33610).
+ */
+ brandidx = cpuid_ebx(0x80000001);
+ brandidx = (brandidx >> 9) & 0x1f;
+
+ /* Single core */
+ if ((model == 0x6f || model == 0x7f) &&
+ (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
+ return 0;
+
+ /* Dual core */
+ if (model == 0x6b &&
+ (brandidx == 0xb || brandidx == 0xc))
+ return 0;
+
+ return 1;
+}
+
static int __devinit k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
"wrong - check erratum #141\n");
}
- if ((model >= 0x69) &&
- !(model == 0xc1 || model == 0x6c || model == 0x7c ||
- model == 0x6b || model == 0x6f || model == 0x7f)) {
+ if (is_rev_g_desktop(model)) {
/*
* RevG desktop CPUs (i.e. no socket S1G1 or
* ASB1 parts) need additional offset,
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d0dc1db80b2..50815022cff 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
if (recv->block_irq_interval * 4 > iso->buf_packets)
recv->block_irq_interval = iso->buf_packets / 4;
if (recv->block_irq_interval < 1)
- recv->block_irq_interval = 1;
+ recv->block_irq_interval = 1;
/* choose a buffer stride */
/* must be a power of 2, and <= PAGE_SIZE */
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index dcc86b97a15..19fa94af207 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
if (absdev) {
val = lo + (hi << 8);
#ifdef TABLET_AUTOADJUST
- if (val < input_abs_min(dev, ABS_X + i))
+ if (val < input_abs_get_min(dev, ABS_X + i))
input_abs_set_min(dev, ABS_X + i, val);
- if (val > input_abs_max(dev, ABS_X + i))
+ if (val > input_abs_get_max(dev, ABS_X + i))
input_abs_set_max(dev, ABS_X + i, val);
#endif
if (i % 3)
- val = input_abs_max(dev, ABS_X + i) - val;
+ val = input_abs_get_max(dev, ABS_X + i) - val;
input_report_abs(dev, ABS_X + i, val);
} else {
val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
#ifdef TABLET_AUTOADJUST
for (i = 0; i < ABS_MAX; i++) {
- int diff = input_abs_max(input_dev, ABS_X + i) / 10;
+ int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
input_abs_set_min(input_dev, ABS_X + i,
- input_abs_min(input_dev, ABS_X + i) + diff)
+ input_abs_get_min(input_dev, ABS_X + i) + diff);
input_abs_set_max(input_dev, ABS_X + i,
- input_abs_max(input_dev, ABS_X + i) - diff)
+ input_abs_get_max(input_dev, ABS_X + i) - diff);
}
#endif
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 0e53b3bc39a..f32404f9918 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
clk_put(keypad->clk);
input_unregister_device(keypad->input_dev);
- input_free_device(keypad->input_dev);
-
iounmap(keypad->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index bb53fd33cd1..0d4266a533a 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -811,6 +811,8 @@ static struct miscdevice uinput_misc = {
.minor = UINPUT_MINOR,
.name = UINPUT_NAME,
};
+MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
+MODULE_ALIAS("devname:" UINPUT_NAME);
static int __init uinput_init(void)
{
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 83c24cca234..d528a2dba06 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
fx(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
- size = input_abs_get_min(dev, ABS_X) -
- input_abs_get_max(dev, ABS_X);
+ size = input_abs_get_max(dev, ABS_X) -
+ input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
@@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
fy(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
/* use X size for ABS_Y to keep the same scale */
- size = input_abs_get_min(dev, ABS_X) -
- input_abs_get_max(dev, ABS_X);
+ size = input_abs_get_max(dev, ABS_X) -
+ input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 35bc2737412..2d17e76066b 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -45,6 +45,7 @@
#include <linux/syscalls.h>
#include <linux/suspend.h>
#include <linux/cpu.h>
+#include <linux/compat.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
@@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp,
return ret;
}
+#ifdef CONFIG_COMPAT
+#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
+#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
+#define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t)
+#define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t)
+#define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t)
+#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
+
+static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
+{
+ switch (cmd) {
+ case PMU_IOC_SLEEP:
+ break;
+ case PMU_IOC_GET_BACKLIGHT32:
+ cmd = PMU_IOC_GET_BACKLIGHT;
+ break;
+ case PMU_IOC_SET_BACKLIGHT32:
+ cmd = PMU_IOC_SET_BACKLIGHT;
+ break;
+ case PMU_IOC_GET_MODEL32:
+ cmd = PMU_IOC_GET_MODEL;
+ break;
+ case PMU_IOC_HAS_ADB32:
+ cmd = PMU_IOC_HAS_ADB;
+ break;
+ case PMU_IOC_CAN_SLEEP32:
+ cmd = PMU_IOC_CAN_SLEEP;
+ break;
+ case PMU_IOC_GRAB_BACKLIGHT32:
+ cmd = PMU_IOC_GRAB_BACKLIGHT;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static const struct file_operations pmu_device_fops = {
.read = pmu_read,
.write = pmu_write,
.poll = pmu_fpoll,
.unlocked_ioctl = pmu_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_pmu_ioctl,
+#endif
.open = pmu_open,
.release = pmu_release,
};
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644
index a7afec6b19c..00000000000
--- a/drivers/md/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-mktables
-raid6altivec*.c
-raid6int*.c
-raid6tables.c
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1ba1e122e94..ed4900ade93 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
- if (bitmap->mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 11567c7999a..43cf9cc9c1d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2136,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
* with the rest of the array)
*/
mdk_rdev_t *rdev;
-
- /* First make sure individual recovery_offsets are correct */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- mddev->curr_resync_completed > rdev->recovery_offset)
- rdev->recovery_offset = mddev->curr_resync_completed;
-
- }
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->sb_events == mddev->events ||
(nospares &&
@@ -2167,13 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change)
int sync_req;
int nospares = 0;
- mddev->utime = get_seconds();
- if (mddev->external)
- return;
repeat:
+ /* First make sure individual recovery_offsets are correct */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ mddev->delta_disks >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ mddev->curr_resync_completed > rdev->recovery_offset)
+ rdev->recovery_offset = mddev->curr_resync_completed;
+
+ }
+ if (!mddev->persistent) {
+ clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ wake_up(&mddev->sb_wait);
+ return;
+ }
+
spin_lock_irq(&mddev->write_lock);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ mddev->utime = get_seconds();
+
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -2221,19 +2225,6 @@ repeat:
MD_BUG();
mddev->events --;
}
-
- /*
- * do not write anything to disk if using
- * nonpersistent superblocks
- */
- if (!mddev->persistent) {
- if (!mddev->external)
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-
- spin_unlock_irq(&mddev->write_lock);
- wake_up(&mddev->sb_wait);
- return;
- }
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
@@ -3379,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page)
case 0:
if (mddev->in_sync)
st = clean;
- else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
@@ -3460,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN,
- &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
@@ -3474,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case active:
if (mddev->pers) {
restart_array(mddev);
- if (mddev->external)
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -6580,6 +6568,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
@@ -6588,7 +6577,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
@@ -6624,6 +6612,7 @@ int md_allow_write(mddev_t *mddev)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
@@ -6633,7 +6622,7 @@ int md_allow_write(mddev_t *mddev)
} else
spin_unlock_irq(&mddev->write_lock);
- if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
@@ -6831,8 +6820,7 @@ void md_do_sync(mddev_t *mddev)
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed =
mddev->curr_resync;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -7111,8 +7099,7 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a953fe2808a..3931299788d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -140,7 +140,7 @@ struct mddev_s
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* superblock update in progress */
+#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
int suspended;
atomic_t active_io;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 73cc74ffc26..ad83a4dcadc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
- const bool do_sync = (bio->bi_rw & REQ_SYNC);
- bool do_barriers;
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+ unsigned long do_barriers;
mdk_rdev_t *blocked_rdev;
/*
@@ -1120,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev)
{
int i;
conf_t *conf = mddev->private;
+ int count = 0;
+ unsigned long flags;
/*
* Find all failed disks within the RAID1 configuration
@@ -1131,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev)
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return 0;
+ return count;
}
@@ -1640,7 +1643,7 @@ static void raid1d(mddev_t *mddev)
* We already have a nr_pending reference on these rdevs.
*/
int i;
- const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
+ const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
clear_bit(R1BIO_Barrier, &r1_bio->state);
for (i=0; i < conf->raid_disks; i++)
@@ -1696,7 +1699,7 @@ static void raid1d(mddev_t *mddev)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
- const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
+ const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a88aeb5198c..84718383124 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
- const bool do_sync = (bio->bi_rw & REQ_SYNC);
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
@@ -1116,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev)
int i;
conf_t *conf = mddev->private;
mirror_info_t *tmp;
+ int count = 0;
+ unsigned long flags;
/*
* Find all non-in_sync disks within the RAID10 configuration
@@ -1126,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev)
if (tmp->rdev
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return 0;
+ return count;
}
@@ -1734,7 +1737,7 @@ static void raid10d(mddev_t *mddev)
raid_end_bio_io(r10_bio);
bio_put(bio);
} else {
- const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+ const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 866d4b5a144..69b0a169e43 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5330,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev)
int i;
raid5_conf_t *conf = mddev->private;
struct disk_info *tmp;
+ int count = 0;
+ unsigned long flags;
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
@@ -5337,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev)
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
- return 0;
+ return count;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
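
The raid1, raid10 and raid5 spare_active() hunks above all follow the same pattern: count the newly in-sync devices first, then take conf->device_lock once to apply the aggregate change, and return the count so the caller can tell whether anything happened. A small userspace analogue of that batching (pthread mutex standing in for the spinlock; names and numbers are hypothetical):

    /* count matches outside the lock, then take it once for the aggregate */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static int degraded = 3;

    static int spare_active(const int now_in_sync[], int n)
    {
        int i, count = 0;

        for (i = 0; i < n; i++)
            if (now_in_sync[i])
                count++;

        pthread_mutex_lock(&device_lock);   /* one critical section, not n */
        degraded -= count;
        pthread_mutex_unlock(&device_lock);
        return count;                       /* caller learns if anything changed */
    }

    int main(void)
    {
        int now_in_sync[] = { 1, 0, 1 };

        printf("activated %d, degraded now %d\n",
               spare_active(now_in_sync, 3), degraded);   /* 2 and 1 */
        return 0;
    }
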
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
index decdeda840d..fd0830ed10d 100644
--- a/drivers/media/dvb/mantis/Kconfig
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -1,6 +1,6 @@
config MANTIS_CORE
tristate "Mantis/Hopper PCI bridge based devices"
- depends on PCI && I2C && INPUT
+ depends on PCI && I2C && INPUT && IR_CORE
help
Support for PCI cards based on the Mantis and Hopper PCI bridge.
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0efe631e50c..d80cfdc8edd 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -86,7 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
+#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
+#endif
/*
* By default, hosts do not support SGIO or large requests.
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 283190bc2a4..68d12794cfd 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -132,7 +132,7 @@ config MMC_SDHCI_CNS3XXX
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
- depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
+ depends on MMC_SDHCI && PLAT_SAMSUNG
help
This selects the Secure Digital Host Controller Interface (SDHCI)
often referred to as the HSMMC block in some of the Samsung S3C
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 0a7f2614c6f..71ad4163b95 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -242,7 +242,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
if (host) {
- mutex_lock(&host->lock);
+ spin_lock(&host->lock);
if (state) {
dev_dbg(&dev->dev, "card inserted.\n");
host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -252,8 +252,8 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
host->flags |= SDHCI_DEVICE_DEAD;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
- sdhci_card_detect(host);
- mutex_unlock(&host->lock);
+ tasklet_schedule(&host->card_tasklet);
+ spin_unlock(&host->lock);
}
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 785512133b5..401527d273b 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1180,7 +1180,8 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
- if (ios->timing == MMC_TIMING_SD_HS)
+ if (ios->timing == MMC_TIMING_SD_HS &&
+ !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
ctrl |= SDHCI_CTRL_HISPD;
else
ctrl &= ~SDHCI_CTRL_HISPD;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 036cfae7636..d316bc79b63 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -245,6 +245,8 @@ struct sdhci_host {
#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
/* Controller uses Auto CMD12 command to stop the transfer */
#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
+/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
+#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 00af55d7afb..fe63f6bd663 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -22,6 +22,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a3c7473dd40..d551ddd9537 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2866,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
*/
if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
id_data[0] == NAND_MFR_SAMSUNG &&
+ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
@@ -2934,14 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
/* Set the bad block position */
- if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO ||
- (*maf_id == NAND_MFR_SAMSUNG &&
- mtd->writesize == 512) ||
- *maf_id == NAND_MFR_AMD))
- chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
- else
+ if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
-
+ else
+ chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index e02fa4f0e3c..4d89f378020 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
/* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
+#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
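
The old ns2cycle() subtracted one cycle after an already-truncating integer division, so short timing constraints could collapse to zero cycles; dropping the "- 1" removes that extra off-by-one (both versions still round down). A standalone illustration with made-up clock and timing values:

    #include <stdio.h>

    #define NS2CYCLE_OLD(ns, clk) (int)((((ns) * ((clk) / 1000000)) / 1000) - 1)
    #define NS2CYCLE_NEW(ns, clk) (int)(((ns) * ((clk) / 1000000)) / 1000)

    int main(void)
    {
        unsigned long clk = 104000000UL;   /* 104 MHz, illustrative only */
        int ns = 10;                       /* a 10 ns timing constraint */

        printf("old: %d cycles, new: %d cycle(s)\n",
               NS2CYCLE_OLD(ns, clk), NS2CYCLE_NEW(ns, clk));  /* 0 vs 1 */
        return 0;
    }
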
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 2246f154e2f..61f6e5e4045 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
depends on SYSFS
depends on MTD_UBI
select DEBUG_FS
- select KALLSYMS_ALL
+ select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
help
This option enables UBI debugging.
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4dfa6b90c21..3d2d1a69e9a 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
goto out_free;
}
- re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
- if (!re) {
+ re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re1) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
- re->remove = 1;
- re->desc = desc;
- list_add(&re->list, &rename_list);
+ re1->remove = 1;
+ re1->desc = desc;
+ list_add(&re1->list, &rename_list);
dbg_msg("will remove volume %d, name \"%s\"",
- re->desc->vol->vol_id, re->desc->vol->name);
+ re1->desc->vol->vol_id, re1->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 372a15ac999..69b52e9c948 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d"
" found, will remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, &si->corr);
+ err = add_to_list(si, pnum, ec, &si->erase);
if (err)
return err;
return 0;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ee7b1d8fbb9..97a435672ea 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
- if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+ in_wl_tree(e, &ubi->erroneous)) {
spin_unlock(&ubi->wl_lock);
return 0;
}
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7a01588fb6f..e31a6d1919c 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,8 @@ struct vortex_private {
open:1,
medialock:1,
must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
- large_frames:1; /* accept large frames */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
int drv_flags;
u16 status_enable;
u16 intr_enable;
@@ -646,7 +647,7 @@ struct vortex_private {
u16 io_size; /* Size of PCI region (for release_region) */
/* Serialises access to hardware other than MII and variables below.
- * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+ * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
spinlock_t lock;
spinlock_t mii_lock; /* Serialises access to MII */
@@ -1993,10 +1994,9 @@ vortex_error(struct net_device *dev, int status)
}
}
- if (status & RxEarly) { /* Rx early is unused. */
- vortex_rx(dev);
+ if (status & RxEarly) /* Rx early is unused. */
iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
+
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (vortex_debug > 4)
@@ -2133,6 +2133,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name, vp->cur_tx);
}
+ /*
+ * We can't allow a recursion from our interrupt handler back into the
+ * tx routine, as they take the same spin lock, and that causes
+ * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
+ * a bit
+ */
+ if (vp->handling_irq)
+ return NETDEV_TX_BUSY;
+
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2288,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id)
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
+ if (status & RxEarly)
+ vortex_rx(dev);
+ spin_unlock(&vp->window_lock);
vortex_error(dev, status);
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
}
if (--work_done < 0) {
@@ -2335,11 +2349,13 @@ boomerang_interrupt(int irq, void *dev_id)
ioaddr = vp->ioaddr;
+
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
+ vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
@@ -2447,6 +2463,7 @@ boomerang_interrupt(int irq, void *dev_id)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
+ vp->handling_irq = 0;
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
@@ -2971,7 +2988,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int err;
struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
pci_power_t state = 0;
if(VORTEX_PCI(vp))
@@ -2981,9 +2997,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
- spin_lock_irqsave(&vp->lock, flags);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
- spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), state);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 7342308718a..8e7c8a8e61c 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
b44_tx(bp);
/* spin_unlock(&bp->tx_lock); */
}
+ if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
+ bp->istat &= ~ISTAT_RFO;
+ b44_disable_ints(bp);
+ ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+ netif_wake_queue(bp->dev);
+ }
+
spin_unlock_irqrestore(&bp->lock, flags);
work_done = 0;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index d1fb7928ffc..4faf6961dce 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -181,6 +181,7 @@ struct be_drvr_stats {
u64 be_rx_bytes_prev;
u64 be_rx_pkts;
u32 be_rx_rate;
+ u32 be_rx_mcast_pkt;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
u32 be_802_3_dropped_frames;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d305494a60..34abcc9403d 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
- BUG_ON(!is_link_state_evt(compl->flags));
-
- /* Interpret compl as a async link evt */
- be_async_link_state_process(adapter,
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter,
(struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
*status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- be_dump_ue(adapter);
+ be_detect_dump_ue(adapter);
return -1;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a28cfd..ad1e6fac60c 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9bedb9ff6e1..d92063420c2 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(be_rx_events)},
{DRVSTAT_INFO(be_tx_compl)},
{DRVSTAT_INFO(be_rx_compl)},
+ {DRVSTAT_INFO(be_rx_mcast_pkt)},
{DRVSTAT_INFO(be_ethrx_post_fail)},
{DRVSTAT_INFO(be_802_3_dropped_frames)},
{DRVSTAT_INFO(be_802_3_malformed_frames)},
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046402b..a2ec5df0d73 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
#define FLASH_FCoE_BIOS_START_g3 (13631488)
#define FLASH_REDBOOT_START_g3 (262144)
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
/*
* BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index dcee7a20c0b..43a3a574e2e 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+ dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
/* no space available in linux */
dev_stats->tx_dropped = 0;
- dev_stats->multicast = port_stats->rx_multicast_frames;
dev_stats->collisions = 0;
/* detailed tx_errors */
@@ -843,7 +843,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
}
static void be_rx_stats_update(struct be_adapter *adapter,
- u32 pktsize, u16 numfrags)
+ u32 pktsize, u16 numfrags, u8 pkt_type)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -851,6 +851,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
stats->be_rx_pkts++;
+
+ if (pkt_type == BE_MULTICAST_PACKET)
+ stats->be_rx_mcast_pkt++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -920,9 +923,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
+ u8 pkt_type;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
page_info = get_rx_page_info(adapter, rxq_idx);
@@ -988,7 +993,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
BUG_ON(j > MAX_SKB_FRAGS);
done:
- be_rx_stats_update(adapter, pktsize, num_rcvd);
+ be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1055,6 +1060,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
u16 i, rxq_idx = 0, vid, j;
u8 vtm;
+ u8 pkt_type;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
/* Is it a flush compl that has no data */
@@ -1065,6 +1071,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
@@ -1120,7 +1127,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
}
- be_rx_stats_update(adapter, pkt_size, num_rcvd);
+ be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1738,26 +1745,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
return 1;
}
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
- u32 online0 = 0, online1 = 0;
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
- if (!online0 || !online1) {
- adapter->ue_detected = true;
- dev_err(&adapter->pdev->dev,
- "UE Detected!! online0=%d online1=%d\n",
- online0, online1);
- return true;
- }
-
- return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
{
u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
u32 i;
@@ -1774,6 +1762,11 @@ void be_dump_ue(struct be_adapter *adapter)
ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+ if (ue_status_lo || ue_status_hi) {
+ adapter->ue_detected = true;
+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ }
+
if (ue_status_lo) {
for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
if (ue_status_lo & 1)
@@ -1809,10 +1802,8 @@ static void be_worker(struct work_struct *work)
adapter->rx_post_starved = false;
be_post_rx_frags(adapter);
}
- if (!adapter->ue_detected) {
- if (be_detect_ue(adapter))
- be_dump_ue(adapter);
- }
+ if (!adapter->ue_detected)
+ be_detect_dump_ue(adapter);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc3189..3b16f62d560 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* so it can wait
*/
bond_for_each_slave(bond, slave, i) {
+ unsigned long trans_start = dev_trans_start(slave->dev);
+
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
- time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks) &&
+ time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
- (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+ if (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->link = BOND_LINK_DOWN;
slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
{
struct slave *slave;
int i, commit = 0;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
- delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + delta_in_ticks)) {
+
slave->new_link = BOND_LINK_UP;
commit++;
}
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (!time_after_eq(jiffies, slave->jiffies +
- 2 * delta_in_ticks))
+ if (time_in_range(jiffies,
+ slave->jiffies - delta_in_ticks,
+ slave->jiffies + 2 * delta_in_ticks))
continue;
/*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
if (slave->state == BOND_STATE_BACKUP &&
!bond->current_arp_slave &&
- time_after(jiffies, slave_last_rx(bond, slave) +
- 3 * delta_in_ticks)) {
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* - (more than 2*delta since receive AND
* the bond has an IP address)
*/
+ trans_start = dev_trans_start(slave->dev);
if ((slave->state == BOND_STATE_ACTIVE) &&
- (time_after_eq(jiffies, dev_trans_start(slave->dev) +
- 2 * delta_in_ticks) ||
- (time_after_eq(jiffies, slave_last_rx(bond, slave)
- + 2 * delta_in_ticks)))) {
+ (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
{
struct slave *slave;
int i;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
continue;
case BOND_LINK_UP:
+ trans_start = dev_trans_start(slave->dev);
if ((!bond->curr_active_slave &&
- time_before_eq(jiffies,
- dev_trans_start(slave->dev) +
- delta_in_ticks)) ||
+ time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks)) ||
bond->curr_active_slave != slave) {
slave->link = BOND_LINK_UP;
bond->current_arp_slave = NULL;
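
The bonding hunks above replace one-sided time_before_eq()/time_after_eq() checks with time_in_range(), which additionally rejects timestamps so stale that the unsigned difference from jiffies has crossed half the counter range; with the old test such a stamp wraps and looks "recent" again, so a dead slave could suddenly be treated as up. A userspace model on a 32-bit counter (values arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    /* same idea as the kernel jiffies helpers, on a 32-bit counter */
    #define time_after_eq(a, b)     ((int32_t)((a) - (b)) >= 0)
    #define time_before_eq(a, b)    time_after_eq(b, a)
    #define time_in_range(a, b, c)  (time_after_eq(a, b) && time_before_eq(a, c))

    int main(void)
    {
        uint32_t trans_start = 0x100, delta = 0x200;
        uint32_t now = trans_start + 0x80000300;  /* silent for > half the wrap */

        printf("before_eq: %d\n",
               time_before_eq(now, trans_start + delta));          /* 1 (wrong) */
        printf("in_range : %d\n",
               time_in_range(now, trans_start - delta,
                             trans_start + delta));                /* 0 (right) */
        return 0;
    }
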
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 631a6242b01..75bfc3a9d95 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@ config CAIF_TTY
config CAIF_SPI_SLAVE
tristate "CAIF SPI transport driver for slave interface"
- depends on CAIF
+ depends on CAIF && HAS_DMA
default n
---help---
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 3995fafc1e0..8c6c1e2a875 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
}
#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key, struct tty_struct *tty)
+static void emac_sysrq_handler(int key)
{
emac_dbg_dump_all();
}
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a6f13..51919fcd50c 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_wrreg16(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
- if (rxlen > 0) {
- skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
- if (!skb) {
- /* todo - dump frame and move on */
- }
+ if (rxlen > 4) {
+ unsigned int rxalign;
+
+ rxlen -= 4;
+ rxalign = ALIGN(rxlen, 4);
+ skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+ if (skb) {
- /* two bytes to ensure ip is aligned, and four bytes
- * for the status header and 4 bytes of garbage */
- skb_reserve(skb, 2 + 4 + 4);
+ /* 4 bytes of status header + 4 bytes of
+ * garbage: we put them before ethernet
+ * header, so that they are copied,
+ * but ignored.
+ */
- rxpkt = skb_put(skb, rxlen - 4) - 8;
+ rxpkt = skb_put(skb, rxlen) - 8;
- /* align the packet length to 4 bytes, and add 4 bytes
- * as we're getting the rx status header as well */
- ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+ ks8851_rdfifo(ks, rxpkt, rxalign + 8);
- if (netif_msg_pktdata(ks))
- ks8851_dbg_dumpkkt(ks, rxpkt);
+ if (netif_msg_pktdata(ks))
+ ks8851_dbg_dumpkkt(ks, rxpkt);
- skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx(skb);
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+ netif_rx(skb);
- ks->netdev->stats.rx_packets++;
- ks->netdev->stats.rx_bytes += rxlen - 4;
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
+ }
}
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
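
The reworked receive path drops the trailing 4-byte CRC from the reported length, rounds the remainder up to a 4-byte FIFO boundary, allocates with netdev_alloc_skb_ip_align(), and only fills the skb when the allocation succeeded. The size bookkeeping, with a hypothetical frame length:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int rxlen = 1518;        /* length as reported, incl. CRC */
        unsigned int rxalign;

        rxlen -= 4;                       /* strip the CRC: 1514 bytes */
        rxalign = ALIGN(rxlen, 4);        /* 1516-byte skb data area */

        printf("alloc %u, read %u from FIFO (8 = status + garbage prefix)\n",
               rxalign, rxalign + 8);     /* 1516 and 1524 */
        return 0;
    }
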
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index cb30df106a2..73d31459223 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2131,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
disable_irq(adapter->irq);
- netxen_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1f89f472cbf..8e1859c801a 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
struct niu_parent *parent = np->parent;
struct niu_tcam_entry *tp;
int i, idx, cnt;
- u16 n_entries;
unsigned long flags;
-
+ int ret = 0;
/* put the tcam size here */
nfc->data = tcam_get_size(np);
niu_lock_parent(np, flags);
- n_entries = nfc->rule_cnt;
for (cnt = 0, i = 0; i < nfc->data; i++) {
idx = tcam_get_index(np, i);
tp = &parent->tcam[idx];
if (!tp->valid)
continue;
+ if (cnt == nfc->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
rule_locs[cnt] = i;
cnt++;
}
niu_unlock_parent(np, flags);
- if (n_entries != cnt) {
- /* print warning, this should not happen */
- netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
- np->parent->index, __func__, n_entries, cnt);
- }
-
- return 0;
+ return ret;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 5ad42e0aee2..e180832c278 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1617,6 +1617,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+ PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0761197c07..16ddc77313c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->interface = interface;
+ phydev->state = PHY_READY;
+
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 764aa909946..75c2ff99d66 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -652,15 +652,15 @@ static void eth_port_start(struct net_device *dev)
/* Assignment of Tx CTRP of given queue */
tx_curr_desc = pep->tx_curr_desc_q;
wrl(pep, ETH_C_TX_DESC_1,
- (u32) ((struct tx_desc *)pep->tx_desc_dma + tx_curr_desc));
+ (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
/* Assignment of Rx CRDP of given queue */
rx_curr_desc = pep->rx_curr_desc_q;
wrl(pep, ETH_C_RX_DESC_0,
- (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
wrl(pep, ETH_F_RX_DESC_0,
- (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
/* Clear all interrupts */
wrl(pep, INT_CAUSE, 0);
@@ -1347,7 +1347,7 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
{
struct pxa168_eth_private *pep = netdev_priv(dev);
if (pep->phy != NULL)
- return phy_mii_ioctl(pep->phy, if_mii(ifr), cmd);
+ return phy_mii_ioctl(pep->phy, ifr, cmd);
return -EOPNOTSUPP;
}
@@ -1411,10 +1411,8 @@ static int ethernet_phy_setup(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- if (pep->pd != NULL) {
- if (pep->pd->init)
- pep->pd->init();
- }
+ if (pep->pd->init)
+ pep->pd->init();
pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
if (pep->phy != NULL)
phy_init(pep, pep->pd->speed, pep->pd->duplex);
@@ -1496,7 +1494,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
if (!dev) {
err = -ENOMEM;
- goto out;
+ goto err_clk;
}
platform_set_drvdata(pdev, dev);
@@ -1506,12 +1504,12 @@ static int pxa168_eth_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
err = -ENODEV;
- goto out;
+ goto err_netdev;
}
pep->base = ioremap(res->start, res->end - res->start + 1);
if (pep->base == NULL) {
err = -ENOMEM;
- goto out;
+ goto err_netdev;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
BUG_ON(!res);
@@ -1548,7 +1546,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->smi_bus = mdiobus_alloc();
if (pep->smi_bus == NULL) {
err = -ENOMEM;
- goto out;
+ goto err_base;
}
pep->smi_bus->priv = pep;
pep->smi_bus->name = "pxa168_eth smi";
@@ -1557,31 +1555,31 @@ static int pxa168_eth_probe(struct platform_device *pdev)
snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
pep->smi_bus->parent = &pdev->dev;
pep->smi_bus->phy_mask = 0xffffffff;
- if (mdiobus_register(pep->smi_bus) < 0) {
- err = -ENOMEM;
- goto out;
- }
+ err = mdiobus_register(pep->smi_bus);
+ if (err)
+ goto err_free_mdio;
+
pxa168_init_hw(pep);
err = ethernet_phy_setup(dev);
if (err)
- goto out;
+ goto err_mdiobus;
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_netdev(dev);
if (err)
- goto out;
+ goto err_mdiobus;
return 0;
-out:
- if (pep->clk) {
- clk_disable(pep->clk);
- clk_put(pep->clk);
- pep->clk = NULL;
- }
- if (pep->base) {
- iounmap(pep->base);
- pep->base = NULL;
- }
- if (dev)
- free_netdev(dev);
+
+err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+err_base:
+ iounmap(pep->base);
+err_netdev:
+ free_netdev(dev);
+err_clk:
+ clk_disable(clk);
+ clk_put(clk);
return err;
}
@@ -1605,6 +1603,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
iounmap(pep->base);
pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
flush_scheduled_work();
free_netdev(dev);
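
The descriptor-pointer writes in eth_port_start() compute the same bus address either way, since pointer arithmetic on a (struct tx_desc *) also scales by the element size, but the new form does the math on the DMA handle directly instead of laundering a bus address through a CPU pointer cast (which can silently truncate when dma_addr_t is wider than a pointer). Sketch of the offset arithmetic, with an illustrative descriptor layout and base address:

    #include <stdio.h>
    #include <stdint.h>

    struct tx_desc { uint32_t cmd_sts, byte_cnt, buf_ptr, next; }; /* 16 bytes here */

    int main(void)
    {
        uint64_t tx_desc_dma = 0x20000000ULL;   /* bus address of the ring base */
        unsigned int tx_curr_desc = 3;

        /* explicit scaling, matching what pointer arithmetic would have done */
        printf("%#llx\n", (unsigned long long)
               (tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); /* 0x20000030 */
        return 0;
    }
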
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 5a6445a691b..5fd2abd1eb6 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2513,9 +2513,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
disable_irq(adapter->irq);
- qlcnic_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index e2d0e108b9a..4ffebe83d88 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3926,12 +3926,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
- ql_free_rx_buffers(qdev);
-
status = ql_adapter_reset(qdev);
if (status)
netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev->func);
+ ql_free_rx_buffers(qdev);
+
return status;
}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 1ccea76d89a..03c160c6d75 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1862,15 +1862,15 @@ static int stmmac_resume(struct platform_device *pdev)
if (!netif_running(dev))
return 0;
- spin_lock(&priv->lock);
-
if (priv->shutdown) {
/* Re-open the interface and re-init the MAC/DMA
- and the rings. */
+ and the rings (i.e. on hibernation stage) */
stmmac_open(dev);
- goto out_resume;
+ return 0;
}
+ spin_lock(&priv->lock);
+
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -1898,7 +1898,6 @@ static int stmmac_resume(struct platform_device *pdev)
netif_start_queue(dev);
-out_resume:
spin_unlock(&priv->lock);
return 0;
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08e7b6abacd..b2bcf99e6f0 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -58,6 +58,7 @@
#define USB_PRODUCT_IPHONE 0x1290
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
+#define USB_PRODUCT_IPHONE_4 0x1297
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -424,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
.ndo_get_stats = &ipheth_stats,
};
-static struct device_type ipheth_type = {
- .name = "wwan",
-};
-
static int ipheth_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -445,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
netdev->netdev_ops = &ipheth_netdev_ops;
netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
- strcpy(netdev->name, "wwan%d");
+ strcpy(netdev->name, "eth%d");
dev = netdev_priv(netdev);
dev->udev = udev;
@@ -495,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
SET_NETDEV_DEV(netdev, &intf->dev);
SET_ETHTOOL_OPS(netdev, &ops);
- SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
retval = register_netdev(netdev);
if (retval) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index ed7f4f5c406..6884813b809 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
ret = register_netdev(dev);
if (ret < 0)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 45789c8990d..116ac66c6e3 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
PCI_DMA_TODEVICE);
rate = ieee80211_get_tx_rate(sc->hw, info);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b883b174385..057fb69ddf7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
length = block[it+1];
length &= 0xff;
- if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+ if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
ath_print(common, ATH_DBG_EEPROM,
"Restore at %d: spot=%d "
"offset=%d length=%d\n",
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 7f48df1e290..0b09db0f8e7 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -62,7 +62,7 @@
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
+#define CTL_MODE_M 0xf
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index a1c39526161..345dd9721b4 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@ enum ctl_group {
#define NO_CTL 0xff
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 1bbaaa44d98..296fd00a512 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -124,7 +124,7 @@ struct if_sdio_card {
bool helper_allocated;
bool firmware_allocated;
- u8 buffer[65536];
+ u8 buffer[65536] __attribute__((aligned(4)));
spinlock_t lock;
struct if_sdio_packet *packets;
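
The aligned(4) attribute added above forces the large transfer buffer onto a 4-byte boundary regardless of what precedes it in the struct; the likely motivation is that the SDIO block-transfer path expects an aligned source, though the patch itself only adds the attribute. A quick check of the effect, with a cut-down struct and hypothetical member names:

    #include <stdio.h>
    #include <stddef.h>

    struct card {
        char flag;                                        /* 1-byte member first */
        unsigned char buffer[64] __attribute__((aligned(4)));
    };

    int main(void)
    {
        /* without the attribute the array could legally start at offset 1 */
        printf("buffer offset = %zu\n", offsetof(struct card, buffer));  /* 4 */
        return 0;
    }
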
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 8f67db487f5..76b2318a7dc 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
- (!payload->status))
+ !(payload->status & P54_TX_FAILED))
info->flags |= IEEE80211_TX_STAT_ACK;
if (payload->status & P54_TX_PSM_CANCELLED)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 45fcc1e96df..3bc72d18b12 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
- OSC_SHPC_NATIVE_HP_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ flags &= OSC_SHPC_NATIVE_HP_CONTROL;
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s\n",
(char *)string.pointer);
- status = acpi_pci_osc_control_set(handle, flags);
+ status = acpi_pci_osc_control_set(handle, &flags, flags);
if (ACPI_SUCCESS(status))
goto got_one;
if (status == AE_SUPPORT)
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4ed76b47b6d..73d51398926 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void)
{
pciehp_acpi_slot_detection_init();
}
-
-static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
- int retval;
- u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
- if (retval)
- return retval;
- return pciehp_acpi_slot_detection_check(dev);
-}
#else
#define pciehp_firmware_init() do {} while (0)
-#define pciehp_get_hp_hw_control_from_firmware(dev) 0
+static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+ return 0;
+}
#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 1f4000a5a10..2574700db46 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev)
acpi_handle handle;
struct dummy_slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
- /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
- if (pciehp_get_hp_hw_control_from_firmware(pdev))
- return -ENODEV;
+
pos = pci_pcie_cap(pdev);
if (!pos)
return -ENODEV;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3588ea61b0d..aa5f3ff629f 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
#define PCIE_MODULE_NAME "pciehp"
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
dev_info(&dev->device,
"Bypassing BIOS check for pciehp use on %s\n",
pci_name(dev->port));
- else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
+ else if (pciehp_acpi_slot_detection_check(dev->port))
goto err_out_none;
ctrl = pcie_init(dev);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 679c39de6a8..7754a678ab1 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
+bool pci_aer_available(void);
#else
static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
#endif
static inline int pci_no_d1d2(struct pci_dev *dev)
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index ea654545e7c..00c62df5a9f 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,10 +6,11 @@
obj-$(CONFIG_PCIEASPM) += aspm.o
pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
+pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
# Build PCI Express AER if needed
obj-$(CONFIG_PCIEAER) += aer/
-obj-$(CONFIG_PCIE_PME) += pme/
+obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 484cc55194b..f409948e1a9 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,6 +72,11 @@ void pci_no_aer(void)
pcie_aer_disable = 1; /* has priority over 'forceload' */
}
+bool pci_aer_available(void)
+{
+ return !pcie_aer_disable && pci_msi_enabled();
+}
+
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
*/
static int __init aer_service_init(void)
{
- if (pcie_aer_disable)
- return -ENXIO;
- if (!pci_msi_enabled())
+ if (!pci_aer_available())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index f278d7b0d95..2bb9b897221 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -19,42 +19,6 @@
#include <acpi/apei.h>
#include "aerdrv.h"
-/**
- * aer_osc_setup - run ACPI _OSC method
- * @pciedev: pcie_device which AER is being enabled on
- *
- * @return: Zero on success. Nonzero otherwise.
- *
- * Invoked when PCIe bus loads AER service driver. To avoid conflict with
- * BIOS AER support requires BIOS to yield AER control to OS native driver.
- **/
-int aer_osc_setup(struct pcie_device *pciedev)
-{
- acpi_status status = AE_NOT_FOUND;
- struct pci_dev *pdev = pciedev->port;
- acpi_handle handle = NULL;
-
- if (acpi_pci_disabled)
- return -1;
-
- handle = acpi_find_root_bridge_handle(pdev);
- if (handle) {
- status = acpi_pci_osc_control_set(handle,
- OSC_PCI_EXPRESS_AER_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- }
-
- if (ACPI_FAILURE(status)) {
- dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
- "init device: %s\n",
- (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
- "no _OSC support" : "_OSC failed");
- return -1;
- }
-
- return 0;
-}
-
#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index fc0b5a93e1d..29e268fadf1 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work)
*/
int aer_init(struct pcie_device *dev)
{
- if (pcie_aer_get_firmware_first(dev->port)) {
- dev_printk(KERN_DEBUG, &dev->device,
- "PCIe errors handled by platform firmware.\n");
- goto out;
- }
-
- if (aer_osc_setup(dev))
- goto out;
-
- return 0;
-out:
if (forceload) {
dev_printk(KERN_DEBUG, &dev->device,
"aerdrv forceload requested.\n");
pcie_aer_force_firmware_first(dev->port, 0);
- return 0;
}
- return -ENXIO;
+ return 0;
}
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c
index bbdea18693d..2f3c9040722 100644
--- a/drivers/pci/pcie/pme/pcie_pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -23,38 +23,13 @@
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
-#include "../../pci.h"
-#include "pcie_pme.h"
+#include "../pci.h"
+#include "portdrv.h"
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
/*
- * If set, this switch will prevent the PCIe root port PME service driver from
- * being registered. Consequently, the interrupt-based PCIe PME signaling will
- * not be used by any PCIe root ports in that case.
- */
-static bool pcie_pme_disabled = true;
-
-/*
- * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
- * "In order to maintain compatibility with non-PCI Express-aware system
- * software, system power management logic must be configured by firmware to use
- * the legacy mechanism of signaling PME by default. PCI Express-aware system
- * software must notify the firmware prior to enabling native, interrupt-based
- * PME signaling." However, if the platform doesn't provide us with a suitable
- * notification mechanism or the notification fails, it is not clear whether or
- * not we are supposed to use the interrupt-based PCIe PME signaling. The
- * switch below can be used to indicate the desired behaviour. When set, it
- * will make the kernel use the interrupt-based PCIe PME signaling regardless of
- * the platform notification status, although the kernel will attempt to notify
- * the platform anyway. When unset, it will prevent the kernel from using the
- * the interrupt-based PCIe PME signaling if the platform notification fails,
- * which is the default.
- */
-static bool pcie_pme_force_enable;
-
-/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
* that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled;
static int __init pcie_pme_setup(char *str)
{
- if (!strncmp(str, "auto", 4))
- pcie_pme_disabled = false;
- else if (!strncmp(str, "force", 5))
- pcie_pme_force_enable = true;
-
- str = strchr(str, ',');
- if (str) {
- str++;
- str += strspn(str, " \t");
- if (*str && !strcmp(str, "nomsi"))
- pcie_pme_msi_disabled = true;
- }
+ if (!strncmp(str, "nomsi", 5))
+ pcie_pme_msi_disabled = true;
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
-/**
- * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
- * @srv: PCIe PME root port service to use for carrying out the check.
- *
- * Notify the platform that the native PCIe PME is going to be used and return
- * 'true' if the control of the PCIe PME registers has been acquired from the
- * platform.
- */
-static bool pcie_pme_platform_setup(struct pcie_device *srv)
-{
- if (!pcie_pme_platform_notify(srv))
- return true;
- return pcie_pme_force_enable;
-}
-
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
@@ -108,7 +58,7 @@ struct pcie_pme_service_data {
* @dev: PCIe root port or event collector.
* @enable: Enable or disable the interrupt.
*/
-static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
int rtctl_pos;
u16 rtctl;
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
struct pcie_pme_service_data *data;
int ret;
- if (!pcie_pme_platform_setup(srv))
- return -EACCES;
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
*/
static int __init pcie_pme_service_init(void)
{
- return pcie_pme_disabled ?
- -ENODEV : pcie_port_service_register(&pcie_pme_driver);
+ return pcie_port_service_register(&pcie_pme_driver);
}
module_init(pcie_pme_service_init);
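With pcie_pme_disabled and pcie_pme_force_enable gone, "nomsi" is the only keyword the setup handler still recognises, e.g. on the kernel command line:

	pcie_pme=nomsi

The old "auto"/"force" keywords are superseded by the new pcie_ports= parameter introduced in portdrv_pci.c later in this patch.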
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644
index 8b923805308..00000000000
--- a/drivers/pci/pcie/pme/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for PCI-Express Root Port PME signaling driver
-#
-
-obj-$(CONFIG_PCIE_PME) += pmedriver.o
-
-pmedriver-objs := pcie_pme.o
-pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644
index b30d2b7c977..00000000000
--- a/drivers/pci/pcie/pme/pcie_pme.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drivers/pci/pcie/pme/pcie_pme.h
- *
- * PCI Express Root Port PME signaling support
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#ifndef _PCIE_PME_H_
-#define _PCIE_PME_H_
-
-struct pcie_device;
-
-#ifdef CONFIG_ACPI
-extern int pcie_pme_acpi_setup(struct pcie_device *srv);
-
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
- return pcie_pme_acpi_setup(srv);
-}
-#else /* !CONFIG_ACPI */
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
- return 0;
-}
-#endif /* !CONFIG_ACPI */
-
-#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644
index 83ab2287ae3..00000000000
--- a/drivers/pci/pcie/pme/pcie_pme_acpi.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * PCIe Native PME support, ACPI-related part
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License V2. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-#include <linux/pcieport_if.h>
-
-/**
- * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
- * @srv - PCIe PME service for a root port or event collector.
- *
- * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid
- * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
- * control to the kernel.
- */
-int pcie_pme_acpi_setup(struct pcie_device *srv)
-{
- acpi_status status = AE_NOT_FOUND;
- struct pci_dev *port = srv->port;
- acpi_handle handle;
- int error = 0;
-
- if (acpi_pci_disabled)
- return -ENOSYS;
-
- dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
-
- handle = acpi_find_root_bridge_handle(port);
- if (!handle)
- return -EINVAL;
-
- status = acpi_pci_osc_control_set(handle,
- OSC_PCI_EXPRESS_PME_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_FAILURE(status)) {
- dev_info(&port->dev,
- "Failed to receive control of PCIe PME service: %s\n",
- (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
- "no _OSC support" : "ACPI _OSC failed");
- error = -ENODEV;
- }
-
- return error;
-}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 813a5c3427b..7b5aba0a329 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,6 +20,9 @@
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+
extern struct bus_type pcie_port_bus_type;
extern int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
extern int __must_check pcie_port_bus_register(void);
extern void pcie_port_bus_unregister(void);
+struct pci_dev;
+
#ifdef CONFIG_PCIE_PME
extern bool pcie_pme_msi_disabled;
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void)
{
return pcie_pme_msi_disabled;
}
+
+extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
#else /* !CONFIG_PCIE_PME */
static inline void pcie_pme_disable_msi(void) {}
static inline bool pcie_pme_no_msi(void) { return false; }
+static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
+#ifdef CONFIG_ACPI
+extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return pcie_port_acpi_setup(port, mask);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return 0;
+}
+#endif /* !CONFIG_ACPI */
+
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644
index 00000000000..b7c4cb1ccb2
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -0,0 +1,77 @@
+/*
+ * PCIe Port Native Services Support, ACPI-Related Part
+ *
+ * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+#include "aer/aerdrv.h"
+#include "../pci.h"
+
+/**
+ * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
+ * @port: PCIe Port service for a root port or event collector.
+ * @srv_mask: Bit mask of services that can be enabled for @port.
+ *
+ * Invoked when @port is identified as a PCIe port device. To avoid conflicts
+ * with the BIOS PCIe port native services support requires the BIOS to yield
+ * control of these services to the kernel. The mask of services that the BIOS
+ * allows to be enabled for @port is written to @srv_mask.
+ *
+ * NOTE: It turns out that we cannot do that for individual port services
+ * separately, because that would make some systems work incorrectly.
+ */
+int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+{
+ acpi_status status;
+ acpi_handle handle;
+ u32 flags;
+
+ if (acpi_pci_disabled)
+ return 0;
+
+ handle = acpi_find_root_bridge_handle(port);
+ if (!handle)
+ return -EINVAL;
+
+ flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+ | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+ | OSC_PCI_EXPRESS_PME_CONTROL;
+
+ if (pci_aer_available()) {
+ if (pcie_aer_get_firmware_first(port))
+ dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
+ else
+ flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+ }
+
+ status = acpi_pci_osc_control_set(handle, &flags,
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
+ status);
+ return -ENODEV;
+ }
+
+ dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+
+ *srv_mask = PCIE_PORT_SERVICE_VC;
+ if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_HP;
+ if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_PME;
+ if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_AER;
+
+ return 0;
+}
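The new helper negotiates control of all port services in a single _OSC call and reports the granted set through *srv_mask. A minimal sketch of how a caller consumes that mask, assuming the flow used by get_port_device_capability() below (the example function itself is illustrative only):

	/* Illustrative consumer of the mask filled in by pcie_port_acpi_setup(). */
	static int example_port_services(struct pci_dev *port)
	{
		int mask = 0, services = 0;

		if (pcie_port_platform_notify(port, &mask))
			return 0;		/* _OSC refused: no native services */

		if (mask & PCIE_PORT_SERVICE_PME)
			services |= PCIE_PORT_SERVICE_PME;	/* native PME allowed */
		if (mask & PCIE_PORT_SERVICE_AER)
			services |= PCIE_PORT_SERVICE_AER;	/* native AER allowed */

		return services;
	}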
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e73effbe402..a9c222d79eb 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -14,6 +14,8 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/pci-aspm.h>
#include "../pci.h"
#include "portdrv.h"
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev)
int services = 0, pos;
u16 reg16;
u32 reg32;
+ int cap_mask;
+ int err;
+
+ err = pcie_port_platform_notify(dev, &cap_mask);
+ if (pcie_ports_auto) {
+ if (err) {
+ pcie_no_aspm();
+ return 0;
+ }
+ } else {
+ cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+ | PCIE_PORT_SERVICE_VC;
+ if (pci_aer_available())
+ cap_mask |= PCIE_PORT_SERVICE_AER;
+ }
pos = pci_pcie_cap(dev);
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
/* Hot-Plug Capable */
- if (reg16 & PCI_EXP_FLAGS_SLOT) {
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
- if (reg32 & PCI_EXP_SLTCAP_HPC)
+ if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
+ /*
+ * Disable hot-plug interrupts in case they have been
+ * enabled by the BIOS and the hot-plug service driver
+ * is not loaded.
+ */
+ pos += PCI_EXP_SLTCTL;
+ pci_read_config_word(dev, pos, &reg16);
+ reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ pci_write_config_word(dev, pos, reg16);
+ }
}
/* AER capable */
- if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+ if ((cap_mask & PCIE_PORT_SERVICE_AER)
+ && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
services |= PCIE_PORT_SERVICE_AER;
+ /*
+ * Disable AER on this port in case it's been enabled by the
+ * BIOS (the AER service driver will enable it when necessary).
+ */
+ pci_disable_pcie_error_reporting(dev);
+ }
/* VC support */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
services |= PCIE_PORT_SERVICE_VC;
/* Root ports are capable of generating PME too */
- if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ if ((cap_mask & PCIE_PORT_SERVICE_PME)
+ && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
services |= PCIE_PORT_SERVICE_PME;
+ /*
+ * Disable PME interrupt on this port in case it's been enabled
+ * by the BIOS (the PME service driver will enable it when
+ * necessary).
+ */
+ pcie_pme_interrupt_enable(dev, false);
+ }
return services;
}
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {}
*/
int pcie_port_service_register(struct pcie_port_service_driver *new)
{
+ if (pcie_ports_disabled)
+ return -ENODEV;
+
new->driver.name = (char *)new->name;
new->driver.bus = &pcie_port_bus_type;
new->driver.probe = pcie_port_probe_service;
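pcie_port_service_register() now refuses all registrations when native port services are disabled. A minimal sketch of a service driver affected by this gate (field names per <linux/pcieport_if.h>; the driver itself is hypothetical):

	static int example_probe(struct pcie_device *dev)
	{
		return 0;
	}

	static struct pcie_port_service_driver example_service = {
		.name		= "example",
		.port_type	= PCI_EXP_TYPE_ROOT_PORT,
		.service	= PCIE_PORT_SERVICE_PME,
		.probe		= example_probe,
	};

	/* pcie_port_service_register(&example_service) returns -ENODEV
	 * when pcie_ports=compat (pcie_ports_disabled) is in effect. */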
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3debed25e46..f9033e190fb 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -15,6 +15,7 @@
#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include <linux/dmi.h>
+#include <linux/pci-aspm.h>
#include "portdrv.h"
#include "aer/aerdrv.h"
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+/* If this switch is set, PCIe port native services should not be enabled. */
+bool pcie_ports_disabled;
+
+/*
+ * If this switch is set, ACPI _OSC will be used to determine whether or not to
+ * enable PCIe port native services.
+ */
+bool pcie_ports_auto = true;
+
+static int __init pcie_port_setup(char *str)
+{
+ if (!strncmp(str, "compat", 6)) {
+ pcie_ports_disabled = true;
+ } else if (!strncmp(str, "native", 6)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = false;
+ } else if (!strncmp(str, "auto", 4)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = true;
+ }
+
+ return 1;
+}
+__setup("pcie_ports=", pcie_port_setup);
+
/* global data */
static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void)
{
int retval;
+ if (pcie_ports_disabled) {
+ pcie_no_aspm();
+ return -EACCES;
+ }
+
dmi_check_system(pcie_portdrv_dmi_table);
retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void)
return retval;
}
-static void __exit pcie_portdrv_exit(void)
-{
- pci_unregister_driver(&pcie_portdriver);
- pcie_port_bus_unregister();
-}
-
module_init(pcie_portdrv_init);
-module_exit(pcie_portdrv_exit);
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48..968cfea04f7 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
}
/* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
+static const char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
"66 MHz PCI-X", /* 0x02 */
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 044f430f3b4..cff7cc2c1f0 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -486,10 +486,12 @@ config TOPSTAR_LAPTOP
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
+ depends on LEDS_CLASS
+ depends on NEW_LEDS
+ depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
depends on RFKILL || RFKILL = n
select INPUT_POLLDEV
- select BACKLIGHT_CLASS_DEVICE
---help---
This driver adds support for access to certain system settings
on "legacy free" Toshiba laptops. These laptops can be recognized by
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index e058c2ba2a1..ca05aefd03b 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -938,10 +938,11 @@ static int set_brightness(int value)
/* SPLV laptop */
if (hotk->methods->brightness_set) {
if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
- value, NULL))
+ value, NULL)) {
printk(KERN_WARNING
"Asus ACPI: Error changing brightness\n");
ret = -EIO;
+ }
goto out;
}
@@ -953,10 +954,11 @@ static int set_brightness(int value)
hotk->methods->brightness_down,
NULL, NULL);
(value > 0) ? value-- : value++;
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
printk(KERN_WARNING
"Asus ACPI: Error changing brightness\n");
ret = -EIO;
+ }
}
out:
return ret;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index d071ce05632..097083cac41 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -841,6 +841,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = {
.callback = dmi_check_cb
},
{
+ .ident = "Dell Mini 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
.ident = "Dell Inspiron 11z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1092,5 +1100,6 @@ MODULE_ALIAS("dmi:*:rnJHL90:rvrREFERENCE:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
+MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b41ed5cab3e..4413975912e 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -122,6 +122,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
},
},
{
+ .ident = "Dell Mini 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ },
+ {
.ident = "Dell Inspiron 11z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f1551637498..c1741142a4c 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -79,12 +79,13 @@ struct bios_args {
u32 command;
u32 commandtype;
u32 datasize;
- char *data;
+ u32 data;
};
struct bios_return {
u32 sigpass;
u32 return_code;
+ u32 value;
};
struct key_entry {
@@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = {
* buffer = kzalloc(128, GFP_KERNEL);
* ret = hp_wmi_perform_query(0x7, 0, buffer, 128)
*/
-static int hp_wmi_perform_query(int query, int write, char *buffer,
+static int hp_wmi_perform_query(int query, int write, u32 *buffer,
int buffersize)
{
struct bios_return bios_return;
@@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
.command = write ? 0x2 : 0x1,
.commandtype = query,
.datasize = buffersize,
- .data = buffer,
+ .data = *buffer,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
bios_return = *((struct bios_return *)obj->buffer.pointer);
- if (bios_return.return_code) {
- printk(KERN_WARNING PREFIX "Query %d returned %d\n", query,
- bios_return.return_code);
- kfree(obj);
- return bios_return.return_code;
- }
- if (obj->buffer.length - sizeof(bios_return) > buffersize) {
- kfree(obj);
- return -EINVAL;
- }
-
- memset(buffer, 0, buffersize);
- memcpy(buffer,
- ((char *)obj->buffer.pointer) + sizeof(struct bios_return),
- obj->buffer.length - sizeof(bios_return));
- kfree(obj);
+ memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
return 0;
}
static int hp_wmi_display_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -208,8 +194,8 @@ static int hp_wmi_display_state(void)
static int hp_wmi_hddtemp_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void)
static int hp_wmi_als_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -228,8 +214,8 @@ static int hp_wmi_als_state(void)
static int hp_wmi_dock_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state));
if (ret)
@@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void)
static int hp_wmi_tablet_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state));
if (ret)
return ret;
@@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
int ret;
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
- (char *)&query, sizeof(query));
+ &query, sizeof(query));
if (ret)
return -EINVAL;
return 0;
@@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
- int wireless;
+ int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- (char *)&wireless, sizeof(wireless));
+ &wireless, sizeof(wireless));
/* TBD: Pass error */
mask = 0x200 << (r * 8);
@@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
- int wireless;
+ int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- (char *)&wireless, sizeof(wireless));
+ &wireless, sizeof(wireless));
/* TBD: Pass error */
mask = 0x800 << (r * 8);
@@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 tmp = simple_strtoul(buf, NULL, 10);
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp,
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp));
if (ret)
return -EINVAL;
@@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context)
static struct key_entry *key;
union acpi_object *obj;
u32 event_id, event_data;
- int key_code, ret;
+ int key_code = 0, ret;
u32 *location;
acpi_status status;
@@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context)
break;
case HPWMI_BEZEL_BUTTON:
ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
- (char *)&key_code,
+ &key_code,
sizeof(key_code));
if (ret)
break;
@@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device)
static int __devinit hp_wmi_bios_setup(struct platform_device *device)
{
int err;
- int wireless;
+ int wireless = 0;
- err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless,
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
sizeof(wireless));
if (err)
return err;
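After the char * to u32 conversion every query exchanges a single 32-bit value in both directions. A sketch of the resulting calling convention (the query constant and helper are from the patched driver; the wrapper function is illustrative):

	static int example_read_dock_state(void)
	{
		u32 state = 0;
		int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
					       sizeof(state));
		if (ret)
			return -EINVAL;
		return state & 0x1;	/* dock bit, as hp_wmi_dock_state() reads it */
	}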
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index afe82e50dfe..9024480a822 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1342,8 +1342,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
limits = &ips_lv_limits;
else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
limits = &ips_ulv_limits;
- else
+ else {
dev_info(&ips->dev->dev, "No CPUID match found.\n");
+ goto out;
+ }
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
tdp = turbo_power & TURBO_TDP_MASK;
@@ -1432,6 +1434,12 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
spin_lock_init(&ips->turbo_status_lock);
+ ret = pci_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "can't enable PCI device, aborting\n");
+ goto error_free;
+ }
+
if (!pci_resource_start(dev, 0)) {
dev_err(&dev->dev, "TBAR not assigned, aborting\n");
ret = -ENXIO;
@@ -1444,11 +1452,6 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto error_free;
}
- ret = pci_enable_device(dev);
- if (ret) {
- dev_err(&dev->dev, "can't enable PCI device, aborting\n");
- goto error_free;
- }
ips->regmap = ioremap(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index 73f8e6d7266..2b11a33325e 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar)
*/
static struct rar_device *_rar_to_device(int rar, int *off)
{
- if (rar >= 0 && rar <= 3) {
+ if (rar >= 0 && rar < MRST_NUM_RAR) {
*off = rar;
return &my_rar_device;
}
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 943f9084dcb..6abe18e638e 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
mdelay(1);
*data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
} else if (cmd == IPC_I2C_WRITE) {
- writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR);
+ writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
mdelay(1);
writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
} else {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 5d6119bed00..e35ed128bde 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1911,6 +1911,17 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
TP_ACPI_HOTKEYSCAN_MUTE,
TP_ACPI_HOTKEYSCAN_THINKPAD,
+ TP_ACPI_HOTKEYSCAN_UNK1,
+ TP_ACPI_HOTKEYSCAN_UNK2,
+ TP_ACPI_HOTKEYSCAN_UNK3,
+ TP_ACPI_HOTKEYSCAN_UNK4,
+ TP_ACPI_HOTKEYSCAN_UNK5,
+ TP_ACPI_HOTKEYSCAN_UNK6,
+ TP_ACPI_HOTKEYSCAN_UNK7,
+ TP_ACPI_HOTKEYSCAN_UNK8,
+
+ /* Hotkey keymap size */
+ TPACPI_HOTKEY_MAP_LEN
};
enum { /* Keys/events available through NVRAM polling */
@@ -3082,6 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
};
+typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
/* Requirements for changing the default keymaps:
@@ -3113,9 +3126,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* If the above is too much to ask, don't change the keymap.
* Ask the thinkpad-acpi maintainer to do it, instead.
*/
- static u16 ibm_keycode_map[] __initdata = {
+
+ enum keymap_index {
+ TPACPI_KEYMAP_IBM_GENERIC = 0,
+ TPACPI_KEYMAP_LENOVO_GENERIC,
+ };
+
+ static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = {
+ /* Generic keymap for IBM ThinkPads */
+ [TPACPI_KEYMAP_IBM_GENERIC] = {
/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
- KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP,
+ KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP,
KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
@@ -3146,11 +3167,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- };
- static u16 lenovo_keycode_map[] __initdata = {
+ },
+
+ /* Generic keymap for Lenovo ThinkPads */
+ [TPACPI_KEYMAP_LENOVO_GENERIC] = {
/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
- KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
@@ -3189,11 +3212,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ },
+ };
+
+ static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = {
+ /* Generic maps (fallback) */
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_KEYMAP_IBM_GENERIC,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_KEYMAP_LENOVO_GENERIC,
+ },
};
-#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0])
+#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
+#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0])
int res, i;
int status;
@@ -3202,6 +3239,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
bool tabletsw_state = false;
unsigned long quirks;
+ unsigned long keymap_id;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"initializing hotkey subdriver\n");
@@ -3342,7 +3380,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
goto err_exit;
/* Set up key map */
-
hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
GFP_KERNEL);
if (!hotkey_keycode_map) {
@@ -3352,17 +3389,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
goto err_exit;
}
- if (tpacpi_is_lenovo()) {
- dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "using Lenovo default hot key map\n");
- memcpy(hotkey_keycode_map, &lenovo_keycode_map,
- TPACPI_HOTKEY_MAP_SIZE);
- } else {
- dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "using IBM default hot key map\n");
- memcpy(hotkey_keycode_map, &ibm_keycode_map,
- TPACPI_HOTKEY_MAP_SIZE);
- }
+ keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
+ ARRAY_SIZE(tpacpi_keymap_qtable));
+ BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps));
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "using keymap number %lu\n", keymap_id);
+
+ memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id],
+ TPACPI_HOTKEY_MAP_SIZE);
input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
@@ -3469,7 +3503,8 @@ static bool hotkey_notify_hotkey(const u32 hkey,
*send_acpi_ev = true;
*ignore_acpi_ev = false;
- if (scancode > 0 && scancode < 0x21) {
+ /* HKEY event 0x1001 is scancode 0x00 */
+ if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) {
scancode--;
if (!(hotkey_source_mask & (1 << scancode))) {
tpacpi_input_send_key_masked(scancode);
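Worked example of the new bound, following the comment above: HKEY event 0x1001 arrives as scancode 1, passes the scancode <= TPACPI_HOTKEY_MAP_LEN test, is decremented to 0 and indexes hotkey_keycode_map[0]; the highest accepted event, 0x1000 + TPACPI_HOTKEY_MAP_LEN, lands on the final UNK slot, so the keymap length and the range check can no longer drift apart.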
@@ -6080,13 +6115,18 @@ static struct backlight_ops ibm_backlight_data = {
/* --------------------------------------------------------------------- */
+/*
+ * Call _BCL method of video device. On some ThinkPads this will
+ * switch the firmware to the ACPI brightness control mode.
+ */
+
static int __init tpacpi_query_bcl_levels(acpi_handle handle)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
int rc;
- if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
+ if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
printk(TPACPI_ERR "Unknown _BCL data, "
@@ -6103,55 +6143,22 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
return rc;
}
-static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
- u32 lvl, void *context, void **rv)
-{
- char name[ACPI_PATH_SEGMENT_LENGTH];
- struct acpi_buffer buffer = { sizeof(name), &name };
-
- if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
- !strncmp("_BCL", name, sizeof(name) - 1)) {
- BUG_ON(!rv || !*rv);
- **(int **)rv = tpacpi_query_bcl_levels(handle);
- return AE_CTRL_TERMINATE;
- } else {
- return AE_OK;
- }
-}
/*
* Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
*/
static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
{
- int status;
+ acpi_handle video_device;
int bcl_levels = 0;
- void *bcl_ptr = &bcl_levels;
-
- if (!vid_handle)
- TPACPI_ACPIHANDLE_INIT(vid);
-
- if (!vid_handle)
- return 0;
-
- /*
- * Search for a _BCL method, and execute it. This is safe on all
- * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
- * BIOS in ACPI backlight control mode. We do NOT have to care
- * about calling the _BCL method in an enabled video device, any
- * will do for our purposes.
- */
- status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
- tpacpi_acpi_walk_find_bcl, NULL, NULL,
- &bcl_ptr);
+ tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+ if (video_device)
+ bcl_levels = tpacpi_query_bcl_levels(video_device);
- if (ACPI_SUCCESS(status) && bcl_levels > 2) {
- tp_features.bright_acpimode = 1;
- return bcl_levels - 2;
- }
+ tp_features.bright_acpimode = (bcl_levels > 0);
- return 0;
+ return (bcl_levels > 2) ? (bcl_levels - 2) : 0;
}
/*
@@ -6244,28 +6251,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (tp_features.bright_unkfw)
return 1;
- if (tp_features.bright_acpimode) {
- if (acpi_video_backlight_support()) {
- if (brightness_enable > 1) {
- printk(TPACPI_NOTICE
- "Standard ACPI backlight interface "
- "available, not loading native one.\n");
- return 1;
- } else if (brightness_enable == 1) {
- printk(TPACPI_NOTICE
- "Backlight control force enabled, even if standard "
- "ACPI backlight interface is available\n");
- }
- } else {
- if (brightness_enable > 1) {
- printk(TPACPI_NOTICE
- "Standard ACPI backlight interface not "
- "available, thinkpad_acpi native "
- "brightness control enabled\n");
- }
- }
- }
-
if (!brightness_enable) {
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
"brightness support disabled by "
@@ -6273,6 +6258,26 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 1;
}
+ if (acpi_video_backlight_support()) {
+ if (brightness_enable > 1) {
+ printk(TPACPI_INFO
+ "Standard ACPI backlight interface "
+ "available, not loading native one.\n");
+ return 1;
+ } else if (brightness_enable == 1) {
+ printk(TPACPI_WARN
+ "Cannot enable backlight brightness support, "
+ "ACPI is already handling it. Refer to the "
+ "acpi_backlight kernel parameter\n");
+ return 1;
+ }
+ } else if (tp_features.bright_acpimode && brightness_enable > 1) {
+ printk(TPACPI_NOTICE
+ "Standard ACPI backlight interface not "
+ "available, thinkpad_acpi native "
+ "brightness control enabled\n");
+ }
+
/*
* Check for module parameter bogosity, note that we
* init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index c6cbcb3f925..0e9a309b966 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,12 +16,11 @@
#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
-static struct tty_struct *sysrq_tty;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
- handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
+ handle_sysrq(ctrlchar_sysrq_key);
}
static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
/* racy */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq_key = buf[2];
- sysrq_tty = tty;
schedule_work(&ctrlchar_work);
return CTRLCHAR_SYSRQ;
}
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 18d9a497863..8cd58e412b5 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
if (kbd->sysrq) {
if (kbd->sysrq == K(KT_LATIN, '-')) {
kbd->sysrq = 0;
- handle_sysrq(value, kbd->tty);
+ handle_sysrq(value);
return;
}
if (value == '-') {
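handle_sysrq() loses its tty argument throughout this series. The updated caller pattern for a console or serial driver, in isolation (the driver function name is illustrative):

	#include <linux/sysrq.h>

	static void example_break_char(char ch)
	{
		if (ch)
			handle_sysrq(ch);	/* was handle_sysrq(ch, tty) */
	}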
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 95a895dd4f1..c8dc392edd5 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -56,6 +56,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <asm/dma.h>
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index f065204e401..95a26fb1626 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -132,7 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
-inline void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e031a734836..5d4a3822382 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}
-inline void
+void
qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
{
uint32_t drv_active;
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 7356a56ac45..be0ebce36e5 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -869,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info,
tmp.close_delay = info->close_delay;
tmp.closing_wait = info->closing_wait;
tmp.custom_divisor = info->custom_divisor;
- copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
return 0;
}
@@ -882,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info,
if (!new_info)
return -EFAULT;
- copy_from_user(&new_serial,new_info,sizeof(new_serial));
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
old_info = *info;
if (!capable(CAP_SYS_ADMIN)) {
@@ -943,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
status = 0;
#endif
local_irq_restore(flags);
- put_user(status,value);
- return 0;
+ return put_user(status, value);
}
/*
@@ -999,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
send_break(info, arg ? arg*(100) : 250);
return 0;
case TIOCGSERIAL:
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct)))
- return get_serial_info(info,
- (struct serial_struct *) arg);
- return -EFAULT;
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
case TIOCSSERIAL:
return set_serial_info(info,
(struct serial_struct *) arg);
case TIOCSERGETLSR: /* Get line status register */
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(unsigned int)))
- return get_lsr_info(info, (unsigned int *) arg);
- return -EFAULT;
+ return get_lsr_info(info, (unsigned int *) arg);
case TIOCSERGSTRUCT:
- if (!access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(struct m68k_serial)))
+ if (copy_to_user((struct m68k_serial *) arg,
+ info, sizeof(struct m68k_serial)))
return -EFAULT;
- copy_to_user((struct m68k_serial *) arg,
- info, sizeof(struct m68k_serial));
return 0;
-
default:
return -ENOIOCTLCMD;
}
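The ioctl helpers now propagate -EFAULT instead of silently ignoring failed user copies. The pattern these hunks converge on, shown in isolation (the wrapper is illustrative):

	#include <linux/serial.h>
	#include <linux/uaccess.h>

	static int example_copy_out(struct serial_struct __user *uarg,
				    const struct serial_struct *kern)
	{
		if (copy_to_user(uarg, kern, sizeof(*kern)))
			return -EFAULT;
		return 0;
	}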
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index b745792ec25..eaafb98debe 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -203,13 +203,13 @@ static int __init parse_options(struct early_serial8250_device *device,
if (mmio || mmio32)
printk(KERN_INFO
- "Early serial console at MMIO%s 0x%llu (options '%s')\n",
+ "Early serial console at MMIO%s 0x%llx (options '%s')\n",
mmio32 ? "32" : "",
(unsigned long long)port->mapbase,
device->options);
else
printk(KERN_INFO
- "Early serial console at I/O port 0x%lu (options '%s')\n",
+ "Early serial console at I/O port 0x%lx (options '%s')\n",
port->iobase,
device->options);
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index e57fb3d228e..5318dd3774a 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
unsigned int sclk = get_sclk();
/* Set TCR1 and TCR2, TFSR is not enabled for uart */
- SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+ SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
SPORT_PUT_TCR2(up, size + 1);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 659a695bdad..2af8fd11312 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -14,11 +14,10 @@
#include <linux/slab.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>
-#include <asm/prom.h>
-
struct of_serial_info {
int type;
int line;
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 7e5e5efea4e..cff9a306660 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
sysrq_requested = 0;
if (ch && time_before(jiffies, sysrq_timeout)) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
- handle_sysrq(ch, NULL);
+ handle_sysrq(ch);
spin_lock_irqsave(&port->sc_port.lock, flags);
/* ignore actual sysrq command char */
continue;
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 544f2e25d0e..6381a0282ee 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors);
int sunserial_console_match(struct console *con, struct device_node *dp,
struct uart_driver *drv, int line, bool ignore_line)
{
- if (!con || of_console_device != dp)
+ if (!con)
+ return 0;
+
+ drv->cons = con;
+
+ if (of_console_device != dp)
return 0;
if (!ignore_line) {
@@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
return 0;
}
- con->index = line;
- drv->cons = con;
-
- if (!console_set_on_cmdline)
+ if (!console_set_on_cmdline) {
+ con->index = line;
add_preferred_console(con->name, line, NULL);
-
+ }
return 1;
}
EXPORT_SYMBOL(sunserial_console_match);
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
index 59be3efe063..052b3c7fa6a 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/coldfire_qspi.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/io.h>
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4a7a7a7f11b..335311a98fd 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig"
source "drivers/staging/memrar/Kconfig"
-source "drivers/staging/sep/Kconfig"
-
source "drivers/staging/iio/Kconfig"
source "drivers/staging/zram/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ca5c03eb3ce..e3f1e1b6095 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL) += udlfb/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
-obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
index b4a8d5eb64f..05ca15a6c9f 100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
if (atomic_read(&bat_priv->log_level) == log_level_tmp)
return count;
+ bat_info(net_dev, "Changing log level from: %i to: %li\n",
+ atomic_read(&bat_priv->log_level),
+ log_level_tmp);
+
atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
return count;
}
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 92c216a5688..baa8b05b9e8 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -129,6 +129,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
static void update_mac_addresses(struct batman_if *batman_if)
{
+ if (!batman_if || !batman_if->packet_buff)
+ return;
+
addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -194,8 +197,6 @@ static void hardif_activate_interface(struct net_device *net_dev,
if (batman_if->if_status != IF_INACTIVE)
return;
- dev_hold(batman_if->net_dev);
-
update_mac_addresses(batman_if);
batman_if->if_status = IF_TO_BE_ACTIVATED;
@@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev,
(batman_if->if_status != IF_TO_BE_ACTIVATED))
return;
- dev_put(batman_if->net_dev);
-
batman_if->if_status = IF_INACTIVE;
bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
@@ -318,11 +317,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
if (ret != 1)
goto out;
+ dev_hold(net_dev);
+
batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
if (!batman_if) {
pr_err("Can't add interface (%s): out of memory\n",
net_dev->name);
- goto out;
+ goto release_dev;
}
batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
@@ -336,6 +337,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
batman_if->if_num = -1;
batman_if->net_dev = net_dev;
batman_if->if_status = IF_NOT_IN_USE;
+ batman_if->packet_buff = NULL;
INIT_LIST_HEAD(&batman_if->list);
check_known_mac_addr(batman_if->net_dev->dev_addr);
@@ -346,6 +348,8 @@ free_dev:
kfree(batman_if->dev);
free_if:
kfree(batman_if);
+release_dev:
+ dev_put(net_dev);
out:
return NULL;
}
@@ -374,6 +378,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
batman_if->if_status = IF_TO_BE_REMOVED;
list_del_rcu(&batman_if->list);
sysfs_del_hardif(&batman_if->hardif_obj);
+ dev_put(batman_if->net_dev);
call_rcu(&batman_if->rcu, hardif_free_interface);
}
@@ -393,15 +398,13 @@ static int hard_if_event(struct notifier_block *this,
/* FIXME: each batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
- if (!batman_if)
- batman_if = hardif_add_interface(net_dev);
+ if (!batman_if && event == NETDEV_REGISTER)
+ batman_if = hardif_add_interface(net_dev);
if (!batman_if)
goto out;
switch (event) {
- case NETDEV_REGISTER:
- break;
case NETDEV_UP:
hardif_activate_interface(soft_device, bat_priv, batman_if);
break;
@@ -442,8 +445,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct bat_priv *bat_priv = netdev_priv(soft_device);
struct batman_packet *batman_packet;
struct batman_if *batman_if;
- struct net_device_stats *stats;
- struct rtnl_link_stats64 temp;
int ret;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -479,12 +480,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
if (batman_if->if_status != IF_ACTIVE)
goto err_free;
- stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
- if (stats) {
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- }
-
batman_packet = (struct batman_packet *)skb->data;
if (batman_packet->version != COMPAT_VERSION) {
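The reference on the underlying net_device now follows add/remove instead of up/down. A condensed sketch of the new pairing (struct batman_if is the driver's own type from types.h; the wrapper functions are illustrative):

	static struct batman_if *example_add(struct net_device *net_dev)
	{
		struct batman_if *bi;

		dev_hold(net_dev);		/* taken once, at add time */
		bi = kmalloc(sizeof(*bi), GFP_ATOMIC);
		if (!bi) {
			dev_put(net_dev);	/* dropped again on the error path */
			return NULL;
		}
		bi->net_dev = net_dev;
		return bi;
	}

	static void example_remove(struct batman_if *bi)
	{
		dev_put(bi->net_dev);		/* dropped once, at remove time */
		kfree(bi);
	}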
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
index fc3d32c1272..3ae7dd2d2d4 100644
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ b/drivers/staging/batman-adv/icmp_socket.c
@@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
INIT_LIST_HEAD(&socket_client->queue_list);
socket_client->queue_len = 0;
socket_client->index = i;
+ socket_client->bat_priv = inode->i_private;
spin_lock_init(&socket_client->lock);
init_waitqueue_head(&socket_client->queue_wait);
@@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
static ssize_t bat_socket_write(struct file *file, const char __user *buff,
size_t len, loff_t *off)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct socket_client *socket_client = file->private_data;
+ struct bat_priv *bat_priv = socket_client->bat_priv;
struct icmp_packet_rr icmp_packet;
struct orig_node *orig_node;
struct batman_if *batman_if;
@@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
return -EINVAL;
}
+ if (!bat_priv->primary_if)
+ return -EFAULT;
+
if (len >= sizeof(struct icmp_packet_rr))
packet_len = sizeof(struct icmp_packet_rr);
@@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
if (batman_if->if_status != IF_ACTIVE)
goto dst_unreach;
- memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(icmp_packet.orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
if (packet_len == sizeof(struct icmp_packet_rr))
memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
@@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv)
goto err;
d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
- bat_priv->debug_dir, NULL, &fops);
+ bat_priv->debug_dir, bat_priv, &fops);
if (d)
goto err;
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index 2686019fe4e..ef7c20ae797 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size)
int is_my_mac(uint8_t *addr)
{
struct batman_if *batman_if;
+
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->net_dev) &&
- (compare_orig(batman_if->net_dev->dev_addr, addr))) {
+ if (batman_if->if_status != IF_ACTIVE)
+ continue;
+
+ if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
}
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index 28bb627ffa1..de5a8c1a810 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
struct orig_node *orig_node;
+ unsigned long flags;
HASHIT(hashit);
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
@@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
goto err;
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
@@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
+ unsigned long flags;
HASHIT(hashit);
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
@@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
rcu_read_unlock();
batman_if->if_num = -1;
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
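orig_hash_lock can be taken from contexts where interrupt state matters, so the plain spin_lock() calls become the irqsave variants. The pattern in isolation:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static void example_critical_section(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* ... iterate the hash, resize per-interface arrays ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}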
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 066cc9149bf..032195e6de9 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb,
static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
@@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
return NET_RX_DROP;
}
+ if (!bat_priv->primary_if)
+ return NET_RX_DROP;
+
/* answer echo request (ping) */
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
@@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+ memcpy(icmp_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
icmp_packet->ttl = TTL;
@@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
struct ethhdr *ethhdr;
@@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
return NET_RX_DROP;
}
+ if (!bat_priv->primary_if)
+ return NET_RX_DROP;
+
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
@@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+ memcpy(icmp_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
icmp_packet->ttl = TTL;
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index 21d0717afb0..9aa9d369c75 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -126,6 +126,7 @@ struct socket_client {
unsigned char index;
spinlock_t lock;
wait_queue_head_t queue_wait;
+ struct bat_priv *bat_priv;
};
struct socket_packet {
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index c6aa52f8dce..48d9fb1227d 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |=
pcmcia_io_cfg_data_width(io->flags);
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
p_dev->resource[0]->start = io->win[0].base;
p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 56e11575c97..64a01147eca 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = {
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_multicast_list = netvsc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
};
static int netvsc_probe(struct device *device)
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index 17bc7626f70..d78c569ac94 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -193,8 +193,7 @@ Description:
static inline u64
GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
{
- return ((u64)RingInfo->RingBuffer->WriteIndex << 32)
- || RingInfo->RingBuffer->ReadIndex;
+ return (u64)RingInfo->RingBuffer->WriteIndex << 32;
}
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 0063bde9a4b..8505a1c5f9e 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -28,10 +28,10 @@
#include "vmbus_api.h"
/* Defines */
-#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-#define STORVSC_MAX_IO_REQUESTS 64
+#define STORVSC_MAX_IO_REQUESTS 128
/*
* In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 075b61bd492..62882a437aa 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
/* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (srclen) {
@@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
destlen = orig_sgl[i].length;
/* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (destlen) {
@@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
unsigned int request_size = 0;
int i;
struct scatterlist *sgl;
+ unsigned int sg_count = 0;
DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
"queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
request->DataBuffer.Length = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
scsi_sg_count(scmnd));
sgl = cmd_request->bounce_sgl;
+ sg_count = cmd_request->bounce_sgl_count;
}
request->DataBuffer.Offset = sgl[0].offset;
- for (i = 0; i < scsi_sg_count(scmnd); i++) {
+ for (i = 0; i < sg_count; i++) {
DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
i, sgl[i].length, sgl[i].offset);
request->DataBuffer.PfnArray[i] =
- page_to_pfn(sg_page((&sgl[i])));
+ page_to_pfn(sg_page((&sgl[i])));
}
} else if (scsi_sglist(scmnd)) {
/* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 638ad6b3589..9493128e5fd 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
- depends on CPU_CAVIUM_OCTEON
+ depends on CPU_CAVIUM_OCTEON && NETDEVICES
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
index cdc4dd50d63..8ec83d2dffb 100644
--- a/drivers/staging/pohmelfs/path_entry.c
+++ b/drivers/staging/pohmelfs/path_entry.c
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le
return -ENOENT;
}
- read_lock(&current->fs->lock);
+ spin_lock(&current->fs->lock);
path.mnt = mntget(current->fs->root.mnt);
- read_unlock(&current->fs->lock);
+ spin_unlock(&current->fs->lock);
path.dentry = d;
@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
return -ENOENT;
}
- read_lock(&current->fs->lock);
+ spin_lock(&current->fs->lock);
root = dget(current->fs->root.dentry);
- read_unlock(&current->fs->lock);
+ spin_unlock(&current->fs->lock);
spin_lock(&dcache_lock);
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index a0fe31de0a6..ebf9074a908 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */
{USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */
{USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */
+ {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */
{USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */
{USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */
{USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */
@@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x050d, 0x815c)},
{USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */
{USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */
- {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */
+ {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */
+ {USB_DEVICE(0x1690, 0x0740)}, /* Askey */
{USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */
{USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
{USB_DEVICE(0x7392, 0x7718)},
@@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
{USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
{USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
+ {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
#endif /* RT2870 // */
#ifdef RT3070
{USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */
{USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */
{USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */
{USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */
{USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */
{USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */
+ {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */
+ {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */
{USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */
{USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */
{USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */
+ {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */
+ {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */
{USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */
{USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */
{USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */
+ {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */
+ {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */
+ {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */
{USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */
+ {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/
{USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */
{USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */
{USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */
@@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */
{USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */
{USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */
+ {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */
+ {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */
{USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */
{USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */
{USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */
{USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */
{USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */
+ {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */
+ {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */
{USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */
+ {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */
+ {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */
+ {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/
+ {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/
+ {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/
+ {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/
+ {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */
+ {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */
+ {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */
+ {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */
+ {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */
+ {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */
+ {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */
#endif /* RT3070 // */
- {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */
{USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */
{USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
{USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644
index 0a9c39c7f2b..00000000000
--- a/drivers/staging/sep/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config DX_SEP
- tristate "Discretix SEP driver"
-# depends on MRST
- depends on RAR_REGISTER && PCI
- default y
- help
- Discretix SEP driver
-
- If unsure say M. The compiled module will be
- called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644
index 628d5f91941..00000000000
--- a/drivers/staging/sep/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644
index ff0e931dab6..00000000000
--- a/drivers/staging/sep/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-Todo's so far (from Alan Cox)
-- Fix firmware loading
-- Get firmware into firmware git tree
-- Review and tidy each algorithm function
-- Check whether it can be plugged into any of the kernel crypto API
- interfaces
-- Do something about the magic shared memory interface and replace it
- with something saner (in Linux terms)
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644
index 9200524bb64..00000000000
--- a/drivers/staging/sep/sep_dev.h
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef __SEP_DEV_H__
-#define __SEP_DEV_H__
-
-/*
- *
- * sep_dev.h - Security Processor Device Structures
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Alan Cox alan@linux.intel.com
- *
- */
-
-struct sep_device {
- /* pointer to pci dev */
- struct pci_dev *pdev;
-
- unsigned long in_use;
-
- /* address of the shared memory allocated during init for SEP driver
- (coherent alloc) */
- void *shared_addr;
- /* the physical address of the shared area */
- dma_addr_t shared_bus;
-
- /* restricted access region (coherent alloc) */
- dma_addr_t rar_bus;
- void *rar_addr;
- /* firmware regions: cache is at rar_addr */
- unsigned long cache_size;
-
- /* follows the cache */
- dma_addr_t resident_bus;
- unsigned long resident_size;
- void *resident_addr;
-
- /* start address of the access to the SEP registers from driver */
- void __iomem *reg_addr;
- /* transaction counter that coordinates the transactions between SEP and HOST */
- unsigned long send_ct;
- /* counter for the messages from sep */
- unsigned long reply_ct;
- /* counter for the number of bytes allocated in the pool for the current
- transaction */
- unsigned long data_pool_bytes_allocated;
-
- /* array of pointers to the pages that represent input data for the synchronic
- DMA action */
- struct page **in_page_array;
-
- /* array of pointers to the pages that represent out data for the synchronic
- DMA action */
- struct page **out_page_array;
-
- /* number of pages in the sep_in_page_array */
- unsigned long in_num_pages;
-
- /* number of pages in the sep_out_page_array */
- unsigned long out_num_pages;
-
- /* global data for every flow */
- struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
-
- /* pointer to the workqueue that handles the flow done interrupts */
- struct workqueue_struct *flow_wq;
-
-};
-
-static struct sep_device *sep_dev;
-
-static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
-{
- void __iomem *addr = dev->reg_addr + reg;
- writel(value, addr);
-}
-
-static inline u32 sep_read_reg(struct sep_device *dev, int reg)
-{
- void __iomem *addr = dev->reg_addr + reg;
- return readl(addr);
-}
-
-/* wait for SRAM write complete(indirect write */
-static inline void sep_wait_sram_write(struct sep_device *dev)
-{
- u32 reg_val;
- do
- reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
- while (!(reg_val & 1));
-}
-
-
-#endif
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index ecbde3467b1..00000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2742 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <asm/ioctl.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-#if SEP_DRIVER_ARM_DEBUG_MODE
-
-#define CRYS_SEP_ROM_length 0x4000
-#define CRYS_SEP_ROM_start_address 0x8000C000UL
-#define CRYS_SEP_ROM_start_address_offset 0xC000UL
-#define SEP_ROM_BANK_register 0x80008420UL
-#define SEP_ROM_BANK_register_offset 0x8420UL
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
-
-/*
- * THESE 2 definitions are specific to the board - must be
- * defined during integration
- */
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
-
-/* 2M size */
-
-static void sep_load_rom_code(struct sep_device *sep)
-{
- /* Index variables */
- unsigned long i, k, j;
- u32 reg;
- u32 error;
- u32 warning;
-
- /* Loading ROM from SEP_ROM_image.h file */
- k = sizeof(CRYS_SEP_ROM);
-
- edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
-
- edbg("SEP Driver: k is %lu\n", k);
- edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
- edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
-
- for (i = 0; i < 4; i++) {
- /* write bank */
- sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
-
- for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
- sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
-
- k = k - 4;
-
- if (k == 0) {
- j = CRYS_SEP_ROM_length;
- i = 4;
- }
- }
- }
-
- /* reset the SEP */
- sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
-
- /* poll for SEP ROM boot finish */
- do
- reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg);
-
- edbg("SEP Driver: ROM polling ended\n");
-
- switch (reg) {
- case 0x1:
- /* fatal error - read erro status from GPRO */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver: ROM polling case 1\n");
- break;
- case 0x4:
- /* Cold boot ended successfully */
- case 0x8:
- /* Warmboot ended successfully */
- case 0x10:
- /* ColdWarm boot ended successfully */
- error = 0;
- case 0x2:
- /* Boot First Phase ended */
- warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- case 0x20:
- edbg("SEP Driver: ROM polling case %d\n", reg);
- break;
- }
-
-}
-
-#else
-static void sep_load_rom_code(struct sep_device *sep) { }
-#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
-
-
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* debug messages level */
-static int debug;
-module_param(debug, int , 0);
-MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device sep_instance;
-static struct sep_device *sep_dev = &sep_instance;
-
-/*
- mutex for the access to the internals of the sep driver
-*/
-static DEFINE_MUTEX(sep_mutex);
-
-
-/* wait queue head (event) of the driver */
-static DECLARE_WAIT_QUEUE_HEAD(sep_event);
-
-/**
- * sep_load_firmware - copy firmware cache/resident
- * @sep: device we are loading
- *
- * This functions copies the cache and resident from their source
- * location into destination shared memory.
- */
-
-static int sep_load_firmware(struct sep_device *sep)
-{
- const struct firmware *fw;
- char *cache_name = "sep/cache.image.bin";
- char *res_name = "sep/resident.image.bin";
- int error;
-
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-
- /* load cache */
- error = request_firmware(&fw, cache_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request cache fw\n");
- return error;
- }
- edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
-
- memcpy(sep->rar_addr, (void *)fw->data, fw->size);
- sep->cache_size = fw->size;
- release_firmware(fw);
-
- sep->resident_bus = sep->rar_bus + sep->cache_size;
- sep->resident_addr = sep->rar_addr + sep->cache_size;
-
- /* load resident */
- error = request_firmware(&fw, res_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request res fw\n");
- return error;
- }
- edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
-
- memcpy(sep->resident_addr, (void *) fw->data, fw->size);
- sep->resident_size = fw->size;
- release_firmware(fw);
-
- edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
- sep->resident_addr, (unsigned long long)sep->resident_bus,
- sep->rar_addr, (unsigned long long)sep->rar_bus);
- return 0;
-}
-
-MODULE_FIRMWARE("sep/cache.image.bin");
-MODULE_FIRMWARE("sep/resident.image.bin");
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- * @size: size of shared area
- *
- * Allocate a shared buffer in host memory that can be used by both the
- * kernel and also the hardware interface via DMA.
- */
-
-static int sep_map_and_alloc_shared_area(struct sep_device *sep,
- unsigned long size)
-{
- /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- /* set the bus address of the shared area */
- edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
- size, sep->shared_addr, (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- *
- * Free the shared area allocated to the security processor. The
- * processor must have finished with this and any final posted
- * writes cleared before we do so.
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
-{
- dma_free_coherent(&sep->pdev->dev, size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_virt_to_bus - convert bus/virt addresses
- *
- * Returns the bus address inside the shared area according
- * to the virtual address.
- */
-
-static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
- edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
- virt_address);
- return pa;
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
-
-/**
- * sep_try_open - attempt to open a SEP device
- * @sep: device to attempt to open
- *
- * Atomically attempt to get ownership of a SEP device.
- * Returns 1 if the device was opened, 0 on failure.
- */
-
-static int sep_try_open(struct sep_device *sep)
-{
- if (!test_and_set_bit(0, &sep->in_use))
- return 1;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of sep device
- * @filp: file handle to sep device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node. Must also release the memory data pool
- * allocations.
- *
- * Returns zero on success otherwise an error code.
- */
-
-static int sep_open(struct inode *inode, struct file *filp)
-{
- if (sep_dev == NULL)
- return -ENODEV;
-
- /* check the blocking mode */
- if (filp->f_flags & O_NDELAY) {
- if (sep_try_open(sep_dev) == 0)
- return -EAGAIN;
- } else
- if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
- return -EINTR;
-
- /* Bind to the device, we only have one which makes it easy */
- filp->private_data = sep_dev;
- /* release data pool allocations */
- sep_dev->data_pool_bytes_allocated = 0;
- return 0;
-}
-
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaenous opens that means this method is called when the
- * final reference to the open handle is dropped.
- */
-
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-#endif
- /* Ensure any blocked open progresses */
- clear_bit(0, &sep->in_use);
- wake_up(&sep_event);
- return 0;
-}
-
-/*---------------------------------------------------------------
- map function - this functions maps the message shared area
------------------------------------------------------------------*/
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
-
- dbg("-------->SEP Driver: mmap start\n");
-
- /* check that the size of the mapped range is as the size of the message
- shared area */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- edbg("SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
- return -EAGAIN;
- }
-
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- /* get bus address */
- bus_addr = sep->shared_bus;
-
- edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- edbg("SEP Driver remap_page_range failed\n");
- printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
- return -EAGAIN;
- }
-
- dbg("SEP Driver:<-------- mmap end\n");
-
- return 0;
-}
-
-
-/*-----------------------------------------------
- poll function
-*----------------------------------------------*/
-static unsigned int sep_poll(struct file *filp, poll_table * wait)
-{
- unsigned long count;
- unsigned int mask = 0;
- unsigned long retval = 0; /* flow id */
- struct sep_device *sep = filp->private_data;
-
- dbg("---------->SEP Driver poll: start\n");
-
-
-#if SEP_DRIVER_POLLING_MODE
-
- while (sep->send_ct != (retval & 0x7FFFFFFF)) {
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
- }
-
- sep->reply_ct++;
-#else
- /* add the event to the polling wait table */
- poll_wait(filp, &sep_event, wait);
-
-#endif
-
- edbg("sep->send_ct is %lu\n", sep->send_ct);
- edbg("sep->reply_ct is %lu\n", sep->reply_ct);
-
- /* check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
-
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- edbg("retval is %lu\n", retval);
- /* check if the this is sep reply or request */
- if (retval >> 31) {
- edbg("SEP Driver: sep request in\n");
- /* request */
- mask |= POLLOUT | POLLWRNORM;
- } else {
- edbg("SEP Driver: sep reply in\n");
- mask |= POLLIN | POLLRDNORM;
- }
- }
- dbg("SEP Driver:<-------- poll exit\n");
- return mask;
-}
-
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the sep mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* address of time as seen by the kernel */
-
-
- dbg("sep:sep_set_time start\n");
-
- do_gettimeofday(&time);
-
- /* set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
- edbg("SEP Driver:time_addr is %p\n", time_addr);
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: sep device
- *
- * Dump out the message pending in the shared message area
- */
-
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
-}
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: sep being signalled
- *
- * This function raises interrupt to SEP that signals that is has a new
- * command from the host
- */
-
-static void sep_send_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_command_handler start\n");
-
- mutex_lock(&sep_mutex);
- sep_set_time(sep);
-
- /* FIXME: flush cache */
- flush_cache_all();
-
- sep_dump_message(sep);
- /* update counter */
- sep->send_ct++;
- /* send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
- dbg("SEP Driver:<-------- sep_send_command_handler end\n");
- mutex_unlock(&sep_mutex);
- return;
-}
-
-/**
- * sep_send_reply_command_handler - kick off a command reply
- * @sep: sep being signalled
- *
- * This function raises interrupt to SEP that signals that is has a new
- * command from the host
- */
-
-static void sep_send_reply_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_reply_command_handler start\n");
-
- /* flash cache */
- flush_cache_all();
-
- sep_dump_message(sep);
-
- mutex_lock(&sep_mutex);
- sep->send_ct++; /* update counter */
- /* send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- /* update both counters */
- sep->send_ct++;
- sep->reply_ct++;
- mutex_unlock(&sep_mutex);
- dbg("sep: sep_send_reply_command_handler end\n");
-}
-
-/*
- This function handles the allocate data pool memory request
- This function returns calculates the bus address of the
- allocated memory, and the offset of this area from the mapped address.
- Therefore, the FVOs in user space can calculate the exact virtual
- address of this allocated memory
-*/
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- struct sep_driver_alloc_t command_args;
-
- dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
- command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-
- /* write the memory back to the user space */
- error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* set the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
-
-end_function:
- dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
- return error;
-}
-
-/*
- This function handles write into allocated data pool command
-*/
-static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- void *virt_address;
- unsigned long va;
- unsigned long app_in_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
- error = -EINVAL;
- goto end_function;
- }
- /* copy the application data */
- error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
- return error;
-}
-
-/*
- this function handles the read from data pool command
-*/
-static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* virtual address of dest application buffer */
- unsigned long app_out_address;
- /* virtual address of the data pool */
- unsigned long va;
- void *virt_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
- /* FIXME: These are incomplete all over the driver: what about + len
- and when doing that also overflows */
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* copy the application data */
- error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
- return error;
-}
-
-/*
- This function releases all the application virtual buffer physical pages,
- that were previously locked
-*/
-static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
-{
- unsigned long count;
-
- if (dirtyFlag) {
- for (count = 0; count < num_pages; count++) {
- /* the out array was written, therefore the data was changed */
- if (!PageReserved(page_array_ptr[count]))
- SetPageDirty(page_array_ptr[count]);
- page_cache_release(page_array_ptr[count]);
- }
- } else {
- /* free in pages - the data was only read, therefore no update was done
- on those pages */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array_ptr[count]);
- }
-
- if (page_array_ptr)
- /* free the array */
- kfree(page_array_ptr);
-
- return 0;
-}
-
-/*
- This function locks all the physical pages of the kernel virtual buffer
- and construct a basic lli array, where each entry holds the physical
- page address and the size that application data holds in this physical pages
-*/
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct sep_lli_entry_t *lli_array;
- /* next kernel address to map */
- unsigned long next_kernel_address;
- unsigned long count;
-
- dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = kernel_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lx\n", start_page);
- edbg("SEP Driver: end_page is %lx\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the start address of the first page - app data may start not at
- the beginning of the page */
- lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* advance the address to the start of the next page */
- next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
-
- /* go from the second page to the prev before last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- next_kernel_address += PAGE_SIZE;
- }
-
- /* if more then 1 pages locked - then update for the last page size needed */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-
- /* set the size of the last page */
- lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = 0;
-end_function:
- dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
- return 0;
-}
-
-/*
- This function locks all the physical pages of the application virtual buffer
- and construct a basic lli array, where each entry holds the physical page
- address and the size that application data holds in this physical pages
-*/
-static int sep_lock_user_pages(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct page **page_array;
- struct sep_lli_entry_t *lli_array;
- unsigned long count;
- int result;
-
- dbg("SEP Driver:--------> sep_lock_user_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lu\n", start_page);
- edbg("SEP Driver: end_page is %lu\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- /* allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- edbg("SEP Driver: kmalloc for page_array failed\n");
-
- error = -ENOMEM;
- goto end_function;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
-
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- /* convert the application virtual address into a set of physical */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
- up_read(&current->mm->mmap_sem);
-
- /* check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dbg("SEP Driver: not all pages locked by get_user_pages\n");
-
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* flush the cache */
- for (count = 0; count < num_pages; count++)
- flush_dcache_page(page_array[count]);
-
- /* set the start address of the first page - app data may start not at
- the beginning of the page */
- lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* go from the second page to the prev before last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
-
- /* if more then 1 pages locked - then update for the last page size needed */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-
- /* set the size of the last page */
- lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", app_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
- count, lli_array[count].physical_address,
- count, lli_array[count].block_size);
- }
-
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = page_array;
- goto end_function;
-
-end_function_with_error2:
- /* release the cache */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array[count]);
- kfree(lli_array);
-end_function_with_error1:
- kfree(page_array);
-end_function:
- dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
- return 0;
-}
-
-
-/*
- this function calculates the size of data that can be inserted into the lli
- table from this array the condition is that either the table is full
- (all etnries are entered), or there are no more entries in the lli array
-*/
-static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
-{
- unsigned long table_data_size = 0;
- unsigned long counter;
-
- /* calculate the data in the out lli table if till we fill the whole
- table or till the data has ended */
- for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
- return table_data_size;
-}
-
-/*
- this functions builds ont lli table from the lli_array according to
- the given size of data
-*/
-static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
-{
- unsigned long curr_table_data_size;
- /* counter of lli array entry */
- unsigned long array_counter;
-
- dbg("SEP Driver:--------> sep_build_lli_table start\n");
-
- /* init currrent table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
- lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
- curr_table_data_size += lli_table_ptr->block_size;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- edbg("SEP Driver:curr_table_data_size > table_data_size\n");
-
- /* update the size of block in the table */
- lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
-
- /* update the physical address in the lli array */
- lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
-
- /* update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
- } else
- /* advance to the next entry in the lli_array */
- array_counter++;
-
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* set the info entry to default */
- lli_table_ptr->physical_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
- edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
- dbg("SEP Driver:<-------- sep_build_lli_table end\n");
- return;
-}
-
-/*
- this function goes over the list of the print created tables and
- prints all the data
-*/
-static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
-{
- unsigned long table_count;
- unsigned long entries_count;
-
- dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
-
- table_count = 1;
- while ((unsigned long) lli_table_ptr != 0xffffffff) {
- edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
- edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
-
- /* print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
- edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
- }
-
- /* point to the info entry */
- lli_table_ptr--;
-
- edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
- edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-
-
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
- lli_table_ptr = (struct sep_lli_entry_t *)
- (lli_table_ptr->physical_address);
-
- edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
-
- if ((unsigned long) lli_table_ptr != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
-
- table_count++;
- }
- dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
-}
-
-
-/*
- This function prepares only input DMA table for synhronic symmetric
- operations (HASH)
-*/
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_ptr,
- unsigned long *num_entries_ptr,
- unsigned long *table_data_size_ptr,
- bool isKernelVirtualAddress)
-{
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_entry_ptr;
- /* array of pointers ot page */
- struct sep_lli_entry_t *lli_array_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_entry;
- /* num entries in the virtual buffer */
- unsigned long sep_lli_entries;
- /* lli table pointer */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* the total data in one table */
- unsigned long table_data_size;
- /* number of entries in lli table */
- unsigned long num_entries_in_table;
- /* next table address */
- void *lli_table_alloc_addr;
- unsigned long result;
-
- dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
-
- edbg("SEP Driver:data_size is %lu\n", data_size);
- edbg("SEP Driver:block_size is %lu\n", block_size);
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->in_num_pages = 0;
-
- if (data_size == 0) {
- /* special case - created 2 entries table with zero data */
- in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
- /* FIXME: Should the entry below not be for _bus */
- in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- in_lli_table_ptr->block_size = 0;
-
- in_lli_table_ptr++;
- in_lli_table_ptr->physical_address = 0xFFFFFFFF;
- in_lli_table_ptr->block_size = 0;
-
- *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- goto end_function;
- }
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
- else
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-
- if (result)
- return result;
-
- edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = 0;
- sep_lli_entries = sep->in_num_pages;
-
- /* initiate to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* loop till all the entries in in array are not processed */
- while (current_entry < sep_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
-
- /* now calculate the table size so that it will be module block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
-
- /* the array of the pages */
- kfree(lli_array_ptr);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
- return 0;
-
-}
-
-/*
- This function creates the input and output dma tables for
- symmetric operations (AES/DES) according to the block size from LLI arays
-*/
-static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
- struct sep_lli_entry_t *lli_in_array,
- unsigned long sep_in_lli_entries,
- struct sep_lli_entry_t *lli_out_array,
- unsigned long sep_out_lli_entries,
- unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
-{
- /* points to the area where next lli table can be allocated: keep void *
- as there is pointer scaling to fix otherwise */
- void *lli_table_alloc_addr;
- /* input lli table */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* output lli table */
- struct sep_lli_entry_t *out_lli_table_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_in_entry_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_out_entry_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_in_entry;
- /* points to the first entry to be processed in the lli_out_array */
- unsigned long current_out_entry;
- /* max size of the input table */
- unsigned long in_table_data_size;
- /* max size of the output table */
- unsigned long out_table_data_size;
- /* flag te signifies if this is the first tables build from the arrays */
- unsigned long first_table_flag;
- /* the data size that should be in table */
- unsigned long table_data_size;
- /* number of etnries in the input table */
- unsigned long num_entries_in_table;
- /* number of etnries in the output table */
- unsigned long num_entries_out_table;
-
- dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
-
- /* initiate to pint after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- current_in_entry = 0;
- current_out_entry = 0;
- first_table_flag = 1;
- info_in_entry_ptr = 0;
- info_out_entry_ptr = 0;
-
- /* loop till all the entries in in array are not processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* set the first output tables */
- out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
-
- /* calculate the maximum size of data for output table */
- out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
-
- edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
- edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
-
- /* check where the data is smallest */
- table_data_size = in_table_data_size;
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /* now calculate the table size so that it will be module block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
-
- /* construct output lli table */
- sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
-
- /* if info entry is null - this is the first table built */
- if (info_in_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *in_num_entries_ptr = num_entries_in_table;
- *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
- edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-
- /* update the info entry of the previous in table */
- info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
-
- edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
- edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
- edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
- /* print output tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
- dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
- return 0;
-}
-
-
-/*
- This function builds the input and output DMA tables for synchronous
- symmetric operations (AES, DES). It also ensures that each table's
- data size is a multiple of the block size
-*/
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
-{
- /* array of pointers of page */
- struct sep_lli_entry_t *lli_in_array;
- /* array of pointers of page */
- struct sep_lli_entry_t *lli_out_array;
- int result = 0;
-
- dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true) {
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
- goto end_function;
- }
- } else {
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
- }
-
- if (isKernelVirtualAddress == true) {
- result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- } else {
- result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- }
- edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
- edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
- edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
-
-	/* call the function that creates the tables from the lli arrays */
- result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
- if (result) {
- edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error2;
- }
-
- /* fall through - free the lli entry arrays */
- dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
- dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
- dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
-end_function_with_error2:
- kfree(lli_out_array);
-end_function_with_error1:
- kfree(lli_in_array);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
- return result;
-
-}
-
-/*
- this function handles the request for creation of the DMA tables
- for the synchronous symmetric operations (AES, DES)
-*/
-static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- /* command arguments */
- struct sep_driver_build_sync_table_t command_args;
-
- dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- edbg("app_in_address is %08lx\n", command_args.app_in_address);
- edbg("app_out_address is %08lx\n", command_args.app_out_address);
- edbg("data_size is %lu\n", command_args.data_in_size);
- edbg("block_size is %lu\n", command_args.block_size);
-
- /* check if we need to build only input table or input/output */
- if (command_args.app_out_address)
- /* prepare input and output tables */
- error = sep_prepare_input_output_dma_table(sep,
- command_args.app_in_address,
- command_args.app_out_address,
- command_args.data_in_size,
- command_args.block_size,
- &command_args.in_table_address,
- &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
- else
- /* prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- command_args.app_in_address,
- command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-
- if (error)
- goto end_function;
- /* copy to user */
- if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for freeing the dma tables used for synchronous actions
-*/
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
-
- /* free input pages array */
- sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
-
- /* free output pages array if needed */
- if (sep->out_page_array)
- sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
-
- /* reset all the values */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
- sep->in_num_pages = 0;
- sep->out_num_pages = 0;
- dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
- return 0;
-}
-
-/*
- this function finds space for the new flow dma table
-*/
-static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
- unsigned long **table_address_ptr)
-{
- int error = 0;
- /* pointer to the id field of the flow dma table */
- unsigned long *start_table_ptr;
- /* Do not make start_addr unsigned long * unless fixing the offset
- computations ! */
- void *flow_dma_area_start_addr;
- unsigned long *flow_dma_area_end_addr;
- /* maximum table size in words */
- unsigned long table_size_in_words;
-
- /* find the start address of the flow DMA table area */
- flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* set end address of the flow table area */
- flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
-
- /* set table size in words */
- table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
-
- /* set the pointer to the start address of DMA area */
- start_table_ptr = flow_dma_area_start_addr;
-
- /* find the space for the next table */
- while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
- start_table_ptr += table_size_in_words;
-
-	/* check if we reached the end of the flow tables area */
- if (start_table_ptr >= flow_dma_area_end_addr)
- error = -1;
- else
- *table_address_ptr = start_table_ptr;
-
- return error;
-}
-
-/*
- This function creates one DMA table for flow and returns its data,
- and pointer to its info entry
-*/
-static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
- unsigned long virt_buff_addr,
- unsigned long virt_buff_size,
- struct sep_lli_entry_t *table_data,
- struct sep_lli_entry_t **info_entry_ptr,
- struct sep_flow_context_t *flow_data_ptr,
- bool isKernelVirtualAddress)
-{
- int error;
- /* the range in pages */
- unsigned long lli_array_size;
- struct sep_lli_entry_t *lli_array;
- struct sep_lli_entry_t *flow_dma_table_entry_ptr;
- unsigned long *start_dma_table_ptr;
- /* total table data counter */
- unsigned long dma_table_data_count;
- /* pointer that will keep the pointer to the pages of the virtual buffer */
- struct page **page_array_ptr;
- unsigned long entry_count;
-
- /* find the space for the new table */
- error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
- if (error)
- goto end_function;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock kernel buffer in the memory */
- error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
- else
- /* lock user buffer in the memory */
- error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-
- if (error)
- goto end_function;
-
-	/* store the number of lli entries at the beginning of the table - this
-	   table is now considered taken */
- *start_dma_table_ptr = lli_array_size;
-
- /* point to the place of the pages pointers of the table */
- start_dma_table_ptr++;
-
- /* set the pages pointer */
- *start_dma_table_ptr = (unsigned long) page_array_ptr;
-
- /* set the pointer to the first entry */
- flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
-
- /* now create the entries for table */
- for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
- flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
-
- flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
-
- /* set the total data of a table */
- dma_table_data_count += lli_array[entry_count].block_size;
-
- flow_dma_table_entry_ptr++;
- }
-
- /* set the physical address */
- table_data->physical_address = virt_to_phys(start_dma_table_ptr);
-
- /* set the num_entries and total data size */
- table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
-
- /* set the info entry */
- flow_dma_table_entry_ptr->physical_address = 0xffffffff;
- flow_dma_table_entry_ptr->block_size = 0;
-
- /* set the pointer to info entry */
- *info_entry_ptr = flow_dma_table_entry_ptr;
-
-	/* free the array of the lli entries */
- kfree(lli_array);
-end_function:
- return error;
-}
-
-
-
-/*
- This function creates a list of tables for flow and returns the data for
- the first and last tables of the list
-*/
-static int sep_prepare_flow_dma_tables(struct sep_device *sep,
- unsigned long num_virtual_buffers,
- unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
-{
- int error;
- unsigned long virt_buff_addr;
- unsigned long virt_buff_size;
- struct sep_lli_entry_t table_data;
- struct sep_lli_entry_t *info_entry_ptr;
- struct sep_lli_entry_t *prev_info_entry_ptr;
- unsigned long i;
-
- /* init vars */
- error = 0;
- prev_info_entry_ptr = 0;
-
- /* init the first table to default */
- table_data.physical_address = 0xffffffff;
- first_table_data_ptr->physical_address = 0xffffffff;
- table_data.block_size = 0;
-
- for (i = 0; i < num_virtual_buffers; i++) {
- /* get the virtual buffer address */
- error = get_user(virt_buff_addr, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* get the virtual buffer size */
- first_buff_addr++;
- error = get_user(virt_buff_size, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* advance the address to point to the next pair of address|size */
- first_buff_addr++;
-
- /* now prepare the one flow LLI table from the data */
- error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
- if (error)
- goto end_function;
-
- if (i == 0) {
- /* if this is the first table - save it to return to the user
- application */
- *first_table_data_ptr = table_data;
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- } else {
- /* not first table - the previous table info entry should
- be updated */
- prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- }
- }
-
- /* set the last table data */
- *last_table_data_ptr = table_data;
-end_function:
- return error;
-}
-
-/*
- this function goes over all the flow tables connected to the given
- table and deallocate them
-*/
-static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
-{
- /* id pointer */
- unsigned long *table_ptr;
-	/* number of entries in the table */
- unsigned long num_entries;
- unsigned long num_pages;
- struct page **pages_ptr;
-	/* pointer to the info entry of the table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- /* set the pointer to the first table */
- table_ptr = (unsigned long *) first_table_ptr->physical_address;
-
- /* set the num of entries */
- num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
- & SEP_NUM_ENTRIES_MASK;
-
- /* go over all the connected tables */
- while (*table_ptr != 0xffffffff) {
- /* get number of pages */
- num_pages = *(table_ptr - 2);
-
- /* get the pointer to the pages */
- pages_ptr = (struct page **) (*(table_ptr - 1));
-
- /* free the pages */
- sep_free_dma_pages(pages_ptr, num_pages, 1);
-
-		/* go to the info entry */
- info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
-
- table_ptr = (unsigned long *) info_entry_ptr->physical_address;
- num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
- }
-
- return;
-}
-
-/**
- * sep_find_flow_context - find a flow
- * @sep: the SEP we are working with
- * @flow_id: flow identifier
- *
- * Returns a pointer the matching flow, or NULL if the flow does not
- * exist.
- */
-
-static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
- unsigned long flow_id)
-{
- int count;
- /*
-	 * always search for the flow with the default id first - once we
-	 * have started working on a flow there can never be a situation
-	 * where two flows carry the default flag
- */
- for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
- if (sep->flows[count].flow_id == flow_id)
- return &sep->flows[count];
- }
- return NULL;
-}
-
-
-/*
- this function handles the request to create the DMA tables for flow
-*/
-static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = -ENOENT;
- struct sep_driver_build_flow_table_t command_args;
-	/* data of the first table */
-	struct sep_lli_entry_t first_table_data;
-	/* data of the last table */
-	struct sep_lli_entry_t last_table_data;
-	/* pointer to the info entry of the previous DMA table */
-	struct sep_lli_entry_t *prev_info_entry_ptr;
-	/* pointer to the flow data structure */
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
-
- /* init variables */
- prev_info_entry_ptr = 0;
- first_table_data.physical_address = 0xffffffff;
-
- /* find the free structure for flow data */
- error = -EINVAL;
- flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* create flow tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* check if flow is static */
- if (!command_args.flow_type)
- /* point the info entry of the last to the info entry of the first */
- last_table_data = first_table_data;
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function_with_error;
- }
-
-	/* the whole flow was created - update the flow entry with the temporary id */
- flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
-
- /* set the processing tables data in the context */
- if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
- flow_context_ptr->input_tables_in_process = first_table_data;
- else
- flow_context_ptr->output_tables_in_process = first_table_data;
-
- goto end_function;
-
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles adding tables to a flow
-*/
-static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- unsigned long num_entries;
- struct sep_driver_add_flow_table_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
- /* first dma table data */
- struct sep_lli_entry_t first_table_data;
- /* last dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the current DMA table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
-
- /* get input parameters */
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* find the flow structure for the flow id */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* prepare the flow dma tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* now check if there is already an existing add table for this flow */
- if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
- /* this buffer was for input buffers */
- if (flow_context_ptr->input_tables_flag) {
- /* add table already exists - add the new tables to the end
- of the previous */
- num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_input_table;
- } else {
- /* set the input flag */
- flow_context_ptr->input_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_input_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_input_table = last_table_data;
- } else { /* this is output tables */
-
-		/* this buffer was for output buffers */
- if (flow_context_ptr->output_tables_flag) {
- /* add table already exists - add the new tables to
- the end of the previous */
- num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_output_table;
- } else {
-			/* set the output flag */
- flow_context_ptr->output_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_output_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_output_table = last_table_data;
- }
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
- if (error)
- error = -EFAULT;
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
- return error;
-}
-
-/*
- this function adds the 'add tables' message to the specified flow
-*/
-static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_add_message_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* check input */
- if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* find the flow context */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* copy the message into context */
- flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
- error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
- return error;
-}
-
-
-/*
- this function returns the bus and virtual addresses of the static pool
-*/
-static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_static_pool_addr_t command_args;
-
- dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
-
-	/* prepare the output parameters in the struct */
- command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
- command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
- if (error)
- error = -EFAULT;
- dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
- return error;
-}
-
-/*
- this function gets the offset of a physical address from the start
- of the mapped area
-*/
-static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_get_mapped_offset_t command_args;
-
- dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- if (command_args.physical_address < sep->shared_bus) {
- error = -EINVAL;
- goto end_function;
- }
-
-	/* prepare the output parameters in the struct */
- command_args.offset = command_args.physical_address - sep->shared_bus;
-
- edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
- return error;
-}
-
-
-/*
- this function handles the SEP start command: it polls for a response from SEP and reads the error status if startup failed
-*/
-static int sep_start_handler(struct sep_device *sep)
-{
- unsigned long reg_val;
- unsigned long error = 0;
-
- dbg("SEP Driver:--------> sep_start_handler start\n");
-
- /* wait in polling for message from SEP */
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg_val);
-
- /* check the value */
- if (reg_val == 0x1)
-		/* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- dbg("SEP Driver:<-------- sep_start_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for SEP initialization
-*/
-static int sep_init_handler(struct sep_device *sep, unsigned long arg)
-{
- unsigned long message_word;
- unsigned long *message_ptr;
- struct sep_driver_init_t command_args;
- unsigned long counter;
- unsigned long error;
- unsigned long reg_val;
-
- dbg("SEP Driver:--------> sep_init_handler start\n");
- error = 0;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
- dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
-
-	/* PATCH - configure the DMA to single-burst instead of multi-burst */
- /*sep_configure_dma_burst(); */
-
- dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
-
- message_ptr = (unsigned long *) command_args.message_addr;
-
- /* set the base address of the SRAM */
- sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
-
- for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
- get_user(message_word, message_ptr);
- /* write data to SRAM */
- sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
- edbg("SEP Driver:message_word is %lu\n", message_word);
- /* wait for write complete */
- sep_wait_sram_write(sep);
- }
- dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
- /* signal SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
-
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!(reg_val & 0xFFFFFFFD));
-
- dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
-
- /* check the value */
- if (reg_val == 0x1) {
- edbg("SEP Driver:init failed\n");
-
- error = sep_read_reg(sep, 0x8060);
- edbg("SEP Driver:sw monitor is %lu\n", error);
-
-		/* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver:error is %lu\n", error);
- }
-end_function:
- dbg("SEP Driver:<-------- sep_init_handler end\n");
- return error;
-
-}
-
-/*
- this function handles the request for cache and resident reallocation
-*/
-static int sep_realloc_cache_resident_handler(struct sep_device *sep,
- unsigned long arg)
-{
- struct sep_driver_realloc_cache_resident_t command_args;
- int error;
-
-	/* copy cache and resident to their intended locations */
- error = sep_load_firmware(sep);
- if (error)
- return error;
-
- command_args.new_base_addr = sep->shared_bus;
-
- /* find the new base address according to the lowest address between
- cache, resident and shared area */
- if (sep->resident_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->resident_bus;
- if (sep->rar_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->rar_bus;
-
- /* set the return parameters */
- command_args.new_cache_addr = sep->rar_bus;
- command_args.new_resident_addr = sep->resident_bus;
-
- /* set the new shared area */
- command_args.new_shared_area_addr = sep->shared_bus;
-
- edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
- edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
- edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
- edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
-
- /* return to user */
- if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
- return -EFAULT;
- return 0;
-}
-
-/**
- * sep_get_time_handler - time request from user space
- * @sep: sep we are to set the time for
- * @arg: pointer to user space arg buffer
- *
- * This function reports back the time and the address in the SEP
- * shared buffer at which it has been placed. (Do we really need this!!!)
- */
-
-static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
-{
- struct sep_driver_get_time_t command_args;
-
- mutex_lock(&sep_mutex);
- command_args.time_value = sep_set_time(sep);
- command_args.time_physical_address = (unsigned long)sep_time_address(sep);
- mutex_unlock(&sep_mutex);
- if (copy_to_user((void __user *)arg,
- &command_args, sizeof(struct sep_driver_get_time_t)))
- return -EFAULT;
- return 0;
-
-}
-
-/*
- This API handles the end transaction request
-*/
-static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
-{
- dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
-
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-	/* release the sep mutex */
- mutex_unlock(&sep_mutex);
-#endif
-
- dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
-
- return 0;
-}
-
-
-/**
- * sep_set_flow_id_handler - handle flow setting
- * @sep: the SEP we are configuring
- * @flow_id: the flow we are setting
- *
- * This function handles the set flow id command
- */
-static int sep_set_flow_id_handler(struct sep_device *sep,
- unsigned long flow_id)
-{
- int error = 0;
- struct sep_flow_context_t *flow_data_ptr;
-
-	/* find the flow data structure that was just used for creating a new
-	   flow - its id should be the temporary id */
-
- mutex_lock(&sep_mutex);
- flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
- if (flow_data_ptr)
- flow_data_ptr->flow_id = flow_id; /* set flow id */
- else
- error = -EINVAL;
- mutex_unlock(&sep_mutex);
- return error;
-}
-
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- dbg("------------>SEP Driver: ioctl start\n");
-
- edbg("SEP Driver: cmd is %x\n", cmd);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* send command to SEP */
- sep_send_command_handler(sep);
- edbg("SEP Driver: after sep_send_command_handler\n");
- break;
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* send reply command to SEP */
- sep_send_reply_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCWRITEDATAPOLL:
- /* write data into memory pool */
- error = sep_write_into_data_pool_handler(sep, arg);
- break;
- case SEP_IOCREADDATAPOLL:
- /* read data from data pool into application memory */
- error = sep_read_from_data_pool_handler(sep, arg);
- break;
- case SEP_IOCCREATESYMDMATABLE:
-		/* create dma table for synchronous operation */
- error = sep_create_sync_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCCREATEFLOWDMATABLE:
- /* create flow dma tables */
- error = sep_create_flow_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCFREEDMATABLEDATA:
- /* free the pages */
- error = sep_free_dma_table_data_handler(sep);
- break;
- case SEP_IOCSETFLOWID:
- /* set flow id */
- error = sep_set_flow_id_handler(sep, (unsigned long)arg);
- break;
- case SEP_IOCADDFLOWTABLE:
- /* add tables to the dynamic flow */
- error = sep_add_flow_tables_handler(sep, arg);
- break;
- case SEP_IOCADDFLOWMESSAGE:
- /* add message of add tables to flow */
- error = sep_add_flow_tables_message_handler(sep, arg);
- break;
- case SEP_IOCSEPSTART:
- /* start command to sep */
- error = sep_start_handler(sep);
- break;
- case SEP_IOCSEPINIT:
- /* init command to sep */
- error = sep_init_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* get the physical and virtual addresses of the static pool */
- error = sep_get_static_pool_addr_handler(sep, arg);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep, arg);
- break;
- case SEP_IOCREALLOCCACHERES:
- error = sep_realloc_cache_resident_handler(sep, arg);
- break;
- case SEP_IOCGETMAPPEDADDROFFSET:
- error = sep_get_physical_mapped_offset_handler(sep, arg);
- break;
- case SEP_IOCGETIME:
- error = sep_get_time_handler(sep, arg);
- break;
- default:
- error = -ENOTTY;
- break;
- }
- dbg("SEP Driver:<-------- ioctl end\n");
- return error;
-}
-
-
-
-#if !SEP_DRIVER_POLLING_MODE
-
-/* handler for flow done interrupt */
-
-static void sep_flow_done_handler(struct work_struct *work)
-{
- struct sep_flow_context_t *flow_data_ptr;
-
- /* obtain the mutex */
- mutex_lock(&sep_mutex);
-
- /* get the pointer to context */
- flow_data_ptr = (struct sep_flow_context_t *) work;
-
- /* free all the current input tables in sep */
- sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
-
-	/* free all the current output tables in SEP (if needed) */
- if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
- sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
-
-	/* check if we have additional tables to be sent to SEP; only the
-	   input flag needs to be checked */
- if (flow_data_ptr->input_tables_flag) {
- /* copy the message to the shared RAM and signal SEP */
- memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
- }
- mutex_unlock(&sep_mutex);
-}
-/*
- interrupt handler function
-*/
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error;
- unsigned long reg_val;
- unsigned long flow_id;
- struct sep_flow_context_t *flow_context_ptr;
- struct sep_device *sep = dev_id;
-
- int_error = IRQ_HANDLED;
-
- /* read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
- edbg("SEP Interrupt - reg is %08lx\n", reg_val);
-
- /* check if this is the flow interrupt */
- if (0 /*reg_val & (0x1 << 11) */ ) {
-		/* read GPR0 to find out which flow is done */
- flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
-		/* find the context of the flow */
- flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
- if (flow_context_ptr == NULL)
- goto end_function_with_error;
-
- /* queue the work */
- INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
- queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
-
- } else {
-		/* check if this is a reply interrupt from SEP */
- if (reg_val & (0x1 << 13)) {
- /* update the counter of reply messages */
- sep->reply_ct++;
- /* wake up the waiting process */
- wake_up(&sep_event);
- } else {
- int_error = IRQ_NONE;
- goto end_function;
- }
- }
-end_function_with_error:
- /* clear the interrupt */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-end_function:
- return int_error;
-}
-
-#endif
-
-
-
-#if 0
-
-static void sep_wait_busy(struct sep_device *sep)
-{
- u32 reg;
-
- do {
- reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
- } while (reg);
-}
-
-/*
- PATCH for configuring the DMA to single burst instead of multi-burst
-*/
-static void sep_configure_dma_burst(struct sep_device *sep)
-{
-#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
-
- /* request access to registers from SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
-
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
-
- /* set the DMA burst register to single burst */
- sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
-
- /* release the sep busy */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
-
-}
-
-#endif
-
-/*
- Probe function, called when the SEP PCI device is detected
-*/
-static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
- int counter;
- int size; /* size of memory for allocation */
-
- edbg("Sep pci probe starting\n");
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- edbg("error enabling pci device\n");
- goto end_function;
- }
-
- /* set the pci dev pointer */
- sep_dev = &sep_instance;
- sep = &sep_instance;
-
- edbg("sep->shared_addr = %p\n", sep->shared_addr);
- /* transaction counter that coordinates the transactions between SEP
- and HOST */
- sep->send_ct = 0;
- /* counter for the messages from sep */
- sep->reply_ct = 0;
- /* counter for the number of bytes allocated in the pool
- for the current transaction */
- sep->data_pool_bytes_allocated = 0;
-
- /* calculate the total size for allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- /* allocate the shared area */
- if (sep_map_and_alloc_shared_area(sep, size)) {
- error = -ENOMEM;
- /* allocation failed */
- goto end_function_error;
- }
- /* now set the memory regions */
-#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
- /* Note: this test section will need moving before it could ever
- work as the registers are not yet mapped ! */
- /* send the new SHARED MESSAGE AREA to the SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* poll for SEP response */
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
- while (retval != 0xffffffff && retval != sep->shared_bus)
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* check the return value (register) */
- if (retval != sep->shared_bus) {
- error = -ENOMEM;
- goto end_function_deallocate_sep_shared_area;
- }
-#endif
-	/* init the flow contexts */
- for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
- sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
-
- sep->flow_wq = create_singlethread_workqueue("sepflowwq");
- if (sep->flow_wq == NULL) {
- error = -ENOMEM;
- edbg("sep_driver:flow queue creation failed\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver: create flow workqueue \n");
- sep->pdev = pci_dev_get(pdev);
-
- sep->reg_addr = pci_ioremap_bar(pdev, 0);
- if (!sep->reg_addr) {
- edbg("sep: ioremap of registers failed.\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
-
- /* load the rom code */
- sep_load_rom_code(sep);
-
- /* set up system base address and shared memory location */
- sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
- 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- &sep->rar_bus, GFP_KERNEL);
-
- if (!sep->rar_addr) {
- edbg("SEP Driver:can't allocate rar\n");
- goto end_function_uniomap;
- }
-
-
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-
-#if !SEP_DRIVER_POLLING_MODE
-
- edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
-
- /* clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- edbg("SEP Driver: about to call request_irq\n");
- /* get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
- if (error)
- goto end_function_free_res;
- return 0;
- edbg("SEP Driver: about to write IMR REG_ADDR");
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-end_function_free_res:
- dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- sep->rar_addr, sep->rar_bus);
-#endif /* SEP_DRIVER_POLLING_MODE */
-end_function_uniomap:
- iounmap(sep->reg_addr);
-end_function_deallocate_sep_shared_area:
- /* de-allocate shared area */
- sep_unmap_and_free_shared_area(sep, size);
-end_function_error:
- sep_dev = NULL;
-end_function:
- return error;
-}
-
-static const struct pci_device_id sep_pci_id_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe
- /* FIXME: remove handler */
-};
-
-/* major and minor device numbers */
-static dev_t sep_devno;
-
-/* the files operations structure of the driver */
-static struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-
-/* cdev struct of the driver */
-static struct cdev sep_cdev;
-
-/*
- this function registers the driver to the file system
-*/
-static int sep_register_driver_to_fs(void)
-{
- int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
- if (ret_val) {
- edbg("sep: major number allocation failed, retval is %d\n",
- ret_val);
- return ret_val;
- }
- /* init cdev */
- cdev_init(&sep_cdev, &sep_file_operations);
- sep_cdev.owner = THIS_MODULE;
-
- /* register the driver with the kernel */
- ret_val = cdev_add(&sep_cdev, sep_devno, 1);
- if (ret_val) {
- edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- }
- return ret_val;
-}
-
-
-/*--------------------------------------------------------------
- init function
-----------------------------------------------------------------*/
-static int __init sep_init(void)
-{
- int ret_val = 0;
- dbg("SEP Driver:-------->Init start\n");
- /* FIXME: Probe can occur before we are ready to survive a probe */
- ret_val = pci_register_driver(&sep_pci_driver);
- if (ret_val) {
- edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
- goto end_function_unregister_from_fs;
- }
- /* register driver to fs */
- ret_val = sep_register_driver_to_fs();
- if (ret_val)
- goto end_function_unregister_pci;
- goto end_function;
-end_function_unregister_pci:
- pci_unregister_driver(&sep_pci_driver);
-end_function_unregister_from_fs:
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
-end_function:
- dbg("SEP Driver:<-------- Init end\n");
- return ret_val;
-}
-
-
-/*-------------------------------------------------------------
- exit function
---------------------------------------------------------------*/
-static void __exit sep_exit(void)
-{
- int size;
-
- dbg("SEP Driver:--------> Exit start\n");
-
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- /* calculate the total size for de-allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
- /* FIXME: We need to do this in the unload for the device */
- /* free shared area */
- if (sep_dev) {
- sep_unmap_and_free_shared_area(sep_dev, size);
- edbg("SEP Driver: free pages SEP SHARED AREA \n");
- iounmap((void *) sep_dev->reg_addr);
- edbg("SEP Driver: iounmap \n");
- }
- edbg("SEP Driver: release_mem_region \n");
- dbg("SEP Driver:<-------- Exit end\n");
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644
index 7ef16da7c4e..00000000000
--- a/drivers/staging/sep/sep_driver_api.h
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- *
- * sep_driver_api.h - Security Processor Driver api definitions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_API_H__
-#define __SEP_DRIVER_API_H__
-
-
-
-/*----------------------------------------------------------------
- IOCTL command defines
- -----------------------------------------------------------------*/
-
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER 's'
-
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0)
-
-/* sends interrupt to sep that a reply message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1)
-
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2)
-
-/* write to pre-allocated memory in data pool */
-#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3)
-
-/* read from pre-allocated memory in data pool */
-#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4)
-
-/* create sym dma lli tables */
-#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5)
-
-/* create flow dma lli tables */
-#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6)
-
-/* free dynamic data allocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7)
-
-/* get the static pool area addresses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8)
-
-/* set flow id command */
-#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9)
-
-/* add tables to the dynamic flow */
-#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10)
-
-/* add flow add tables message */
-#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11)
-
-/* start sep command */
-#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12)
-
-/* init sep command */
-#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13)
-
-/* end transaction command */
-#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15)
-
-/* reallocate cache and resident */
-#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16)
-
-/* get the offset of the address starting from the beginning of the map area */
-#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17)
-
-/* get time address and value */
-#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19)
-
-/*-------------------------------------------
- TYPEDEFS
-----------------------------------------------*/
-
-/*
- init command struct
-*/
-struct sep_driver_init_t {
-	/* address of the init message in host memory */
-	unsigned long message_addr;
-
-	/* size of the message in words */
-	unsigned long message_size_in_words;
-
-};
-
-
-/*
- realloc cache resident command
-*/
-struct sep_driver_realloc_cache_resident_t {
- /* new cache address */
- u64 new_cache_addr;
- /* new resident address */
- u64 new_resident_addr;
-	/* new shared area address */
- u64 new_shared_area_addr;
- /* new base address */
- u64 new_base_addr;
-};
-
-struct sep_driver_alloc_t {
- /* virtual address of allocated space */
- unsigned long offset;
-
- /* physical address of allocated space */
- unsigned long phys_address;
-
- /* number of bytes to allocate */
- unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_write_t {
- /* application space address */
- unsigned long app_address;
-
- /* address of the data pool */
- unsigned long datapool_address;
-
- /* number of bytes to write */
- unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_read_t {
- /* application space address */
- unsigned long app_address;
-
- /* address of the data pool */
- unsigned long datapool_address;
-
- /* number of bytes to read */
- unsigned long num_bytes;
-};
-
-/*
-*/
-struct sep_driver_build_sync_table_t {
- /* address value of the data in */
- unsigned long app_in_address;
-
- /* size of data in */
- unsigned long data_in_size;
-
- /* address of the data out */
- unsigned long app_out_address;
-
-	/* the size of the block of the operation - if needed,
-	   every table's data size will be a multiple of this parameter */
- unsigned long block_size;
-
- /* the physical address of the first input DMA table */
- unsigned long in_table_address;
-
- /* number of entries in the first input DMA table */
- unsigned long in_table_num_entries;
-
- /* the physical address of the first output DMA table */
- unsigned long out_table_address;
-
- /* number of entries in the first output DMA table */
- unsigned long out_table_num_entries;
-
-	/* data size of the first input table */
- unsigned long table_data_size;
-
- /* distinct user/kernel layout */
- bool isKernelVirtualAddress;
-
-};
-
-/*
-*/
-struct sep_driver_build_flow_table_t {
- /* flow type */
- unsigned long flow_type;
-
- /* flag for input output */
- unsigned long input_output_flag;
-
- /* address value of the data in */
- unsigned long virt_buff_data_addr;
-
-	/* number of virtual buffers */
- unsigned long num_virtual_buffers;
-
- /* the physical address of the first input DMA table */
- unsigned long first_table_addr;
-
- /* number of entries in the first input DMA table */
- unsigned long first_table_num_entries;
-
-	/* data size of the first table */
- unsigned long first_table_data_size;
-
- /* distinct user/kernel layout */
- bool isKernelVirtualAddress;
-};
-
-
-struct sep_driver_add_flow_table_t {
- /* flow id */
- unsigned long flow_id;
-
- /* flag for input output */
- unsigned long inputOutputFlag;
-
- /* address value of the data in */
- unsigned long virt_buff_data_addr;
-
-	/* number of virtual buffers */
- unsigned long num_virtual_buffers;
-
- /* address of the first table */
- unsigned long first_table_addr;
-
- /* number of entries in the first table */
- unsigned long first_table_num_entries;
-
- /* data size of the first table */
- unsigned long first_table_data_size;
-
- /* distinct user/kernel layout */
- bool isKernelVirtualAddress;
-
-};
-
-/*
- command struct for set flow id
-*/
-struct sep_driver_set_flow_id_t {
- /* flow id to set */
- unsigned long flow_id;
-};
-
-
-/* command struct for add tables message */
-struct sep_driver_add_message_t {
- /* flow id to set */
- unsigned long flow_id;
-
- /* message size in bytes */
- unsigned long message_size_in_bytes;
-
- /* address of the message */
- unsigned long message_address;
-};
-
-/* command struct for static pool addresses */
-struct sep_driver_static_pool_addr_t {
- /* physical address of the static pool */
- unsigned long physical_static_address;
-
- /* virtual address of the static pool */
- unsigned long virtual_static_address;
-};
-
-/* command struct for getting the offset of a physical address from
-   the start of the mapped area */
-struct sep_driver_get_mapped_offset_t {
-	/* physical address to translate */
- unsigned long physical_address;
-
-	/* offset from the start of the mapped area */
- unsigned long offset;
-};
-
-/* command struct for getting time value and address */
-struct sep_driver_get_time_t {
- /* physical address of stored time */
- unsigned long time_physical_address;
-
- /* value of the stored time */
- unsigned long time_value;
-};
-
-
-/*
- structure that represents one entry in the DMA LLI table
-*/
-struct sep_lli_entry_t {
- /* physical address */
- unsigned long physical_address;
-
- /* block size */
- unsigned long block_size;
-};
-
-/*
- structure that represents the data needed for lli table construction
-*/
-struct sep_lli_prepare_table_data_t {
- /* pointer to the memory where the first lli entry to be built */
- struct sep_lli_entry_t *lli_entry_ptr;
-
- /* pointer to the array of lli entries from which the table is to be built */
- struct sep_lli_entry_t *lli_array_ptr;
-
- /* number of elements in lli array */
- int lli_array_size;
-
- /* number of entries in the created table */
- int num_table_entries;
-
- /* number of array entries processed during table creation */
- int num_array_entries_processed;
-
-	/* the total data size in the created table */
- int lli_table_total_data_size;
-};
-
-/*
- structure that represents one table - it is not used in the code, just
- to show what a table looks like
-*/
-struct sep_lli_table_t {
-	/* number of pages mapped by this table. If 0 it means that the table
-	   is not defined (used as a validity flag) */
- unsigned long num_pages;
- /*
- pointer to array of page pointers that represent the mapping of the
- virtual buffer defined by the table to the physical memory. If this
- pointer is NULL, it means that the table is not defined
- (used as a valid flag)
- */
- struct page **table_page_array_ptr;
-
- /* maximum flow entries in table */
- struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
-};
-
-
-/*
- structure for keeping the mapping of the virtual buffer into physical pages
-*/
-struct sep_flow_buffer_data {
-	/* pointer to the array of page struct pointers for the pages of the
-	   virtual buffer */
- struct page **page_array_ptr;
-
- /* number of pages taken by the virtual buffer */
- unsigned long num_pages;
-
- /* this flag signals if this page_array is the last one among many that were
- sent in one setting to SEP */
- unsigned long last_page_array_flag;
-};
-
-/*
- struct that keeps all the data for one flow
-*/
-struct sep_flow_context_t {
- /*
-	   work struct for handling the flow done interrupt in the workqueue;
-	   this structure must come first, since it is used for casting to
-	   the containing flow context
- */
- struct work_struct flow_wq;
-
- /* flow id */
- unsigned long flow_id;
-
-	/* additional input tables exist */
- unsigned long input_tables_flag;
-
-	/* additional output tables exist */
- unsigned long output_tables_flag;
-
-	/* data of the first input table */
- struct sep_lli_entry_t first_input_table;
-
- /* data of the first output table */
- struct sep_lli_entry_t first_output_table;
-
- /* last input table data */
- struct sep_lli_entry_t last_input_table;
-
- /* last output table data */
- struct sep_lli_entry_t last_output_table;
-
-	/* input tables in process (in sep) */
- struct sep_lli_entry_t input_tables_in_process;
-
- /* output table in process (in sep) */
- struct sep_lli_entry_t output_tables_in_process;
-
- /* size of messages in bytes */
- unsigned long message_size_in_bytes;
-
- /* message */
- unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
-};
-
-
-#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644
index 6008fe5eca0..00000000000
--- a/drivers/staging/sep/sep_driver_config.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- *
- * sep_driver_config.h - Security Processor Driver configuration
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_CONFIG_H__
-#define __SEP_DRIVER_CONFIG_H__
-
-
-/*--------------------------------------
- DRIVER CONFIGURATION FLAGS
- -------------------------------------*/
-
-/* if this flag is on, then the driver is running in polling and
- not interrupt mode */
-#define SEP_DRIVER_POLLING_MODE 1
-
-/* flag which defines if the shared area address should be
- reconfigured (sent to SEP anew) during init of the driver */
-#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
-
-/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
-#define SEP_DRIVER_ARM_DEBUG_MODE 0
-
-/*-------------------------------------------
- INTERNAL DATA CONFIGURATION
- -------------------------------------------*/
-
-/* flag for the input array */
-#define SEP_DRIVER_IN_FLAG 0
-
-/* flag for output array */
-#define SEP_DRIVER_OUT_FLAG 1
-
-/* maximum number of entries in one LLI tables */
-#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8
-
-
-/*--------------------------------------------------------
- SHARED AREA memory total size is 36K
- it is divided as follows:
-
- SHARED_MESSAGE_AREA 8K }
- }
- STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
- }
- DATA_POOL_AREA 12K }
-
- SYNCHRONIC_DMA_TABLES_AREA 5K
-
- FLOW_DMA_TABLES_AREA 4K
-
- SYSTEM_MEMORY_AREA 3k
-
- SYSTEM_MEMORY total size is 3k
- it is divided as follows:
-
- TIME_MEMORY_AREA 8B
------------------------------------------------------------*/
-
-
-
-/*
- the maximum length of the message - the rest of the message shared
- area will be dedicated to the dma lli tables
-*/
-#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
-
-/* the size of the message shared area in bytes */
-#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
-
-/* the size of the data pool static area in bytes */
-#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
-
-/* the size of the data pool shared area in bytes */
-#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024)
-
-/* the size of the synchronic DMA tables area in bytes */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5)
-
-
-/* the size of the flow DMA tables area in bytes */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
-
-/* system data (time, caller id, etc.) pool */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100
-
-
-/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
- DATA POOL areas. The area size must be a multiple of 4K */
-#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24)
-
-
-/*-----------------------------------------------
- offsets of the areas starting from the shared area start address
-*/
-
-/* message area offset */
-#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
-
-/* static pool area offset */
-#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
-
-/* data pool area offset */
-#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
-
-/* synchronic dma tables area offset */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
-
-/* sep driver flow dma tables area offset */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* system memory offset in bytes */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
- (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* offset of the time area */
-#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
- (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
-
-
-
-/* start physical address of the SEP registers memory in HOST */
-#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
-
-/* size of the SEP registers memory region in HOST (for now 100 registers) */
-#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000)
-
-/* define the number of IRQ for SEP interrupts */
-#define SEP_DIRVER_IRQ_NUM 1
-
-/* maximum number of add buffers */
-#define SEP_MAX_NUM_ADD_BUFFERS 100
-
-/* number of flows */
-#define SEP_DRIVER_NUM_FLOWS 4
-
-/* maximum number of entries in flow table */
-#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25
-
-/* offset of the num entries in the block length entry of the LLI */
-#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
-
-/* offset of the interrupt flag in the block length entry of the LLI */
-#define SEP_INT_FLAG_OFFSET_IN_BITS 31
-
-/* mask for extracting data size from LLI */
-#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF
-
-/* mask for entries after being shifted left */
-#define SEP_NUM_ENTRIES_MASK 0x7F
-
-/* default flow id */
-#define SEP_FREE_FLOW_ID 0xFFFFFFFF
-
-/* temp flow id used during creation of a new flow until receiving the
- real flow id from sep */
-#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1)
-
-/* maximum add buffers message length in bytes */
-#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4)
-
-/* maximum number of concurrent virtual buffers */
-#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100
-
-/* the token that defines the start of time address */
-#define SEP_TIME_VAL_TOKEN 0x12345678
-
-/* DEBUG LEVEL MASKS */
-#define SEP_DEBUG_LEVEL_BASIC 0x1
-
-#define SEP_DEBUG_LEVEL_EXTENDED 0x4
-
-
-/* Debug helpers */
-
-#define dbg(fmt, args...) \
-do {\
- if (debug & SEP_DEBUG_LEVEL_BASIC) \
- printk(KERN_DEBUG fmt, ##args); \
-} while(0);
-
-#define edbg(fmt, args...) \
-do { \
- if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
- printk(KERN_DEBUG fmt, ##args); \
-} while(0);
-
-
-
-#endif
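
As a quick sanity check of the shared-area layout documented in the removed config header above, the following standalone C snippet (not part of the driver; macro and variable names other than the copied sizes are illustrative) reproduces the chained offset macros and prints where each region starts:

/* Standalone sanity check, not driver code. The sizes are copied from the
 * removed header; everything else is made up for illustration. */
#include <stdio.h>

#define MSG_AREA     (8 * 1024)   /* SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES */
#define STATIC_AREA  (4 * 1024)   /* SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES */
#define DATA_POOL    (12 * 1024)  /* SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES */
#define SYNC_TABLES  (1024 * 5)   /* SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES */
#define FLOW_TABLES  (1024 * 4)   /* SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES */

int main(void)
{
	unsigned long static_off = MSG_AREA;                 /* static pool */
	unsigned long data_off   = static_off + STATIC_AREA; /* data pool */
	unsigned long sync_off   = data_off + DATA_POOL;     /* sync DMA tables */
	unsigned long flow_off   = sync_off + SYNC_TABLES;   /* flow DMA tables */
	unsigned long sys_off    = flow_off + FLOW_TABLES;   /* system memory */

	printf("static=%lu data=%lu sync=%lu flow=%lu system=%lu\n",
	       static_off, data_off, sync_off, flow_off, sys_off);
	return 0;
}

Running it gives static=8192, data=12288, sync=24576, flow=29696, system=33792, so the mmap'ed portion (message, static pool and data pool) ends exactly at the 24K boundary named by SEP_DRIVER_MMMAP_AREA_SIZE.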
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644
index ea6abd8a14b..00000000000
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- *
- * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef SEP_DRIVER_HW_DEFS__H
-#define SEP_DRIVER_HW_DEFS__H
-
-/*--------------------------------------------------------------------------*/
-/* Abstract: HW Registers Defines. */
-/* */
-/* Note: This file was automatically created !!! */
-/* DO NOT EDIT THIS FILE !!! */
-/*--------------------------------------------------------------------------*/
-
-
-/* cf registers */
-#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
-#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
-#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
-#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
-#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
-#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
-#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
-#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
-#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
-#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
-#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
-#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
-#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
-#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
-#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
-#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
-#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
-#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
-#define HW_R3B_REG_ADDR 0x00C0UL
-#define HW_R4B_REG_ADDR 0x0100UL
-#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
-#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
-#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
-#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
-#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
-#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
-#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
-#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
-#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
-#define HW_CSA_REG_ADDR 0x0140UL
-#define HW_SINB_REG_ADDR 0x0180UL
-#define HW_SOUTB_REG_ADDR 0x0184UL
-#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
-#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
-#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
-#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
-#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
-#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
-#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
-#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
-#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
-#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
-#define HW_PKI_CLR_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
-#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
-#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
-#define HW_DES_KEY_0_REG_ADDR 0x0208UL
-#define HW_DES_KEY_1_REG_ADDR 0x020CUL
-#define HW_DES_KEY_2_REG_ADDR 0x0210UL
-#define HW_DES_KEY_3_REG_ADDR 0x0214UL
-#define HW_DES_KEY_4_REG_ADDR 0x0218UL
-#define HW_DES_KEY_5_REG_ADDR 0x021CUL
-#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
-#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
-#define HW_DES_IV_0_REG_ADDR 0x0228UL
-#define HW_DES_IV_1_REG_ADDR 0x022CUL
-#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
-#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
-#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
-#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
-#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
-#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
-#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
-#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
-#define HW_AES_KEY_0_REG_ADDR 0x0400UL
-#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
-#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
-#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
-#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
-#define HW_AES_IV_0_REG_ADDR 0x0440UL
-#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
-#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
-#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
-#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
-#define HW_AES_CTR1_REG_ADDR 0x0460UL
-#define HW_AES_SK_REG_ADDR 0x0478UL
-#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
-#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
-#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
-#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
-#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
-#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
-#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
-#define HW_HASH_H0_REG_ADDR 0x0640UL
-#define HW_HASH_H1_REG_ADDR 0x0644UL
-#define HW_HASH_H2_REG_ADDR 0x0648UL
-#define HW_HASH_H3_REG_ADDR 0x064CUL
-#define HW_HASH_H4_REG_ADDR 0x0650UL
-#define HW_HASH_H5_REG_ADDR 0x0654UL
-#define HW_HASH_H6_REG_ADDR 0x0658UL
-#define HW_HASH_H7_REG_ADDR 0x065CUL
-#define HW_HASH_H8_REG_ADDR 0x0660UL
-#define HW_HASH_H9_REG_ADDR 0x0664UL
-#define HW_HASH_H10_REG_ADDR 0x0668UL
-#define HW_HASH_H11_REG_ADDR 0x066CUL
-#define HW_HASH_H12_REG_ADDR 0x0670UL
-#define HW_HASH_H13_REG_ADDR 0x0674UL
-#define HW_HASH_H14_REG_ADDR 0x0678UL
-#define HW_HASH_H15_REG_ADDR 0x067CUL
-#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
-#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
-#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
-#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
-#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
-#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
-#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
-#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
-#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
-#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
-#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
-#define HW_HASH_DATA_REG_ADDR 0x07ECUL
-#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
-#define HW_DRNG_VALID_REG_ADDR 0x0804UL
-#define HW_DRNG_DATA_REG_ADDR 0x0808UL
-#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
-#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
-#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
-#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
-#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
-#define HW_CLK_STATUS_REG_ADDR 0x0824UL
-#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
-#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
-#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
-#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
-#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
-#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
-#define HW_AES_BUSY_REG_ADDR 0x0914UL
-#define HW_DES_BUSY_REG_ADDR 0x0918UL
-#define HW_HASH_BUSY_REG_ADDR 0x091CUL
-#define HW_CONTENT_REG_ADDR 0x0924UL
-#define HW_VERSION_REG_ADDR 0x0928UL
-#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
-#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
-#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
-#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
-#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
-#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
-#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
-#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
-#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
-#define HW_OLD_DATA_REG_ADDR 0x0C48UL
-#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
-#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
-#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
-#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
-#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
-#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
-#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
-#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
-#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
-#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
-#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
-#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
-#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
-#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
-#define HW_HOST_IRR_REG_ADDR 0x0A00UL
-#define HW_HOST_IMR_REG_ADDR 0x0A04UL
-#define HW_HOST_ICR_REG_ADDR 0x0A08UL
-#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
-#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
-#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
-#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
-#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
-#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
-#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
-#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
-#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
-#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
-#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
-#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
-#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
-#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
-#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
-#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
-#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL
-#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL
-#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
-#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
-#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
-#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
-#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
-#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
-#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
-
-#endif /* ifndef HW_DEFS */
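
The register map removed above is only a list of MMIO offsets; a hedged sketch of how such offsets are typically consumed follows (ioremap the region once, then readl()/writel() relative to the base). The function and variable names here are made up and are not the SEP driver's actual code:

#include <linux/types.h>
#include <linux/io.h>

#define HW_HOST_IRR_REG_ADDR	0x0A00UL	/* interrupt request register */
#define HW_HOST_ICR_REG_ADDR	0x0A08UL	/* interrupt clear register */

/* Read the pending-interrupt bits and acknowledge what was seen.
 * sep_base is assumed to come from an earlier ioremap() of the region. */
static u32 sep_ack_irq(void __iomem *sep_base)
{
	u32 pending = readl(sep_base + HW_HOST_IRR_REG_ADDR);

	writel(pending, sep_base + HW_HOST_ICR_REG_ADDR);
	return pending;
}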
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
index 5e2ffefb60a..d231ae27299 100644
--- a/drivers/staging/spectra/Kconfig
+++ b/drivers/staging/spectra/Kconfig
@@ -2,6 +2,7 @@
menuconfig SPECTRA
tristate "Denali Spectra Flash Translation Layer"
depends on BLOCK
+ depends on X86_MRST
default n
---help---
Enable the FTL pseudo-filesystem used with the NAND Flash
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index d0c5c97eda3..fa21a0fd8e8 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -27,6 +27,8 @@
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/
@@ -113,7 +115,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
#define GLOB_SBD_NAME "nd"
#define GLOB_SBD_IRQ_NUM (29)
-#define GLOB_VERSION "driver version 20091110"
#define GLOB_SBD_IOCTL_GC (0x7701)
#define GLOB_SBD_IOCTL_WL (0x7702)
@@ -272,13 +273,6 @@ static int get_res_blk_num_os(void)
return res_blks;
}
-static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
-{
- rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
- /* rq->timeout = 5 * HZ; */
- rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
@@ -296,8 +290,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
IdentifyDeviceData.PagesPerBlock *
res_blks_os;
- if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
- req->cmd[0] == REQ_LB_OP_FLUSH) {
+ if (req->cmd_type & REQ_FLUSH) {
if (force_flush_cache()) /* Fail to flush cache */
return -EIO;
else
@@ -597,11 +590,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
return -ENOTTY;
}
+int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static struct block_device_operations GLOB_SBD_ops = {
.owner = THIS_MODULE,
.open = GLOB_SBD_open,
.release = GLOB_SBD_release,
- .locked_ioctl = GLOB_SBD_ioctl,
+ .ioctl = GLOB_SBD_unlocked_ioctl,
.getgeo = GLOB_SBD_getgeo,
};
@@ -650,8 +655,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
/* Here we force report 512 byte hardware sector size to Kernel */
blk_queue_logical_block_size(dev->queue, 512);
- blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
- SBD_prepare_flush);
+ blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
if (IS_ERR(dev->thread)) {
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
index 134aa5166a8..9b5218b6ada 100644
--- a/drivers/staging/spectra/flash.c
+++ b/drivers/staging/spectra/flash.c
@@ -61,7 +61,6 @@ static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
u8 cache_blk, u16 flag);
static int FTL_Cache_Write(void);
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
static void FTL_Calculate_LRU(void);
static u32 FTL_Get_Block_Index(u32 wBlockNum);
@@ -86,8 +85,6 @@ static u32 FTL_Replace_MWBlock(void);
static int FTL_Replace_Block(u64 blk_addr);
static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
-
struct device_info_tag DeviceInfo;
struct flash_cache_tag Cache;
static struct spectra_l2_cache_info cache_l2;
@@ -775,7 +772,7 @@ static void dump_cache_l2_table(void)
{
struct list_head *p;
struct spectra_l2_cache_list *pnd;
- int n, i;
+ int n;
n = 0;
list_for_each(p, &cache_l2.table.list) {
@@ -1538,79 +1535,6 @@ static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Update_Block
-* Inputs: pointer to buffer,page address,block address
-* Outputs: PASS=0 / FAIL=1
-* Description: It updates the cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Update_Block(u8 *pData,
- u64 old_page_addr, u64 blk_addr)
-{
- int i, j;
- u8 *buf = pData;
- int wResult = PASS;
- int wFoundInCache;
- u64 page_addr;
- u64 addr;
- u64 old_blk_addr;
- u16 page_offset;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- old_blk_addr = (u64)(old_page_addr >>
- DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
- page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
- DeviceInfo.nBitsInPageDataSize);
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
- page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
- if (i != page_offset) {
- wFoundInCache = FAIL;
- for (j = 0; j < CACHE_ITEM_NUM; j++) {
- addr = Cache.array[j].address;
- addr = FTL_Get_Physical_Block_Addr(addr) +
- GLOB_u64_Remainder(addr, 2);
- if ((addr >= page_addr) && addr <
- (page_addr + Cache.cache_item_size)) {
- wFoundInCache = PASS;
- buf = Cache.array[j].buf;
- Cache.array[j].changed = SET;
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = j;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[j].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[j].changed;
-#endif
-#endif
- break;
- }
- }
- if (FAIL == wFoundInCache) {
- if (ERR == FTL_Cache_Read_All(g_pTempBuf,
- page_addr)) {
- wResult = FAIL;
- break;
- }
- buf = g_pTempBuf;
- }
- } else {
- buf = pData;
- }
-
- if (FAIL == FTL_Cache_Write_All(buf,
- blk_addr + (page_addr - old_blk_addr))) {
- wResult = FAIL;
- break;
- }
- }
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: FTL_Copy_Block
* Inputs: source block address
* Destination block address
@@ -1698,7 +1622,7 @@ static int get_l2_cache_blks(void)
static int erase_l2_cache_blocks(void)
{
int i, ret = PASS;
- u32 pblk, lblk;
+ u32 pblk, lblk = BAD_BLOCK;
u64 addr;
u32 *pbt = (u32 *)g_pBlockTable;
@@ -2004,87 +1928,6 @@ static int search_l2_cache(u8 *buf, u64 logical_addr)
return ret;
}
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_Back
-* Inputs: pointer to data cached in sys memory
-* address of free block in flash
-* Outputs: PASS=0 / FAIL=1
-* Description: writes all the pages of Cache Block to flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
-{
- int i, j, iErase;
- u64 old_page_addr, addr, phy_addr;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 lba;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
- GLOB_u64_Remainder(blk_addr, 2);
-
- iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
-
- pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
- DeviceInfo.nBitsInBlockDataSize);
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- for (i = 0; i < RETRY_TIMES; i++) {
- if (PASS == iErase) {
- phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
- if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
- lba = BLK_FROM_ADDR(blk_addr);
- MARK_BLOCK_AS_BAD(pbt[lba]);
- i = RETRY_TIMES;
- break;
- }
- }
-
- for (j = 0; j < CACHE_ITEM_NUM; j++) {
- addr = Cache.array[j].address;
- if ((addr <= blk_addr) &&
- ((addr + Cache.cache_item_size) > blk_addr))
- cache_block_to_write = j;
- }
-
- phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
- if (PASS == FTL_Cache_Update_Block(pData,
- old_page_addr, phy_addr)) {
- cache_block_to_write = UNHIT_CACHE_ITEM;
- break;
- } else {
- iErase = PASS;
- }
- }
-
- if (i >= RETRY_TIMES) {
- if (ERR == FTL_Flash_Error_Handle(pData,
- old_page_addr, blk_addr))
- return ERR;
- else
- return FAIL;
- }
-
- return PASS;
-}
-
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: FTL_Cache_Write_Page
* Inputs: Pointer to buffer, page address, cache block number
@@ -2370,159 +2213,6 @@ static int FTL_Write_Block_Table(int wForce)
return 1;
}
-/******************************************************************
-* Function: FTL_Format_Flash
-* Inputs: none
-* Outputs: PASS
-* Description: The block table stores bad block info, including MDF+
-* blocks gone bad over the ages. Therefore, if we have a
-* block table in place, then use it to scan for bad blocks
-* If not, then scan for MDF.
-* Now, a block table will only be found if spectra was already
-* being used. For a fresh flash, we'll go thru scanning for
-* MDF. If spectra was being used, then there is a chance that
-* the MDF has been corrupted. Spectra avoids writing to the
-* first 2 bytes of the spare area to all pages in a block. This
-* covers all known flash devices. However, since flash
-* manufacturers have no standard of where the MDF is stored,
-* this cannot guarantee that the MDF is protected for future
-* devices too. The initial scanning for the block table assures
-* this. It is ok even if the block table is outdated, as all
-* we're looking for are bad block markers.
-* Use this when mounting a file system or starting a
-* new flash.
-*
-*********************************************************************/
-static int FTL_Format_Flash(u8 valid_block_table)
-{
- u32 i, j;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 tempNode;
- int ret;
-
-#if CMD_DMA
- u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
- if (ftl_cmd_cnt)
- return FAIL;
-#endif
-
- if (FAIL == FTL_Check_Block_Table(FAIL))
- valid_block_table = 0;
-
- if (valid_block_table) {
- u8 switched = 1;
- u32 block, k;
-
- k = DeviceInfo.wSpectraStartBlock;
- while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
- switched = 0;
- k++;
- for (j = DeviceInfo.wSpectraStartBlock, i = 0;
- j <= DeviceInfo.wSpectraEndBlock;
- j++, i++) {
- block = (pbt[i] & ~BAD_BLOCK) -
- DeviceInfo.wSpectraStartBlock;
- if (block != i) {
- switched = 1;
- tempNode = pbt[i];
- pbt[i] = pbt[block];
- pbt[block] = tempNode;
- }
- }
- }
- if ((k == DeviceInfo.wSpectraEndBlock) && switched)
- valid_block_table = 0;
- }
-
- if (!valid_block_table) {
- memset(g_pBlockTable, 0,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memset(g_pWearCounter, 0,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memset(g_pReadCounter, 0,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-#if CMD_DMA
- memset(g_pBTStartingCopy, 0,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memset(g_pWearCounterCopy, 0,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memset(g_pReadCounterCopy, 0,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-#endif
- for (j = DeviceInfo.wSpectraStartBlock, i = 0;
- j <= DeviceInfo.wSpectraEndBlock;
- j++, i++) {
- if (GLOB_LLD_Get_Bad_Block((u32)j))
- pbt[i] = (u32)(BAD_BLOCK | j);
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
-
- for (j = DeviceInfo.wSpectraStartBlock, i = 0;
- j <= DeviceInfo.wSpectraEndBlock;
- j++, i++) {
- if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
- ret = GLOB_LLD_Erase_Block(j);
- if (FAIL == ret) {
- pbt[i] = (u32)(j);
- MARK_BLOCK_AS_BAD(pbt[i]);
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, (int)j);
- } else {
- pbt[i] = (u32)(SPARE_BLOCK | j);
- }
- }
-#if CMD_DMA
- pbtStartingCopy[i] = pbt[i];
-#endif
- }
-
- g_wBlockTableOffset = 0;
- for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock))
- && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
- ;
- if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
- printk(KERN_ERR "All blocks bad!\n");
- return FAIL;
- } else {
- g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
- if (i != BLOCK_TABLE_INDEX) {
- tempNode = pbt[i];
- pbt[i] = pbt[BLOCK_TABLE_INDEX];
- pbt[BLOCK_TABLE_INDEX] = tempNode;
- }
- }
- pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
- pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-#endif
-
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- memset(g_pBTBlocks, 0xFF,
- (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
- g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
- FTL_Write_Block_Table(FAIL);
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- Cache.array[i].address = NAND_CACHE_INIT_ADDR;
- Cache.array[i].use_cnt = 0;
- Cache.array[i].changed = CLEAR;
- }
-
-#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
-#endif
- return PASS;
-}
-
static int force_format_nand(void)
{
u32 i;
@@ -3031,112 +2721,6 @@ static int FTL_Read_Block_Table(void)
return wResult;
}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Flash_Error_Handle
-* Inputs: Pointer to data
-* Page address
-* Block address
-* Outputs: PASS=0 / FAIL=1
-* Description: It handles any error that occurred during a Spectra operation
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
- u64 blk_addr)
-{
- u32 i;
- int j;
- u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
- u64 phy_addr;
- int wErase = FAIL;
- int wResult = FAIL;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (ERR == GLOB_FTL_Garbage_Collection())
- return ERR;
-
- do {
- for (i = DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock;
- i > 0; i--) {
- if (IS_SPARE_BLOCK(i)) {
- tmp_node = (u32)(BAD_BLOCK |
- pbt[blk_node]);
- pbt[blk_node] = (u32)(pbt[i] &
- (~SPARE_BLOCK));
- pbt[i] = tmp_node;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- blk_node;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[blk_node];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = i;
- p_BTableChangesDelta->BT_Entry_Value = pbt[i];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- wResult = PASS;
- break;
- }
- }
-
- if (FAIL == wResult) {
- if (FAIL == GLOB_FTL_Garbage_Collection())
- break;
- else
- continue;
- }
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-
- for (j = 0; j < RETRY_TIMES; j++) {
- if (PASS == wErase) {
- if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
- MARK_BLOCK_AS_BAD(pbt[blk_node]);
- break;
- }
- }
- if (PASS == FTL_Cache_Update_Block(pData,
- old_page_addr,
- phy_addr)) {
- wResult = PASS;
- break;
- } else {
- wResult = FAIL;
- wErase = PASS;
- }
- }
- } while (FAIL == wResult);
-
- FTL_Write_Block_Table(FAIL);
-
- return wResult;
-}
-
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: FTL_Get_Page_Num
* Inputs: Size in bytes
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 368c30a9d5f..4af83d5318f 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
params.key_len = len;
params.key = wlandev->wep_keys[key_index];
+ params.seq_len = 0;
callback(cookie, &params);
@@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
priv->band.n_channels = ARRAY_SIZE(prism2_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
+ priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.ht_cap.ht_supported = false;
wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
set_wiphy_dev(wiphy, dev);
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 77d4d715a78..722c840ac63 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -769,6 +769,7 @@ static int __init zram_init(void)
free_devices:
while (dev_id)
destroy_device(&devices[--dev_id]);
+ kfree(devices);
unregister:
unregister_blkdev(zram_major, "zram");
out:
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 1c320bfa6fb..f383cb42b1d 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
{
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
int ret;
/* instance init */
@@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
goto fail;
}
- usb_fill_int_urb(instance->rcv_urb,
+ if (!cmd_ep) {
+ dbg("cxacru_bind: no command endpoint");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_INT) {
+ usb_fill_int_urb(instance->rcv_urb,
usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
instance->rcv_buf, PAGE_SIZE,
cxacru_blocking_completion, &instance->rcv_done, 1);
- usb_fill_int_urb(instance->snd_urb,
+ usb_fill_int_urb(instance->snd_urb,
usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
instance->snd_buf, PAGE_SIZE,
cxacru_blocking_completion, &instance->snd_done, 4);
+ } else {
+ usb_fill_bulk_urb(instance->rcv_urb,
+ usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
+ instance->rcv_buf, PAGE_SIZE,
+ cxacru_blocking_completion, &instance->rcv_done);
+
+ usb_fill_bulk_urb(instance->snd_urb,
+ usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
+ instance->snd_buf, PAGE_SIZE,
+ cxacru_blocking_completion, &instance->snd_done);
+ }
mutex_init(&instance->cm_serialize);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 1833b3a7151..bc62fae0680 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf,
}
if (!buflen) {
- if (intf->cur_altsetting->endpoint->extralen &&
+ if (intf->cur_altsetting->endpoint &&
+ intf->cur_altsetting->endpoint->extralen &&
intf->cur_altsetting->endpoint->extra) {
dev_dbg(&intf->dev,
"Seeking extra descriptors on endpoint\n");
@@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf)
USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
USB_CDC_ACM_PROTO_VENDOR)
+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
+
/*
* USB driver structure.
*/
@@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = {
{ NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
{ NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
{ NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+ { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+ { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+ { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
@@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = NOT_A_MODEM,
},
+ /* control interfaces without any protocol set */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_PROTO_NONE) },
+
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index fd4c36ea5e4..844683e5038 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1724,6 +1724,15 @@ free_interfaces:
if (ret)
goto free_interfaces;
+ /* if it's already configured, clear out old state first.
+ * getting rid of old interfaces means unbinding their drivers.
+ */
+ if (dev->state != USB_STATE_ADDRESS)
+ usb_disable_device(dev, 1); /* Skip ep0 */
+
+ /* Get rid of pending async Set-Config requests for this device */
+ cancel_async_set_config(dev);
+
/* Make sure we have bandwidth (and available HCD resources) for this
* configuration. Remove endpoints from the schedule if we're dropping
* this configuration to set configuration 0. After this point, the
@@ -1733,20 +1742,11 @@ free_interfaces:
mutex_lock(&hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
- usb_autosuspend_device(dev);
mutex_unlock(&hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
goto free_interfaces;
}
- /* if it's already configured, clear out old state first.
- * getting rid of old interfaces means unbinding their drivers.
- */
- if (dev->state != USB_STATE_ADDRESS)
- usb_disable_device(dev, 1); /* Skip ep0 */
-
- /* Get rid of pending async Set-Config requests for this device */
- cancel_async_set_config(dev);
-
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@ free_interfaces:
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
- usb_autosuspend_device(dev);
mutex_unlock(&hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
goto free_interfaces;
}
mutex_unlock(&hcd->bandwidth_mutex);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index e483f80822d..1160c55de7f 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -723,12 +723,12 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
/**
* usb_string_ids_n() - allocate unused string IDs in batch
- * @cdev: the device whose string descriptor IDs are being allocated
+ * @c: the device whose string descriptor IDs are being allocated
* @n: number of string IDs to allocate
* Context: single threaded during gadget setup
*
* Returns the first requested ID. This ID and next @n-1 IDs are now
- * valid IDs. At least providind that @n is non zore because if it
+ * valid IDs. At least provided that @n is non-zero because if it
* is, returns last requested ID which is now very useful information.
*
* @usb_string_ids_n() is called from bind() callbacks to allocate
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 166bf71fd34..e03058fe23c 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev)
/* initialize ucd */
m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
if (m66592 == NULL) {
+ ret = -ENOMEM;
pr_err("kzalloc error\n");
goto clean_up;
}
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 70a81784275..2456ccd9965 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
/* initialize ucd */
r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
if (r8a66597 == NULL) {
+ ret = -ENOMEM;
printk(KERN_ERR "kzalloc error\n");
goto clean_up;
}
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 020fa5a25fd..972d5ddd1e1 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_VENDOR_DESCRIPTION:
pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
- length = strlen (rndis_per_dev_params [configNr].vendorDescr);
- memcpy (outbuf,
- rndis_per_dev_params [configNr].vendorDescr, length);
+ if ( rndis_per_dev_params [configNr].vendorDescr ) {
+ length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+ memcpy (outbuf,
+ rndis_per_dev_params [configNr].vendorDescr, length);
+ } else {
+ outbuf[0] = 0;
+ }
retval = 0;
break;
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
-int __init rndis_init (void)
+int rndis_init(void)
{
u8 i;
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index c236aaa9dcd..907c3300811 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr);
int rndis_state (int configNr);
extern void rndis_set_host_mac (int configNr, const u8 *addr);
-int __devinit rndis_init (void);
+int rndis_init(void);
void rndis_exit (void);
#endif /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 521ebed0118..a229744a8c7 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -12,8 +12,6 @@
* published by the Free Software Foundation.
*/
-#define DEBUG
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 2dcffdac86d..5e807f083bc 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
break;
}
- if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+ if (i == ARRAY_SIZE(uvc_formats)) {
printk(KERN_INFO "Unsupported format 0x%08x.\n",
fmt->fmt.pix.pixelformat);
return -EINVAL;
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 335ee699fd8..ba52be47302 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
}
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
- return 0;
+ if (rv)
+ goto err_ehci;
+
+ return 0;
+err_ehci:
+ if (ehci->has_amcc_usb23)
+ iounmap(ehci->ohci_hcctrl_reg);
iounmap(hcd->regs);
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- if (ehci->has_amcc_usb23)
- iounmap(ehci->ohci_hcctrl_reg);
err_rmr:
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index d1a3dfc9a40..bdba8c5d844 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -829,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
* almost immediately. With ISP1761, this register requires a delay of
* 195ns between a write and subsequent read (see section 15.1.1.3).
*/
+ mmiowb();
ndelay(195);
skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
@@ -870,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
* almost immediately. With ISP1761, this register requires a delay of
* 195ns between a write and subsequent read (see section 15.1.1.3).
*/
+ mmiowb();
ndelay(195);
skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bc3f4f42706..48e60d166ff 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
- *trb = (*trb)++;
+ (*trb)++;
}
}
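
The next_trb() change above replaces "*trb = (*trb)++;", which modifies and assigns the same object with no sequence point in between (undefined behaviour in C), with a plain increment. A minimal illustration of the intended pattern, using a generic cursor type rather than the real xHCI TRB structures:

/* Illustrative only: cursor points at the caller's position in an array;
 * the function simply advances that position by one element. */
static void advance_cursor(const int **cursor)
{
	(*cursor)++;
}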
@@ -1551,6 +1551,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* calc actual length */
if (ep->skip) {
td->urb->iso_frame_desc[idx].actual_length = 0;
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
return finish_td(xhci, td, event_trb, event, ep, status, true);
}
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index d240de097c6..801324af947 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
/* drain secondary buffer */
int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
- if (i < 0) {
+ if (i) {
retval = -EFAULT;
goto exit;
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 2de49c8887c..bc88c79875a 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -542,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
retval = io_res;
else {
io_res = copy_to_user(user_buffer, buffer, dev->report_size);
- if (io_res < 0)
+ if (io_res)
retval = -EFAULT;
}
break;
@@ -574,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
}
io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
sizeof(struct iowarrior_info));
- if (io_res < 0)
+ if (io_res)
retval = -EFAULT;
break;
}
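
Both hunks above correct the same mistake: copy_to_user() returns the number of bytes it could not copy, an unsigned count that is never negative, so a "< 0" test can never fire. A minimal sketch of the corrected pattern, with an illustrative function name:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Copy a kernel buffer out to user space; any shortfall means -EFAULT. */
static long copy_out(void __user *ubuf, const void *kbuf, size_t len)
{
	unsigned long not_copied = copy_to_user(ubuf, kbuf, len);

	return not_copied ? -EFAULT : 0;
}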
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 0e8888588d4..05aaac1c386 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -550,6 +550,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
struct twl4030_usb_data *pdata = pdev->dev.platform_data;
struct twl4030_usb *twl;
int status, err;
+ u8 pwr;
if (!pdata) {
dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -568,7 +569,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
twl->otg.set_peripheral = twl4030_set_peripheral;
twl->otg.set_suspend = twl4030_set_suspend;
twl->usb_mode = pdata->usb_mode;
- twl->asleep = 1;
+
+ pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+ twl->asleep = (pwr & PHY_PWR_PHYPWD);
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2bef4415c19..4f1744c5871 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static int debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
- { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
- { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
- { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
- { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
{ USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
@@ -222,8 +225,8 @@ static struct usb_serial_driver cp210x_device = {
#define BITS_STOP_2 0x0002
/* CP210X_SET_BREAK */
-#define BREAK_ON 0x0000
-#define BREAK_OFF 0x0001
+#define BREAK_ON 0x0001
+#define BREAK_OFF 0x0000
/* CP210X_(SET_MHS|GET_MDMSTS) */
#define CONTROL_DTR 0x0001
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index eb12d9b096b..97cc87d654c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -750,6 +751,16 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+ { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1376,7 +1387,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
}
/* set max packet size based on descriptor */
- priv->max_packet_size = ep_desc->wMaxPacketSize;
+ priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
}
@@ -1831,7 +1842,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, flag);
}
} else {
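
The wMaxPacketSize change above is an endianness fix: multi-byte fields in USB descriptors are stored little-endian on the wire, so they must pass through le16_to_cpu() before use or the value comes out byte-swapped on big-endian hosts. A small sketch of the pattern (the helper name is illustrative):

#include <linux/usb/ch9.h>
#include <asm/byteorder.h>

/* Return the endpoint's max packet size in CPU byte order. */
static unsigned int ep_max_packet(const struct usb_endpoint_descriptor *ep)
{
	return le16_to_cpu(ep->wMaxPacketSize);
}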
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6e612c52e76..15a4583775a 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -110,6 +110,9 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+/* Lenz LI-USB Computer Interface. */
+#define FTDI_LENZ_LIUSB_PID 0xD780
+
/*
* Xsens Technologies BV products (http://www.xsens.com).
*/
@@ -132,6 +135,18 @@
#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
/*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE
+#define FTDI_CHAMSYS_WING_PID 0xDAFF
+
+/*
* Westrex International devices submitted by Cory Lee
*/
#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
@@ -989,6 +1004,12 @@
#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/*
+ * Ionics PlugComputer
+ */
+#define IONICS_VID 0x1c0c
+#define IONICS_PLUGCOMPUTER_PID 0x0102
+
+/*
* Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ca92f67747c..e6833e216fc 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -343,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
tty_insert_flip_string(tty, ch, urb->actual_length);
else {
for (i = 0; i < urb->actual_length; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, TTY_NORMAL);
}
}
@@ -448,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
#ifdef CONFIG_MAGIC_SYSRQ
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
- struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
if (port->sysrq && port->port.console) {
if (ch && time_before(jiffies, port->sysrq)) {
- handle_sysrq(ch, tty);
+ handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
@@ -462,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
return 0;
}
#else
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
- struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
return 0;
}
@@ -518,6 +516,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i)
generic_cleanup(serial->port[i]);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect);
void usb_serial_generic_release(struct usb_serial *serial)
{
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index dc47f986df5..a7cfc595293 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
/* Check if we have an old version in the I2C and
update if necessary */
- if (download_cur_ver != download_new_ver) {
+ if (download_cur_ver < download_new_ver) {
dbg("%s - Update I2C dld from %d.%d to %d.%d",
__func__,
firmware_version->Ver_Major,
@@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial)
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
- return status;
+ return -EINVAL;
}
/* Update I2C with type 0xf2 record with correct
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 585b7e66374..1c9b6e9b238 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -119,16 +119,20 @@
* by making a change here, in moschip_port_id_table, and in
* moschip_id_table_combined
*/
-#define USB_VENDOR_ID_BANDB 0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
+#define USB_VENDOR_ID_BANDB 0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index a6b207c8491..1f00f243c26 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -25,6 +25,7 @@ static int debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
+ { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9fc6ea2c681..c46911af282 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb);
#define YISO_VENDOR_ID 0x0EAB
#define YISO_PRODUCT_U893 0xC893
+/*
+ * NOVATEL WIRELESS PRODUCTS
+ *
+ * Note from Novatel Wireless:
+ * If your Novatel modem does not work on linux, don't
+ * change the option module, but check our website. If
+ * that does not help, contact ddeschepper@nvtl.com
+*/
/* MERLIN EVDO PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_V640 0x1100
#define NOVATELWIRELESS_PRODUCT_V620 0x1110
@@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_EU730 0x2400
#define NOVATELWIRELESS_PRODUCT_EU740 0x2410
#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420
-
/* OVATION PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
-#define NOVATELWIRELESS_PRODUCT_U727 0x5010
-#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
-#define NOVATELWIRELESS_PRODUCT_MC760 0x6000
+/*
+ * Note from Novatel Wireless:
+ * All PIDs in the 5xxx range are currently reserved for
+ * auto-install CDROMs, and should not be added to this
+ * module.
+ *
+ * #define NOVATELWIRELESS_PRODUCT_U727 0x5010
+ * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
+*/
#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
-
-/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
-#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
-#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001
-#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001
+#define NOVATELWIRELESS_PRODUCT_MC780 0x6010
+#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
+#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
+#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007
+#define NOVATELWIRELESS_PRODUCT_MC996D 0x7030
+#define NOVATELWIRELESS_PRODUCT_MF3470 0x7041
+#define NOVATELWIRELESS_PRODUCT_MC547 0x7042
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+#define NOVATELWIRELESS_PRODUCT_G1 0xA001
+#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+#define NOVATELWIRELESS_PRODUCT_G2 0xA010
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
@@ -365,6 +388,10 @@ static void option_instat_callback(struct urb *urb);
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+/* Celot products */
+#define CELOT_VENDOR_ID 0x211f
+#define CELOT_PRODUCT_CT680M 0x6801
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -486,36 +513,44 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -887,10 +922,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-
{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
-
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 6b600182227..8ae4c6cbc38 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
@@ -788,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb)
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
- if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
+ if (!usb_serial_handle_sysrq_char(port, data[i]))
tty_insert_flip_char(tty, data[i], tty_flag);
} else {
tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a871645389d..43eb9bdad42 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -128,6 +128,10 @@
#define CRESSI_VENDOR_ID 0x04b8
#define CRESSI_EDY_PRODUCT_ID 0x0521
+/* Zeagle dive computer interface */
+#define ZEAGLE_VENDOR_ID 0x04b8
+#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
+
/* Sony, USB data cable for CMD-Jxx mobile phones */
#define SONY_VENDOR_ID 0x054c
#define SONY_QN3USB_PRODUCT_ID 0x0437
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 6e82d4f54bc..e986002b384 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -15,6 +15,7 @@
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
#include <linux/uaccess.h>
#define QT_OPEN_CLOSE_CHANNEL 0xca
@@ -27,36 +28,11 @@
#define QT_HW_FLOW_CONTROL_MASK 0xc5
#define QT_SW_FLOW_CONTROL_MASK 0xc6
-#define MODEM_CTL_REGISTER 0x04
-#define MODEM_STATUS_REGISTER 0x06
-
-
-#define SERIAL_LSR_OE 0x02
-#define SERIAL_LSR_PE 0x04
-#define SERIAL_LSR_FE 0x08
-#define SERIAL_LSR_BI 0x10
-
-#define SERIAL_LSR_TEMT 0x40
-
-#define SERIAL_MCR_DTR 0x01
-#define SERIAL_MCR_RTS 0x02
-#define SERIAL_MCR_LOOP 0x10
-
-#define SERIAL_MSR_CTS 0x10
-#define SERIAL_MSR_CD 0x80
-#define SERIAL_MSR_RI 0x40
-#define SERIAL_MSR_DSR 0x20
#define SERIAL_MSR_MASK 0xf0
-#define SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS)
+#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
-#define SERIAL_8_DATA 0x03
-#define SERIAL_7_DATA 0x02
-#define SERIAL_6_DATA 0x01
-#define SERIAL_5_DATA 0x00
-
-#define SERIAL_ODD_PARITY 0X08
-#define SERIAL_EVEN_PARITY 0X18
+#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)
#define MAX_BAUD_RATE 460800
@@ -70,7 +46,7 @@
#define FULLPWRBIT 0x00000080
#define NEXT_BOARD_POWER_BIT 0x00000004
-static int debug = 1;
+static int debug;
/* Version Information */
#define DRIVER_VERSION "v0.1"
@@ -99,10 +75,12 @@ static struct usb_driver ssu100_driver = {
};
struct ssu100_port_private {
+ spinlock_t status_lock;
u8 shadowLSR;
u8 shadowMSR;
wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
unsigned short max_packet_size;
+ struct async_icount icount;
};
static void ssu100_release(struct usb_serial *serial)
@@ -150,9 +128,10 @@ static inline int ssu100_getregister(struct usb_device *dev,
static inline int ssu100_setregister(struct usb_device *dev,
unsigned short uart,
+ unsigned short reg,
u16 data)
{
- u16 value = (data << 8) | MODEM_CTL_REGISTER;
+ u16 value = (data << 8) | reg;
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0x40, value, uart,
@@ -178,11 +157,11 @@ static inline int update_mctrl(struct usb_device *dev, unsigned int set,
clear &= ~set; /* 'set' takes precedence over 'clear' */
urb_value = 0;
if (set & TIOCM_DTR)
- urb_value |= SERIAL_MCR_DTR;
+ urb_value |= UART_MCR_DTR;
if (set & TIOCM_RTS)
- urb_value |= SERIAL_MCR_RTS;
+ urb_value |= UART_MCR_RTS;
- result = ssu100_setregister(dev, 0, urb_value);
+ result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
if (result < 0)
dbg("%s Error from MODEM_CTRL urb", __func__);
@@ -264,24 +243,24 @@ static void ssu100_set_termios(struct tty_struct *tty,
if (cflag & PARENB) {
if (cflag & PARODD)
- urb_value |= SERIAL_ODD_PARITY;
+ urb_value |= UART_LCR_PARITY;
else
urb_value |= SERIAL_EVEN_PARITY;
}
switch (cflag & CSIZE) {
case CS5:
- urb_value |= SERIAL_5_DATA;
+ urb_value |= UART_LCR_WLEN5;
break;
case CS6:
- urb_value |= SERIAL_6_DATA;
+ urb_value |= UART_LCR_WLEN6;
break;
case CS7:
- urb_value |= SERIAL_7_DATA;
+ urb_value |= UART_LCR_WLEN7;
break;
default:
case CS8:
- urb_value |= SERIAL_8_DATA;
+ urb_value |= UART_LCR_WLEN8;
break;
}
@@ -333,6 +312,7 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
struct ssu100_port_private *priv = usb_get_serial_port_data(port);
u8 *data;
int result;
+ unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -350,11 +330,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
return result;
}
- priv->shadowLSR = data[0] & (SERIAL_LSR_OE | SERIAL_LSR_PE |
- SERIAL_LSR_FE | SERIAL_LSR_BI);
-
- priv->shadowMSR = data[1] & (SERIAL_MSR_CTS | SERIAL_MSR_DSR |
- SERIAL_MSR_RI | SERIAL_MSR_CD);
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowLSR = data[0];
+ priv->shadowMSR = data[1];
+ spin_unlock_irqrestore(&priv->status_lock, flags);
kfree(data);
@@ -398,11 +377,51 @@ static int get_serial_info(struct usb_serial_port *port,
return 0;
}
+static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ struct async_icount prev, cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ prev = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ while (1) {
+ wait_event_interruptible(priv->delta_msr_wait,
+ ((priv->icount.rng != prev.rng) ||
+ (priv->icount.dsr != prev.dsr) ||
+ (priv->icount.dcd != prev.dcd) ||
+ (priv->icount.cts != prev.cts)));
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ cur = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if ((prev.rng == cur.rng) &&
+ (prev.dsr == cur.dsr) &&
+ (prev.dcd == cur.dcd) &&
+ (prev.cts == cur.cts))
+ return -EIO;
+
+ if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
+ (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
+ (arg & TIOCM_CD && (prev.dcd != cur.dcd)) ||
+ (arg & TIOCM_CTS && (prev.cts != cur.cts)))
+ return 0;
+ }
+ return 0;
+}
+
static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ void __user *user_arg = (void __user *)arg;
dbg("%s cmd 0x%04x", __func__, cmd);
@@ -412,28 +431,28 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
(struct serial_struct __user *) arg);
case TIOCMIWAIT:
- while (priv != NULL) {
- u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK;
- interruptible_sleep_on(&priv->delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
- else {
- u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR;
- if (!diff)
- return -EIO; /* no change => error */
-
- /* Return 0 if caller wanted to know about
- these bits */
-
- if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) ||
- ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) ||
- ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) ||
- ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS)))
- return 0;
- }
- }
+ return wait_modem_info(port, arg);
+
+ case TIOCGICOUNT:
+ {
+ struct serial_icounter_struct icount;
+ struct async_icount cnow = priv->icount;
+ memset(&icount, 0, sizeof(icount));
+ icount.cts = cnow.cts;
+ icount.dsr = cnow.dsr;
+ icount.rng = cnow.rng;
+ icount.dcd = cnow.dcd;
+ icount.rx = cnow.rx;
+ icount.tx = cnow.tx;
+ icount.frame = cnow.frame;
+ icount.overrun = cnow.overrun;
+ icount.parity = cnow.parity;
+ icount.brk = cnow.brk;
+ icount.buf_overrun = cnow.buf_overrun;
+ if (copy_to_user(user_arg, &icount, sizeof(icount)))
+ return -EFAULT;
return 0;
+ }
default:
break;
@@ -455,6 +474,7 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
unsigned num_endpoints;
int i;
+ unsigned long flags;
num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -466,7 +486,9 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
}
/* set max packet size based on descriptor */
+ spin_lock_irqsave(&priv->status_lock, flags);
priv->max_packet_size = ep_desc->wMaxPacketSize;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
}
@@ -485,9 +507,9 @@ static int ssu100_attach(struct usb_serial *serial)
return -ENOMEM;
}
+ spin_lock_init(&priv->status_lock);
init_waitqueue_head(&priv->delta_msr_wait);
usb_set_serial_port_data(port, priv);
-
ssu100_set_max_packet_size(port);
return ssu100_initdevice(serial->dev);
@@ -506,20 +528,20 @@ static int ssu100_tiocmget(struct tty_struct *tty, struct file *file)
if (!d)
return -ENOMEM;
- r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d);
+ r = ssu100_getregister(dev, 0, UART_MCR, d);
if (r < 0)
goto mget_out;
- r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1);
+ r = ssu100_getregister(dev, 0, UART_MSR, d+1);
if (r < 0)
goto mget_out;
- r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) |
- (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) |
- (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) |
- (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) |
- (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) |
- (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0);
+ r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
+ (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
+ (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
+ (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
+ (d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
+ (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
mget_out:
kfree(d);
@@ -546,7 +568,7 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
if (!port->serial->disconnected) {
/* Disable flow control */
if (!on &&
- ssu100_setregister(dev, 0, 0) < 0)
+ ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
dev_err(&port->dev, "error from flowcontrol urb\n");
/* drop RTS and DTR */
if (on)
@@ -557,34 +579,88 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
mutex_unlock(&port->serial->disc_mutex);
}
+static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowMSR = msr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (msr & UART_MSR_ANY_DELTA) {
+ /* update input line counters */
+ if (msr & UART_MSR_DCTS)
+ priv->icount.cts++;
+ if (msr & UART_MSR_DDSR)
+ priv->icount.dsr++;
+ if (msr & UART_MSR_DDCD)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+ wake_up_interruptible(&priv->delta_msr_wait);
+ }
+}
+
+static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
+ char *tty_flag)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowLSR = lsr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ *tty_flag = TTY_NORMAL;
+ if (lsr & UART_LSR_BRK_ERROR_BITS) {
+ /* we always want to update icount, but we only want to
+ * update tty_flag for one case */
+ if (lsr & UART_LSR_BI) {
+ priv->icount.brk++;
+ *tty_flag = TTY_BREAK;
+ usb_serial_handle_break(port);
+ }
+ if (lsr & UART_LSR_PE) {
+ priv->icount.parity++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_PARITY;
+ }
+ if (lsr & UART_LSR_FE) {
+ priv->icount.frame++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_FRAME;
+ }
+ if (lsr & UART_LSR_OE){
+ priv->icount.overrun++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_OVERRUN;
+ }
+ }
+
+}
+
static int ssu100_process_packet(struct tty_struct *tty,
struct usb_serial_port *port,
struct ssu100_port_private *priv,
char *packet, int len)
{
int i;
- char flag;
+ char flag = TTY_NORMAL;
char *ch;
dbg("%s - port %d", __func__, port->number);
- if (len < 4) {
- dbg("%s - malformed packet", __func__);
- return 0;
- }
-
- if ((packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+ if ((len >= 4) &&
+ (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
((packet[2] == 0x00) || (packet[2] == 0x01))) {
- if (packet[2] == 0x00)
- priv->shadowLSR = packet[3] & (SERIAL_LSR_OE |
- SERIAL_LSR_PE |
- SERIAL_LSR_FE |
- SERIAL_LSR_BI);
-
- if (packet[2] == 0x01) {
- priv->shadowMSR = packet[3];
- wake_up_interruptible(&priv->delta_msr_wait);
+ if (packet[2] == 0x00) {
+ ssu100_update_lsr(port, packet[3], &flag);
+ if (flag == TTY_OVERRUN)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
+ if (packet[2] == 0x01)
+ ssu100_update_msr(port, packet[3]);
len -= 4;
ch = packet + 4;
@@ -596,7 +672,7 @@ static int ssu100_process_packet(struct tty_struct *tty,
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, flag);
}
} else
@@ -631,7 +707,6 @@ static void ssu100_process_read_urb(struct urb *urb)
tty_kref_put(tty);
}
-
static struct usb_serial_driver ssu100_device = {
.driver = {
.owner = THIS_MODULE,
@@ -653,6 +728,7 @@ static struct usb_serial_driver ssu100_device = {
.tiocmset = ssu100_tiocmset,
.ioctl = ssu100_ioctl,
.set_termios = ssu100_set_termios,
+ .disconnect = usb_serial_generic_disconnect,
};
static int __init ssu100_init(void)
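The ssu100 TIOCMIWAIT rework above replaces the old interruptible_sleep_on() loop over shadowMSR with modem-status counters (priv->icount) that are bumped from the URB completion path in ssu100_update_msr() and waited on with wait_event_interruptible(). The point of the switch is that wait_event_interruptible() re-evaluates its condition under the waitqueue machinery, so a status change that lands between taking the snapshot and going to sleep is not lost the way it can be with sleep_on-style waits. A condensed sketch of that wait, using only the fields the patch adds; it is not the driver's literal code:

        static int example_wait_for_msr_delta(struct ssu100_port_private *priv)
        {
                /* taken under status_lock in the real code */
                struct async_icount prev = priv->icount;

                if (wait_event_interruptible(priv->delta_msr_wait,
                                             priv->icount.cts != prev.cts ||
                                             priv->icount.dsr != prev.dsr ||
                                             priv->icount.dcd != prev.dcd ||
                                             priv->icount.rng != prev.rng))
                        return -ERESTARTSYS;    /* interrupted by a signal */
                return 0;
        }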
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 2a982e62963..7a2177c79bd 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -736,6 +736,7 @@ int usb_serial_probe(struct usb_interface *interface,
serial = create_serial(dev, interface, type);
if (!serial) {
+ module_put(type->driver.owner);
dev_err(&interface->dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
@@ -746,11 +747,11 @@ int usb_serial_probe(struct usb_interface *interface,
id = get_iface_id(type, interface);
retval = type->probe(serial, id);
- module_put(type->driver.owner);
if (retval) {
dbg("sub driver rejected device");
kfree(serial);
+ module_put(type->driver.owner);
return retval;
}
}
@@ -822,6 +823,7 @@ int usb_serial_probe(struct usb_interface *interface,
if (num_bulk_in == 0 || num_bulk_out == 0) {
dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
kfree(serial);
+ module_put(type->driver.owner);
return -ENODEV;
}
}
@@ -835,22 +837,15 @@ int usb_serial_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"Generic device with no bulk out, not allowed.\n");
kfree(serial);
+ module_put(type->driver.owner);
return -EIO;
}
}
#endif
if (!num_ports) {
/* if this device type has a calc_num_ports function, call it */
- if (type->calc_num_ports) {
- if (!try_module_get(type->driver.owner)) {
- dev_err(&interface->dev,
- "module get failed, exiting\n");
- kfree(serial);
- return -EIO;
- }
+ if (type->calc_num_ports)
num_ports = type->calc_num_ports(serial);
- module_put(type->driver.owner);
- }
if (!num_ports)
num_ports = type->num_ports;
}
@@ -1039,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface,
/* if this device type has an attach function, call it */
if (type->attach) {
- if (!try_module_get(type->driver.owner)) {
- dev_err(&interface->dev,
- "module get failed, exiting\n");
- goto probe_error;
- }
retval = type->attach(serial);
- module_put(type->driver.owner);
if (retval < 0)
goto probe_error;
serial->attached = 1;
@@ -1088,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface,
exit:
/* success */
usb_set_intfdata(interface, serial);
+ module_put(type->driver.owner);
return 0;
probe_error:
usb_serial_put(serial);
+ module_put(type->driver.owner);
return -EIO;
}
EXPORT_SYMBOL_GPL(usb_serial_probe);
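All of the usb-serial.c hunks serve one reference-counting rule: the reference on the sub-driver's module taken earlier in usb_serial_probe() (not shown in these hunks) must now be dropped on every exit path rather than immediately after type->probe(), otherwise an early failure such as create_serial(), the PL-2303 endpoint check, calc_num_ports() or attach() leaks a reference and the sub-driver module can never be unloaded. A sketch of the pattern only, with allocate_state() and setup_hardware() as hypothetical stand-ins, not the literal probe function:

        static int example_probe(struct module *owner)
        {
                void *state;

                if (!try_module_get(owner))
                        return -ENODEV;

                state = allocate_state();
                if (!state) {
                        module_put(owner);      /* every failure path drops the reference */
                        return -ENOMEM;
                }

                if (setup_hardware(state) < 0) {
                        free_state(state);
                        module_put(owner);
                        return -EIO;
                }

                module_put(owner);              /* ...and so does the success path */
                return 0;
        }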
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d5299..c579dcc9200 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
return 0;
}
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+ INIT_LIST_HEAD(&work->node);
+ work->fn = fn;
+ init_waitqueue_head(&work->done);
+ work->flushing = 0;
+ work->queue_seq = work->done_seq = 0;
+}
+
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev)
{
- struct vhost_work *work = &poll->work;
-
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
- INIT_LIST_HEAD(&work->node);
- work->fn = fn;
- init_waitqueue_head(&work->done);
- work->flushing = 0;
- work->queue_seq = work->done_seq = 0;
+ vhost_work_init(&poll->work, fn);
}
/* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
left = seq - work->done_seq <= 0;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
left;
}));
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
BUG_ON(flushing < 0);
}
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+ vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+ struct vhost_work *work)
{
- struct vhost_dev *dev = poll->dev;
- struct vhost_work *work = &poll->work;
unsigned long flags;
spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
spin_unlock_irqrestore(&dev->work_lock, flags);
}
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ vhost_work_queue(poll->dev, &poll->work);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
return dev->mm == current->mm ? 0 : -EPERM;
}
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+ struct vhost_attach_cgroups_struct attach;
+ attach.owner = current;
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_queue(dev, &attach.work);
+ vhost_work_flush(dev, &attach.work);
+ return attach.ret;
+}
+
/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
}
dev->worker = worker;
- err = cgroup_attach_task_current_cg(worker);
+ wake_up_process(worker); /* avoid contributing to loadavg */
+
+ err = vhost_attach_cgroups(dev);
if (err)
goto err_cgroup;
- wake_up_process(worker); /* avoid contributing to loadavg */
return 0;
err_cgroup:
kthread_stop(worker);
+ dev->worker = NULL;
err_worker:
if (dev->mm)
mmput(dev->mm);
@@ -323,7 +359,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
dev->mm = NULL;
WARN_ON(!list_empty(&dev->work_list));
- kthread_stop(dev->worker);
+ if (dev->worker) {
+ kthread_stop(dev->worker);
+ dev->worker = NULL;
+ }
}
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
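The vhost.c refactor splits the poll helpers into generic vhost_work primitives precisely so that a one-off job can be run synchronously on the worker kthread: vhost_attach_cgroups() builds a vhost_work on the stack, queues it, and flushes it, which both executes cgroup_attach_task_all() in the worker's context and waits for the result. The same three calls generalize to any "run this on the worker and wait" helper. A hypothetical sketch in the same style (the names are illustrative and not part of the patch; it would have to live in vhost.c, since vhost_work_queue() is static there):

        struct vhost_run_ctx {
                struct vhost_work work;
                int (*fn)(void *arg);
                void *arg;
                int ret;
        };

        static void vhost_run_ctx_fn(struct vhost_work *work)
        {
                struct vhost_run_ctx *ctx =
                        container_of(work, struct vhost_run_ctx, work);

                ctx->ret = ctx->fn(ctx->arg);   /* runs on the worker kthread */
        }

        static int vhost_run_on_worker(struct vhost_dev *dev,
                                       int (*fn)(void *), void *arg)
        {
                struct vhost_run_ctx ctx = { .fn = fn, .arg = arg };

                vhost_work_init(&ctx.work, vhost_run_ctx_fn);
                vhost_work_queue(dev, &ctx.work);
                vhost_work_flush(dev, &ctx.work);  /* wait for the worker to run it */
                return ctx.ret;
        }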
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index afe21e6eb54..1c2c68356ea 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -80,7 +80,10 @@ static void clcdfb_disable(struct clcd_fb *fb)
/*
* Disable CLCD clock source.
*/
- clk_disable(fb->clk);
+ if (fb->clk_enabled) {
+ fb->clk_enabled = false;
+ clk_disable(fb->clk);
+ }
}
static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
@@ -88,7 +91,10 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
/*
* Enable the CLCD clock source.
*/
- clk_enable(fb->clk);
+ if (!fb->clk_enabled) {
+ fb->clk_enabled = true;
+ clk_enable(fb->clk);
+ }
/*
* Bring up by first enabling..
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index f3a4e15672d..f96a471cb1a 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
#if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
/*
- * memcpy_toio works for us if:
+ * iowrite32_rep works for us if:
* (1) Copies data as 32bit quantities, not byte after byte,
* (2) Performs LE ordered stores, and
* (3) It copes with unaligned source (destination is guaranteed to be page
* aligned and length is guaranteed to be multiple of 4).
*/
- memcpy_toio(va.vaddr, src, len);
+ iowrite32_rep(va.vaddr, src, len >> 2);
#else
u_int32_t __iomem* addr = va.vaddr;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 72f91bff29c..13365ba3521 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -112,6 +112,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
#define VALID_EVTCHN(chn) ((chn) != 0)
static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
@@ -377,7 +378,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = find_unbound_irq();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "event");
+ handle_edge_irq, "event");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
@@ -403,8 +404,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
if (irq < 0)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "ipi");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "ipi");
bind_ipi.vcpu = cpu;
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -444,8 +445,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
irq = find_unbound_irq();
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "virq");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -964,6 +965,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.retrigger = retrigger_dynirq,
};
+static struct irq_chip xen_percpu_chip __read_mostly = {
+ .name = "xen-percpu",
+
+ .disable = disable_dynirq,
+ .mask = disable_dynirq,
+ .unmask = enable_dynirq,
+
+ .ack = ack_dynirq,
+};
+
int xen_set_callback_via(uint64_t via)
{
struct xen_hvm_param a;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 1799bd89031..ef9c7db5207 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -237,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
goto again;
if (sysrq_key != '\0')
- handle_sysrq(sysrq_key, NULL);
+ handle_sysrq(sysrq_key);
}
static struct xenbus_watch sysrq_watch = {