author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-10 21:56:47 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-10 21:56:47 -0800
commit    3ef884b4c04e857c283cc77ca70ad8f638d94b0e (patch)
tree      c8c5b872e836e6ffe8bd08ab3477f9e8260575ed
parent    4e5df8069b0e4e36c6b528b3be7da298e6f454cd (diff)
parent    4361e52ad0372e6fd2240a2207b49a4de1f45ca9 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (189 commits)
  drm/radeon/kms: fix warning about cur_placement being uninitialised.
  drm/ttm: Print debug information on memory manager when eviction fails
  drm: Add memory manager debug function
  drm/radeon/kms: restore surface registers on resume.
  drm/radeon/kms/r600/r700: fallback gracefully on ucode failure
  drm/ttm: Initialize eviction placement in case the driver callback doesn't
  drm/radeon/kms: cleanup structure and module if initialization fails
  drm/radeon/kms: actualy set the eviction placements we choose
  drm/radeon/kms: Fix NULL ptr dereference
  drm/radeon/kms/avivo: add support for new pll selection algo
  drm/radeon/kms/avivo: fix some bugs in the display bandwidth setup
  drm/radeon/kms: fix return value from fence function.
  drm/radeon: Remove tests for -ERESTART from the TTM code.
  drm/ttm: Have the TTM code return -ERESTARTSYS instead of -ERESTART.
  drm/radeon/kms: Convert radeon to new TTM validation API (V2)
  drm/ttm: Rework validation & memory space allocation (V3)
  drm: Add search/get functions to get a block in a specific range
  drm/radeon/kms: fix avivo tiling regression since radeon object rework
  drm/i915: Remove a debugging printk from hangcheck
  drm/radeon/kms: make sure i2c id matches
  ...
-rw-r--r--  drivers/char/agp/intel-agp.c  103
-rw-r--r--  drivers/gpu/drm/Makefile  2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c  176
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c  5
-rw-r--r--  drivers/gpu/drm/drm_dp_i2c_helper.c (renamed from drivers/gpu/drm/i915/intel_dp_i2c.c)  76
-rw-r--r--  drivers/gpu/drm/drm_drv.c  42
-rw-r--r--  drivers/gpu/drm/drm_edid.c  328
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  23
-rw-r--r--  drivers/gpu/drm/drm_fops.c  112
-rw-r--r--  drivers/gpu/drm/drm_irq.c  130
-rw-r--r--  drivers/gpu/drm/drm_mm.c  110
-rw-r--r--  drivers/gpu/drm/drm_modes.c  28
-rw-r--r--  drivers/gpu/drm/drm_stub.c  15
-rw-r--r--  drivers/gpu/drm/i915/Makefile  2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c  9
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c  16
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c  37
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c  20
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c  34
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  120
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  40
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  80
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  114
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  163
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c  92
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  71
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  86
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c  137
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h  17
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  50
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  1036
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  162
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  44
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  55
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c  21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  140
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c  1416
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c  14
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c  58
-rw-r--r--  drivers/gpu/drm/radeon/Makefile  2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c  33
-rw-r--r--  drivers/gpu/drm/radeon/atom.h  2
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h  2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  59
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c  790
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  245
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h  10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c  33
-rw-r--r--  drivers/gpu/drm/radeon/r420.c  25
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h  2
-rw-r--r--  drivers/gpu/drm/radeon/r520.c  8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  1147
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c  34
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h  212
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  165
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h  70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c  332
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c  36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c  23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c  688
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  194
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c  45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c  13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  145
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c  276
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c  72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fixed.h  17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c  104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c  182
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c  61
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c  42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c  104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c  125
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h  149
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  560
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h  157
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h  60
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c  55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  94
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c  17
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c  236
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h  112
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c  57
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c  24
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c  79
-rw-r--r--  drivers/gpu/drm/ttm/Makefile  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  545
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c  7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  117
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c  311
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c  16
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c  452
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c  1
-rw-r--r--  include/drm/drm.h  65
-rw-r--r--  include/drm/drmP.h  87
-rw-r--r--  include/drm/drm_crtc.h  47
-rw-r--r--  include/drm/drm_dp_helper.h (renamed from drivers/gpu/drm/i915/intel_dp.h)  74
-rw-r--r--  include/drm/drm_edid.h  8
-rw-r--r--  include/drm/drm_mm.h  35
-rw-r--r--  include/drm/drm_mode.h  80
-rw-r--r--  include/drm/drm_os_linux.h  2
-rw-r--r--  include/drm/i915_drm.h  78
-rw-r--r--  include/drm/mga_drm.h  2
-rw-r--r--  include/drm/radeon_drm.h  2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h  56
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h  37
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h  107
-rw-r--r--  include/drm/ttm/ttm_lock.h  247
-rw-r--r--  include/drm/ttm/ttm_memory.h  1
-rw-r--r--  include/drm/ttm/ttm_object.h  267
-rw-r--r--  include/drm/via_drm.h  2
122 files changed, 12028 insertions, 2947 deletions
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3cb56a049e2..30c36ac2cd0 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -36,10 +36,10 @@
#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
-#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
-#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
-#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
-#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
@@ -50,20 +50,20 @@
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
-#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
-#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
-#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
-#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB 0x006a
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -83,22 +83,22 @@
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
extern int agp_memory_reserved;
@@ -178,6 +178,7 @@ static struct _intel_private {
* popup and for the GTT.
*/
int gtt_entries; /* i830+ */
+ int gtt_total_size;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
@@ -653,7 +654,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_IGD) {
+ } else if (IS_G33 && !IS_PINEVIEW) {
/* G33's GTT size defined in gmch_ctrl */
switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
case G33_PGETBL_SIZE_1M:
@@ -669,7 +670,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4;
- } else if (IS_G4X || IS_IGD) {
+ } else if (IS_G4X || IS_PINEVIEW) {
/* On 4 series hardware, GTT stolen is separate from graphics
* stolen, ignore it in stolen gtt entries counting. However,
* 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -1153,7 +1154,7 @@ static int intel_i915_configure(void)
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+ for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
writel(agp_bridge->scratch_page, intel_private.gtt+i);
}
readl(intel_private.gtt+i-1); /* PCI Posting. */
@@ -1308,6 +1309,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
if (!intel_private.gtt)
return -ENOMEM;
+ intel_private.gtt_total_size = gtt_map_size / 4;
+
temp &= 0xfff80000;
intel_private.registers = ioremap(temp, 128 * 4096);
@@ -1352,15 +1355,15 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
switch (agp_bridge->dev->device) {
case PCI_DEVICE_ID_INTEL_GM45_HB:
- case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+ case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
case PCI_DEVICE_ID_INTEL_G41_HB:
case PCI_DEVICE_ID_INTEL_B43_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@@ -1395,6 +1398,8 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
if (!intel_private.gtt)
return -ENOMEM;
+ intel_private.gtt_total_size = gtt_size / 4;
+
intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
iounmap(intel_private.gtt);
@@ -2340,14 +2345,14 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
- "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
- "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+ "GM45", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+ "Eaglelake", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
"Q45/Q43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
@@ -2356,14 +2361,14 @@ static const struct intel_driver_description {
"B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
- "IGDNG/D", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
- "IGDNG/M", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
- "IGDNG/MA", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
- "IGDNG/MC2", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+ "Ironlake/D", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ "Ironlake/M", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ "Ironlake/MA", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ "Ironlake/MC2", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -2545,8 +2550,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
ID(PCI_DEVICE_ID_INTEL_82G35_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
@@ -2557,15 +2562,15 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
ID(PCI_DEVICE_ID_INTEL_GM45_HB),
- ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+ ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_B43_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
{ }
};
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3c8827a7aab..91567ac806f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3f7c500b211..5124401f266 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+ { DRM_MODE_DIRTY_OFF, "Off" },
+ { DRM_MODE_DIRTY_ON, "On" },
+ { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+ drm_dirty_info_enum_list)
+
struct drm_conn_prop_enum_list {
int type;
char *name;
@@ -247,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
@@ -802,6 +812,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dithering_property);
/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+ struct drm_property *dirty_info;
+ int i;
+
+ if (dev->mode_config.dirty_info_property)
+ return 0;
+
+ dirty_info =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM |
+ DRM_MODE_PROP_IMMUTABLE,
+ "dirty",
+ ARRAY_SIZE(drm_dirty_info_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+ drm_property_add_enum(dirty_info, i,
+ drm_dirty_info_enum_list[i].type,
+ drm_dirty_info_enum_list[i].name);
+ dev->mode_config.dirty_info_property = dirty_info;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
@@ -1753,6 +1793,71 @@ out:
return ret;
}
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out_err1;
+ }
+ fb = obj_to_fb(obj);
+
+ num_clips = r->num_clips;
+ clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (!clips) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = copy_from_user(clips, clips_ptr,
+ num_clips * sizeof(*clips));
+ if (ret)
+ goto out_err2;
+ }
+
+ if (fb->funcs->dirty) {
+ ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+ } else {
+ ret = -ENOSYS;
+ goto out_err2;
+ }
+
+out_err2:
+ kfree(clips);
+out_err1:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+
/**
* drm_fb_release - remove and free the FBs on this file
* @filp: file * from the ioctl
@@ -2478,3 +2583,72 @@ out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_page_flip *page_flip = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ page_flip->reserved != 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj)
+ goto out;
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->page_flip == NULL)
+ goto out;
+
+ obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj)
+ goto out;
+ fb = obj_to_fb(obj);
+
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ ret = -ENOMEM;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file_priv->event_space -= sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = page_flip->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy =
+ (void (*) (struct drm_pending_event *)) kfree;
+ }
+
+ ret = crtc->funcs->page_flip(crtc, fb, e);
+ if (ret) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+ }
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
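The two ioctls above are the new userspace entry points for asynchronous page flipping and framebuffer damage reporting. A minimal userspace sketch of driving them follows; it assumes an already-open DRM fd, a configured crtc_id and fb_id, and trims error handling — it is illustrative, not code from this merge.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Queue a vblank-synced flip to fb_id and ask for a completion event. */
static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id, uint64_t token)
{
	struct drm_mode_crtc_page_flip flip;

	memset(&flip, 0, sizeof(flip));
	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT; /* deliver DRM_EVENT_FLIP_COMPLETE */
	flip.user_data = token;                /* echoed back in the event */
	return ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}

/* Tell the driver which framebuffer regions changed; drivers without a
 * ->dirty hook return -ENOSYS, as the ioctl above shows. */
static int mark_dirty(int fd, uint32_t fb_id, struct drm_clip_rect *clips,
		      uint32_t num_clips)
{
	struct drm_mode_fb_dirty_cmd dirty;

	memset(&dirty, 0, sizeof(dirty));
	dirty.fb_id = fb_id;
	dirty.num_clips = num_clips;
	dirty.clips_ptr = (uint64_t)(uintptr_t)clips;
	return ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
}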
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bbfd110a716..4231d6db72e 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
count = (*connector_funcs->get_modes)(connector);
if (!count) {
- count = drm_add_modes_noedid(connector, 800, 600);
+ count = drm_add_modes_noedid(connector, 1024, 768);
if (!count)
return 0;
}
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
{
int count = 0;
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_parse_command_line(dev);
count = drm_helper_probe_connector_modes(dev,
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index a63b6f57d2d..548887c8506 100644
--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -28,84 +28,20 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
#include "drmP.h"
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-
-#define MODE_I2C_START 1
-#define MODE_I2C_WRITE 2
-#define MODE_I2C_READ 4
-#define MODE_I2C_STOP 8
-
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- uint16_t address = algo_data->address;
- uint8_t msg[5];
- uint8_t reply[2];
- int msg_bytes;
- int reply_bytes;
int ret;
-
- /* Set up the command byte */
- if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
- else
- msg[0] = AUX_I2C_WRITE << 4;
-
- if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
-
- msg[1] = address >> 8;
- msg[2] = address;
-
- switch (mode) {
- case MODE_I2C_WRITE:
- msg[3] = 0;
- msg[4] = write_byte;
- msg_bytes = 5;
- reply_bytes = 1;
- break;
- case MODE_I2C_READ:
- msg[3] = 0;
- msg_bytes = 4;
- reply_bytes = 2;
- break;
- default:
- msg_bytes = 3;
- reply_bytes = 1;
- break;
- }
-
- for (;;) {
- ret = (*algo_data->aux_ch)(adapter,
- msg, msg_bytes,
- reply, reply_bytes);
- if (ret < 0) {
- DRM_DEBUG("aux_ch failed %d\n", ret);
- return ret;
- }
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
- if (mode == MODE_I2C_READ) {
- *read_byte = reply[1];
- }
- return reply_bytes - 1;
- case AUX_I2C_REPLY_NACK:
- DRM_DEBUG("aux_ch nack\n");
- return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
- DRM_DEBUG("aux_ch defer\n");
- udelay(100);
- break;
- default:
- DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
- return -EREMOTEIO;
- }
- }
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
}
/*
@@ -224,7 +160,7 @@ i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
if (ret >= 0)
ret = num;
i2c_algo_dp_aux_stop(adapter, reading);
- DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return ret;
}
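After the move, the helper no longer builds AUX_CH messages itself; it hands the transaction mode and data byte straight to the driver's aux_ch hook. A sketch of the driver side under the new contract (the my_* names are illustrative, not from this merge; i2c_dp_aux_add_bus is the registration helper exported alongside drm_dp_helper.h):

static int my_dp_aux_ch(struct i2c_adapter *adapter, int mode,
			uint8_t write_byte, uint8_t *read_byte)
{
	/* Encode mode (start/write/read/stop) into a native AUX command,
	 * run it on the hardware channel, and return the number of bytes
	 * transferred or a negative errno. */
	return -EIO; /* placeholder */
}

static struct i2c_algo_dp_aux_data my_aux_data = {
	.address = 0,
	.aux_ch  = my_dp_aux_ch,
};

static int my_dp_register_i2c_bus(struct i2c_adapter *adapter)
{
	adapter->algo_data = &my_aux_data;
	return i2c_dp_aux_add_bus(adapter);
}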
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a75ca63deea..ff2f1042cb4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -366,6 +368,29 @@ module_init(drm_core_init);
module_exit(drm_core_exit);
/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know exact length of driver value (which could be
+ * larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
* Get version information
*
* \param inode device inode.
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_version *version = data;
- int len;
+ int err;
version->version_major = dev->driver->major;
version->version_minor = dev->driver->minor;
version->version_patchlevel = dev->driver->patchlevel;
- DRM_COPY(version->name, dev->driver->name);
- DRM_COPY(version->date, dev->driver->date);
- DRM_COPY(version->desc, dev->driver->desc);
-
- return 0;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
}
/**
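drm_copy_field() always reports the driver's exact string length back through *buf_len, which is what enables the usual two-pass DRM_IOCTL_VERSION query from userspace. A hedged sketch (not from this merge; error paths trimmed):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int query_version(int fd, struct drm_version *v)
{
	memset(v, 0, sizeof(*v));
	if (ioctl(fd, DRM_IOCTL_VERSION, v))   /* first pass: lengths only */
		return -1;

	v->name = malloc(v->name_len + 1);
	v->date = malloc(v->date_len + 1);
	v->desc = malloc(v->desc_len + 1);
	if (!v->name || !v->date || !v->desc)
		return -1;

	if (ioctl(fd, DRM_IOCTL_VERSION, v))   /* second pass: the strings */
		return -1;

	v->name[v->name_len] = '\0';
	v->date[v->date_len] = '\0';
	v->desc[v->desc_len] = '\0';
	return 0;
}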
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b54ba63d506..c39b26f1abe 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
*/
static bool edid_is_valid(struct edid *edid)
{
- int i;
+ int i, score = 0;
u8 csum = 0;
u8 *raw_edid = (u8 *)edid;
- if (memcmp(edid->header, edid_header, sizeof(edid_header)))
- goto bad;
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ if (score == 8) ;
+ else if (score >= 6) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else
goto bad;
- }
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
goto bad;
}
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
return 1;
bad:
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
+static const int drm_num_dmt_modes =
+ sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
- int i, count;
+ int i;
struct drm_display_mode *ptr, *mode;
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
mode = NULL;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < drm_num_dmt_modes; i++) {
ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+ struct detailed_data_monitor_range *range;
+ int hsync, vrefresh;
+
+ range = &timing->data.other_data.data.range;
+
+ hsync = drm_mode_hsync(mode);
+ vrefresh = drm_mode_vrefresh(mode);
+
+ if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return false;
+
+ if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ return false;
+
+ if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+ /* be forgiving since it's in units of 10MHz */
+ int max_clock = range->pixel_clock_mhz * 10 + 9;
+ max_clock *= 1000;
+ if (mode->clock > max_clock)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < drm_num_dmt_modes; i++) {
+ if (mode_in_range(drm_dmt_modes + i, timing)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+
+ for (i = 0; i < 4; i++) {
+ int width, height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
+ switch (cvt->code[1] & 0xc0) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x40:
+ width = height * 16 / 9;
+ break;
+ case 0x80:
+ width = height * 16 / 10;
+ break;
+ case 0xc0:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+ struct detailed_timing *timing,
+ struct edid *edid, u32 quirks, int preferred)
+{
+ int i, modes = 0;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ int timing_level = standard_timing_level(edid);
+ int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(dev, edid, timing, quirks);
+ if (!newmode)
+ return 0;
+
+ if (preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, newmode);
+ return 1;
+ }
+
+ /* other timing types */
+ switch (data->type) {
+ case EDID_DETAIL_MONITOR_RANGE:
+ if (gtf)
+ modes += drm_gtf_modes_for_range(connector, timing);
+ break;
+ case EDID_DETAIL_STD_MODES:
+ /* Six modes per detailed section */
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(dev, std, edid->revision,
+ timing_level);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ break;
+ case EDID_DETAIL_CVT_3BYTE:
+ modes += drm_cvt_modes(connector, timing);
+ break;
+ default:
+ break;
+ }
+
+ return modes;
+}
+
/**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
static int add_detailed_info(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
+ int i, modes = 0;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- struct detailed_non_pixel *data = &timing->data.other_data;
- struct drm_display_mode *newmode;
-
- /* X server check is version 1.1 or higher */
- if (edid->version == 1 && edid->revision >= 1 &&
- !timing->pixel_clock) {
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- for (j = 0; j < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
- } else {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
+ int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
- /* First detailed mode is preferred */
- if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, newmode);
+ /* In 1.0, only timings are allowed */
+ if (!timing->pixel_clock && edid->version == 1 &&
+ edid->revision == 0)
+ continue;
- modes++;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks,
+ preferred);
}
return modes;
}
+
/**
* add_detailed_info_eedid - get detailed mode info from additional timing
* EDID block
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
static int add_detailed_info_eedid(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
+ int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- struct detailed_non_pixel *data;
- struct drm_display_mode *newmode;
int edid_ext_num;
int start_offset, end_offset;
int timing_level;
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
for (i = start_offset; i < end_offset;
i += sizeof(struct detailed_timing)) {
timing = (struct detailed_timing *)(edid_ext + i);
- data = &timing->data.other_data;
- /* Detailed mode timing */
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
-
- drm_mode_probed_add(connector, newmode);
-
- modes++;
- continue;
- }
-
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- /* Five modes per detailed section */
- for (j = 0; j < 5; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks, 0);
}
return modes;
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
struct i2c_adapter *adapter,
char *buf, int len)
{
- int ret;
+ int i;
- ret = drm_do_probe_ddc_edid(adapter, buf, len);
- if (ret != 0) {
- goto end;
- }
- if (!edid_is_valid((struct edid *)buf)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- ret = -1;
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, buf, len))
+ return -1;
+ if (edid_is_valid((struct edid *)buf))
+ return 0;
}
-end:
- return ret;
+
+ /* repeated checksum failures; warn, but carry on */
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return -1;
}
/**
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
ptr->vdisplay > vdisplay)
continue;
}
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
mode = drm_mode_duplicate(dev, ptr);
if (mode) {
drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 65ef011fa8b..1b49fa055f4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
mutex_unlock(&dev->mode_config.mutex);
}
}
- if (dpms_mode == DRM_MODE_DPMS_OFF) {
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ mutex_lock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&dev->mode_config.mutex);
}
}
}
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
+ /* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
drm_fb_helper_on(info);
break;
+ /* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
break;
+ /* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
break;
+ /* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
break;
+ /* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
break;
@@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
+ ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
+ if (ret)
+ return ret;
+ if (register_framebuffer(info) < 0) {
+ fb_dealloc_cmap(&info->cmap);
return -EINVAL;
+ }
} else {
drm_fb_helper_set_par(info);
}
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
drm_fb_helper_crtc_free(helper);
+ fb_dealloc_cmap(&helper->fb->fbdev->cmap);
}
EXPORT_SYMBOL(drm_fb_helper_free);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 251bc0e3b5e..08d14df3bb4 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
+ INIT_LIST_HEAD(&priv->event_list);
+ init_waitqueue_head(&priv->event_wait);
+ priv->event_space = 4096; /* set aside 4k for event buffer */
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
}
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, priv, true);
+ if (ret) {
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
} else {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
}
}
+static void drm_events_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ struct drm_pending_vblank_event *v, *vt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Remove pending flips */
+ list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+ if (v->base.file_priv == file_priv) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base);
+ }
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+ e->destroy(e);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* Release file.
*
@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master)
drm_master_release(dev, filp);
+ drm_events_release(file_priv);
+
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held by the minor */
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, true);
drm_master_put(&file_priv->minor->master);
}
}
@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
-/** No-op. */
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+ size_t total, size_t max, struct drm_pending_event **out)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ *out = NULL;
+ if (list_empty(&file_priv->event_list))
+ goto out;
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length + total > max)
+ goto out;
+
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
+ *out = e;
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_pending_event *e;
+ size_t total;
+ ssize_t ret;
+
+ ret = wait_event_interruptible(file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0)
+ return ret;
+
+ total = 0;
+ while (drm_dequeue_event(file_priv, total, count, &e)) {
+ if (copy_to_user(buffer + total,
+ e->event, e->event->length)) {
+ total = -EFAULT;
+ break;
+ }
+
+ total += e->event->length;
+ e->destroy(e);
+ }
+
+ return total;
+}
+EXPORT_SYMBOL(drm_read);
+
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
- return 0;
+ struct drm_file *file_priv = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &file_priv->event_wait, wait);
+
+ if (!list_empty(&file_priv->event_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
}
EXPORT_SYMBOL(drm_poll);
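With drm_read() and the real drm_poll() in place, the DRM fd behaves like an ordinary event source. A sketch of the consuming side (assumes an open DRM fd and the struct drm_event/drm_event_vblank layout this series adds to drm.h):

#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <drm/drm.h>

static void drain_drm_events(int fd)
{
	char buf[4096]; /* mirrors the 4k of event_space set aside per file */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	ssize_t len;
	size_t i = 0;

	if (poll(&pfd, 1, -1) <= 0)
		return;
	len = read(fd, buf, sizeof(buf));
	if (len <= 0)
		return;

	while (i + sizeof(struct drm_event) <= (size_t)len) {
		struct drm_event ev;

		memcpy(&ev, buf + i, sizeof(ev));
		if (ev.type == DRM_EVENT_VBLANK ||
		    ev.type == DRM_EVENT_FLIP_COMPLETE) {
			struct drm_event_vblank vbl;

			memcpy(&vbl, buf + i, sizeof(vbl));
			/* vbl.user_data, vbl.sequence and vbl.tv_sec/tv_usec
			 * carry the payload filled in by drm_irq.c */
		}
		i += ev.length; /* events are variable length */
	}
}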
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0a6f0b3bdc7..7998ee66b31 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
- !dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ if (!dev->vblank_enabled[crtc]) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
+ ret = -EINVAL;
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ dev->vblank_enabled[crtc] = 0;
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
@@ -550,6 +568,63 @@ out:
return ret;
}
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+{
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ e->pipe = pipe;
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = vblwait->request.signal;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ do_gettimeofday(&now);
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+ return -ENOMEM;
+ }
+
+ file_priv->event_space -= sizeof e->event;
+ seq = drm_vblank_count(dev, pipe);
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ vblwait->request.sequence, seq, pipe);
+
+ e->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ } else {
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return 0;
+}
+
/**
* Wait for VBLANK.
*
@@ -609,6 +684,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
+ if (flags & _DRM_VBLANK_EVENT)
+ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1<<23)) {
vblwait->request.sequence = seq + 1;
@@ -641,6 +719,38 @@ done:
return ret;
}
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ do_gettimeofday(&now);
+ seq = drm_vblank_count(dev, crtc);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ if ((seq - e->event.sequence) > (1<<23))
+ continue;
+
+ DRM_DEBUG("vblank event on %d, current %d\n",
+ e->event.sequence, seq);
+
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
@@ -651,7 +761,11 @@ done:
*/
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ if (!dev->num_crtcs)
+ return;
+
atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ drm_handle_vblank_events(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
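Requesting one of these events from userspace is a single DRM_IOCTL_WAIT_VBLANK call with the new _DRM_VBLANK_EVENT flag; the ioctl returns immediately and the completion arrives on the fd. A hedged sketch:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int request_vblank_event(int fd, unsigned int delta, unsigned long token)
{
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
	vbl.request.sequence = delta; /* fire `delta` vblanks from now */
	vbl.request.signal = token;   /* returned as the event's user_data */
	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}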
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 97dc5a4f0de..d7d7eac3ddd 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *align_splitoff = NULL;
+ unsigned tmp = 0;
+ unsigned wasted = 0;
+
+ if (node->start < start)
+ wasted += start - node->start;
+ if (alignment)
+ tmp = ((node->start + wasted) % alignment);
+
+ if (tmp)
+ wasted += alignment - tmp;
+ if (wasted) {
+ align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+ if (unlikely(align_splitoff == NULL))
+ return NULL;
+ }
+
+ if (node->size == size) {
+ list_del_init(&node->fl_entry);
+ node->free = 0;
+ } else {
+ node = drm_mm_split_at_start(node, size, atomic);
+ }
+
+ if (align_splitoff)
+ drm_mm_put_block(align_splitoff);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
}
EXPORT_SYMBOL(drm_mm_search_free);
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match)
+{
+ struct list_head *list;
+ const struct list_head *free_stack = &mm->fl_entry;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+ unsigned wasted;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each(list, free_stack) {
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
+ wasted = 0;
+
+ if (entry->size < size)
+ continue;
+
+ if (entry->start > end || (entry->start+entry->size) < start)
+ continue;
+
+ if (entry->start < start)
+ wasted += start - entry->start;
+
+ if (alignment) {
+ register unsigned tmp = (entry->start + wasted) % alignment;
+ if (tmp)
+ wasted += alignment - tmp;
+ }
+
+ if (entry->size >= size + wasted) {
+ if (!best_match)
+ return entry;
+ if (size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_takedown);
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+ struct drm_mm_node *entry;
+ int total_used = 0, total_free = 0, total = 0;
+
+ list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ prefix, entry->start, entry->start + entry->size,
+ entry->size, entry->free ? "free" : "used");
+ total += entry->size;
+ if (entry->free)
+ total_free += entry->size;
+ else
+ total_used += entry->size;
+ }
+ printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
@@ -395,7 +503,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
else
total_used += entry->size;
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
+ seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
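The two range-restricted helpers are meant to be used as a pair, mirroring drm_mm_search_free()/drm_mm_get_block(): search the free stack first, then carve the block out of the returned node. An illustrative in-kernel sketch (alloc_in_range is a hypothetical wrapper, not part of this merge):

static struct drm_mm_node *alloc_in_range(struct drm_mm *mm,
					  unsigned long size,
					  unsigned alignment,
					  unsigned long start,
					  unsigned long end)
{
	struct drm_mm_node *free_node;

	free_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 1 /* best match */);
	if (!free_node)
		return NULL; /* caller may evict and retry */

	return drm_mm_get_block_range_generic(free_node, size, alignment,
					      start, end, 0 /* non-atomic */);
}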
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 51f677215f1..6d81a02463a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_mode_height);
+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+ unsigned int calc_val;
+
+ if (mode->hsync)
+ return mode->hsync;
+
+ if (mode->htotal <= 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+ calc_val += 500; /* round to 1000Hz */
+ calc_val /= 1000; /* truncate to kHz */
+
+ return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height);
* LOCKING:
* None.
*
- * Return @mode's vrefresh rate or calculate it if necessary.
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
*
* FIXME: why is this needed? shouldn't vrefresh be set already?
*
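As a worked example of the hsync arithmetic above (numbers assumed, not from this merge): a 1920x1080@60 mode with a 148.5 MHz pixel clock and an htotal of 2200 gives (148500 * 1000) / 2200 = 67500 Hz, i.e. 67.5 kHz, which the rounding steps report as 68:

	int hsync_khz = ((148500 * 1000) / 2200 + 500) / 1000; /* = 68 */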
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 55bb8a82d61..ad73e141afd 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
kref_get(&master->refcount);
return master;
}
+EXPORT_SYMBOL(drm_master_get);
static void drm_master_destroy(struct kref *kref)
{
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master)
kref_put(&(*master)->refcount, drm_master_destroy);
*master = NULL;
}
+EXPORT_SYMBOL(drm_master_put);
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ int ret = 0;
+
if (file_priv->is_master)
return 0;
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
+ }
+ }
mutex_unlock(&dev->struct_mutex);
}
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
+ spin_lock_init(&dev->event_lock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
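The master_set/master_drop hooks give a driver a say when a client gains
or loses DRM master status. A hypothetical driver (all foo_* names are
made up; the callback signatures are taken from the call sites above)
might wire them up as:

	static int foo_master_set(struct drm_device *dev,
				  struct drm_file *file_priv, bool from_open)
	{
		/* e.g. reclaim hardware state only the master may touch */
		return 0;
	}

	static void foo_master_drop(struct drm_device *dev,
				    struct drm_file *file_priv,
				    bool from_release)
	{
		/* e.g. quiesce resources owned by the departing master */
	}

	static struct drm_driver foo_driver = {
		/* ... */
		.master_set = foo_master_set,
		.master_drop = foo_master_drop,
	};

Returning nonzero from master_set makes drm_setmaster_ioctl() undo the
master assignment, as the error handling above shows.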
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fa7b9be096b..9929f84ec3e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_lvds.o \
intel_bios.o \
intel_dp.o \
- intel_dp_i2c.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
+ intel_overlay.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b531d..1184c14ba87 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (val != CH7017_DEVICE_ID_VALUE &&
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
- DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+ "Slave %d.\n",
val, i2cbus->adapter.name,dvo->slave_addr);
goto fail;
}
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
uint8_t horizontal_active_pixel_output, vertical_active_line_output;
uint8_t active_input_line_output;
- DRM_DEBUG("Registers before mode setting\n");
+ DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
/* Turn the LVDS back on with new settings. */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
- DRM_DEBUG("Registers after mode setting\n");
+ DRM_DEBUG_KMS("Registers after mode setting\n");
ch7017_dump_regs(dvo);
}
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
#define DUMP(reg) \
do { \
ch7017_read(dvo, reg, &val); \
- DRM_DEBUG(#reg ": %02x\n", val); \
+ DRM_DEBUG_KMS(#reg ": %02x\n", val); \
} while (0)
DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b89628968..d56ff5cc22b 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!ch7xxx->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!ch7xxx->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
- DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
goto out;
if (device != CH7xxx_DID) {
- DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
ch7xxx->quiet = false;
- DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
name, vendor, device);
return true;
out:
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
if ((i % 8) == 0 )
- DRM_DEBUG("\n %02X: ", i);
- DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+ DRM_LOG_KMS("\n %02X: ", i);
+ DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
}
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9921f..24169e528f0 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
};
if (!priv->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
return true;
if (!priv->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
* the address it's responding on.
*/
if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
- DRM_DEBUG("ivch detect failed due to address mismatch "
+ DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
"(%d vs %d)\n",
(temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
goto out;
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
uint16_t val;
ivch_read(dvo, VR00, &val);
- DRM_DEBUG("VR00: 0x%04x\n", val);
+ DRM_LOG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
- DRM_DEBUG("VR01: 0x%04x\n", val);
+ DRM_LOG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
- DRM_DEBUG("VR30: 0x%04x\n", val);
+ DRM_LOG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
- DRM_DEBUG("VR40: 0x%04x\n", val);
+ DRM_LOG_KMS("VR40: 0x%04x\n", val);
/* GPIO registers */
ivch_read(dvo, VR80, &val);
- DRM_DEBUG("VR80: 0x%04x\n", val);
+ DRM_LOG_KMS("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
- DRM_DEBUG("VR81: 0x%04x\n", val);
+ DRM_LOG_KMS("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
- DRM_DEBUG("VR82: 0x%04x\n", val);
+ DRM_LOG_KMS("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
- DRM_DEBUG("VR83: 0x%04x\n", val);
+ DRM_LOG_KMS("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
- DRM_DEBUG("VR84: 0x%04x\n", val);
+ DRM_LOG_KMS("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
- DRM_DEBUG("VR85: 0x%04x\n", val);
+ DRM_LOG_KMS("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
- DRM_DEBUG("VR86: 0x%04x\n", val);
+ DRM_LOG_KMS("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
- DRM_DEBUG("VR87: 0x%04x\n", val);
+ DRM_LOG_KMS("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
- DRM_DEBUG("VR88: 0x%04x\n", val);
+ DRM_LOG_KMS("VR88: 0x%04x\n", val);
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
- DRM_DEBUG("VR8E: 0x%04x\n", val);
+ DRM_LOG_KMS("VR8E: 0x%04x\n", val);
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
- DRM_DEBUG("VR8F: 0x%04x\n", val);
+ DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
static void ivch_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f7341e5..0001c13f0a8 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!sil->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!sil->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_VID & 0xff)) {
- DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_DID & 0xff)) {
- DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
- DRM_DEBUG("init sil164 dvo controller successfully!\n");
+ DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
return true;
out:
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
uint8_t val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
- DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
- DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
- DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
- DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
- DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907384e..c7c391bc116 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!tfp->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!tfp->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
- DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+ "Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
- DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+ "Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
uint8_t val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
- DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
- DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
- DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
- DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
- DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
- DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
- DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
- DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
- DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
- DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
- DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
- DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf0552b3c..18476bf0b58 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,7 @@
*/
#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -96,13 +97,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_gem_object *obj = obj_priv->obj;
- seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
+ seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
obj,
get_pin_flag(obj_priv),
obj->size,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno,
- obj_priv->dirty ? "dirty" : "");
+ obj_priv->dirty ? " dirty" : "",
+ obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -412,6 +414,109 @@ static int i915_registers_info(struct seq_file *m, void *data) {
return 0;
}
+static int
+i915_wedged_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_wedged_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof (buf),
+ "wedged : %d\n",
+ atomic_read(&dev_priv->mm.wedged));
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_wedged_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_INFO("Manually setting wedged to %d\n", val);
+
+ atomic_set(&dev_priv->mm.wedged, val);
+ if (val) {
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ queue_work(dev_priv->wq, &dev_priv->error_work);
+ }
+
+ return cnt;
+}
+
+static const struct file_operations i915_wedged_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_wedged_open,
+ .read = i915_wedged_read,
+ .write = i915_wedged_write,
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+ list_add(&node->list, &minor->debugfs_nodes.list);
+
+ return 0;
+}
+
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_wedged",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_wedged_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
static struct drm_info_list i915_debugfs_list[] = {
{"i915_regs", i915_registers_info, 0},
@@ -432,6 +537,12 @@ static struct drm_info_list i915_debugfs_list[] = {
int i915_debugfs_init(struct drm_minor *minor)
{
+ int ret;
+
+ ret = i915_wedged_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -441,7 +552,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
-
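From userspace the new node can be inspected with a plain read of
i915_wedged, or written to fake a GPU hang and exercise the error work.
A minimal sketch (the debugfs path assumes card 0 and the default
debugfs mount point):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* illustration only: force wedged to 1 to fake a hang */
		int fd = open("/sys/kernel/debug/dri/0/i915_wedged", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1", 1);
		close(fd);
		return 0;
	}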
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138be45f..701bfeac7f5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -807,6 +807,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -962,7 +968,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
* Some of the preallocated space is taken by the GTT
* and popup. GTT is 1K per MB of aperture size, and popup is 4K.
*/
- if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
+ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
overhead = 4096;
else
overhead = (*aperture_size / 1024) + 4096;
@@ -1048,7 +1054,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
int gtt_offset, gtt_size;
if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1070,7 +1076,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
- DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+ DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
/* Mask out these reserved bits on this hardware. */
if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1102,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
phys =(entry & PTE_ADDRESS_MASK) |
((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
- DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+ DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
return phys;
}
@@ -1306,7 +1312,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
- if (!IS_IGD(dev))
+ if (!IS_PINEVIEW(dev))
return;
tmp = I915_READ(CLKCFG);
@@ -1413,7 +1419,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_iomapfree;
- dev_priv->wq = create_workqueue("i915");
+ dev_priv->wq = create_singlethread_workqueue("i915");
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
@@ -1434,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@@ -1489,9 +1495,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- /* FIXME: verify on IGDNG */
- if (!IS_IGDNG(dev))
- intel_opregion_init(dev, 0);
+ intel_opregion_init(dev, 0);
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
@@ -1525,6 +1529,15 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->child_dev && dev_priv->child_dev_num) {
+ kfree(dev_priv->child_dev);
+ dev_priv->child_dev = NULL;
+ dev_priv->child_dev_num = 0;
+ }
drm_irq_uninstall(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1535,8 +1548,7 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
- if (!IS_IGDNG(dev))
- intel_opregion_free(dev, 0);
+ intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
@@ -1548,6 +1560,8 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
+
+ intel_cleanup_overlay(dev);
}
pci_dev_put(dev_priv->bridge_dev);
@@ -1656,6 +1670,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
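Userspace can probe the two new parameters before relying on the overlay
or page-flip ioctls. A sketch using libdrm's drmIoctl() on an
already-open DRM fd (illustrative helper, not part of the patch):

	#include <sys/ioctl.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	static int has_pageflipping(int fd)
	{
		struct drm_i915_getparam gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_PAGEFLIPPING;
		gp.value = &value;
		/* returns 0 on success; value is 1 when flips work */
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;
		return value;
	}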
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f436ec075f..2fa21786205 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -333,6 +333,7 @@ static struct drm_driver driver = {
.mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
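Hooking up drm_read lets i915 clients receive the new DRM events (e.g.
vblank completions) by reading the device fd. A sketch of draining the
event stream (assumes the drm_event header from the vblank-event series;
error handling elided):

	#include <unistd.h>
	#include <drm.h>

	static void drain_events(int fd)
	{
		char buf[1024];
		ssize_t len = read(fd, buf, sizeof(buf));
		ssize_t i = 0;

		while (i < len) {
			struct drm_event *e = (struct drm_event *)&buf[i];

			/* e->type identifies the event (e.g. vblank);
			 * e->length covers header plus payload */
			i += e->length;
		}
	}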
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f659119..fbecac72f5b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -170,6 +170,8 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
+struct intel_overlay;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -187,6 +189,7 @@ typedef struct drm_i915_private {
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
+ struct drm_gem_object *pwrctx;
struct resource mch_res;
@@ -206,11 +209,13 @@ typedef struct drm_i915_private {
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on IGDNG,
+	/** split irq regs for graphics and display engines on Ironlake;
irq_mask_reg is still used for display irq. */
u32 gt_irq_mask_reg;
u32 gt_irq_enable_reg;
u32 de_irq_enable_reg;
+ u32 pch_irq_mask_reg;
+ u32 pch_irq_enable_reg;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -240,6 +245,9 @@ typedef struct drm_i915_private {
struct intel_opregion opregion;
+ /* overlay */
+ struct intel_overlay *overlay;
+
/* LVDS info */
int backlight_duty_cycle; /* restore backlight to this value */
bool panel_wants_dither;
@@ -258,7 +266,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
- int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+ int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -280,6 +288,7 @@ typedef struct drm_i915_private {
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveRENDERSTANDBY;
+ u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
@@ -374,8 +383,6 @@ typedef struct drm_i915_private {
u32 saveFDI_RXA_IMR;
u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
- u32 saveD_STATE;
- u32 saveDSPCLK_GATE_D;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
@@ -539,13 +546,21 @@ typedef struct drm_i915_private {
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
+ struct drm_crtc *plane_to_crtc_mapping[2];
+ struct drm_crtc *pipe_to_crtc_mapping[2];
+ wait_queue_head_t pending_flip_queue;
+
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
+	/* indicates the reduced downclock for LVDS */
+ int lvds_downclock;
struct work_struct idle_work;
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
+ int child_dev_num;
+ struct child_device_config *child_dev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -638,6 +653,13 @@ struct drm_i915_gem_object {
* Advice: are the backing pages purgeable?
*/
int madv;
+
+ /**
+ * Number of crtcs where this object is currently the fb, but
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+ atomic_t pending_flip;
};
/**
@@ -738,6 +760,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void intel_enable_asle(struct drm_device *dev);
+
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -813,6 +837,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gem_idle(struct drm_device *dev);
+uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t flush_domains);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
@@ -824,6 +851,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
@@ -863,11 +891,13 @@ extern int i915_restore_state(struct drm_device *dev);
extern int intel_opregion_init(struct drm_device *dev, int resume);
extern void intel_opregion_free(struct drm_device *dev, int suspend);
extern void opregion_asle_intr(struct drm_device *dev);
+extern void ironlake_opregion_gse_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
#else
static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#endif
@@ -955,8 +985,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
@@ -990,47 +1020,51 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E42 || \
IS_GM45(dev))
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2 || \
- (IS_IGD(dev)))
+ (IS_PINEVIEW(dev)))
-#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IGDNG(dev))
+ IS_IRONLAKE(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_IGD(dev) || IS_IGDNG_M(dev))
+ IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IGDNG(dev))
+ IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
(IS_I9XX(dev) || IS_GM45(dev)) && \
- !IS_IGD(dev) && \
- !IS_IGDNG(dev))
+ !IS_PINEVIEW(dev) && \
+ !IS_IRONLAKE(dev))
+#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
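The pending_flip counter pairs with the new pending_flip_queue: the flip
completion path decrements it and wakes the queue, while execbuffer (see
i915_gem_wait_for_pending_flip() below) sleeps until it reaches zero. A
sketch of the completion side, assuming obj_priv and dev_priv are in
scope:

	if (atomic_dec_and_test(&obj_priv->pending_flip))
		DRM_WAKEUP(&dev_priv->pending_flip_queue);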
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2a3fa59992..8c463cf2050 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1288,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
DRM_ERROR("failed to add to map hash\n");
+ ret = -ENOMEM;
goto out_free_mm;
}
@@ -1583,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
*
* Returned sequence numbers are nonzero on success.
*/
-static uint32_t
+uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t flush_domains)
{
@@ -1617,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
- DRM_DEBUG("%d\n", seqno);
+ DRM_DEBUG_DRIVER("%d\n", seqno);
request->seqno = seqno;
request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1837,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
return -EIO;
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ if (interruptible)
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+ atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+ atomic_read(&dev_priv->mm.wedged));
+
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
@@ -1879,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
return ret;
}
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+ return i915_do_wait_request(dev, seqno, 1);
+}
+
static void
i915_gem_flush(struct drm_device *dev,
uint32_t invalidate_domains,
@@ -1947,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
#endif
BEGIN_LP_RING(2);
OUT_RING(cmd);
- OUT_RING(0); /* noop */
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
}
@@ -2760,6 +2772,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ i915_gem_object_flush_gtt_write_domain(obj);
+ break;
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_object_flush_cpu_write_domain(obj);
+ break;
+ default:
+ i915_gem_object_flush_gpu_write_domain(obj);
+ break;
+ }
+}
+
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -3525,6 +3553,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
return 0;
}
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+ struct drm_gem_object **object_list,
+ int count)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ DEFINE_WAIT(wait);
+ int i, ret = 0;
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->pending_flip_queue,
+ &wait, TASK_INTERRUPTIBLE);
+ for (i = 0; i < count; i++) {
+ obj_priv = object_list[i]->driver_private;
+ if (atomic_read(&obj_priv->pending_flip) > 0)
+ break;
+ }
+ if (i == count)
+ break;
+
+ if (!signal_pending(current)) {
+ mutex_unlock(&dev->struct_mutex);
+ schedule();
+ mutex_lock(&dev->struct_mutex);
+ continue;
+ }
+ ret = -ERESTARTSYS;
+ break;
+ }
+ finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+ return ret;
+}
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -3540,7 +3603,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
- int pin_tries;
+ int pin_tries, flips;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
/* Copy in the exec list from userland */
- exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
- object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
if (exec_list == NULL || object_list == NULL) {
DRM_ERROR("Failed to allocate exec or object list "
"for %d buffers\n",
@@ -3598,20 +3661,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
if (atomic_read(&dev_priv->mm.wedged)) {
- DRM_ERROR("Execbuf while wedged\n");
mutex_unlock(&dev->struct_mutex);
ret = -EIO;
goto pre_mutex_err;
}
if (dev_priv->mm.suspended) {
- DRM_ERROR("Execbuf while VT-switched.\n");
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
/* Look up object handles */
+ flips = 0;
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@@ -3630,6 +3692,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
goto err;
}
obj_priv->in_execbuffer = true;
+ flips += atomic_read(&obj_priv->pending_flip);
+ }
+
+ if (flips > 0) {
+ ret = i915_gem_wait_for_pending_flip(dev, object_list,
+ args->buffer_count);
+ if (ret)
+ goto err;
}
/* Pin and relocate */
@@ -4356,7 +4426,7 @@ i915_gem_init_hws(struct drm_device *dev)
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
I915_READ(HWS_PGA); /* posting read */
- DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+ DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
}
@@ -4614,8 +4684,8 @@ i915_gem_load(struct drm_device *dev)
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
}
-
i915_gem_detect_bit_6_swizzle(dev);
+ init_waitqueue_head(&dev_priv->pending_flip_queue);
}
/*
@@ -4790,7 +4860,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
- DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+ DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
ret = copy_from_user(obj_addr, user_data, args->size);
if (ret)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 200e398453c..30d6af6c09b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
- DRM_DEBUG("failed bus alloc: %d\n", ret);
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
goto out;
}
@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
bool need_disable;
- if (IS_IGDNG(dev)) {
- /* On IGDNG whatever DRAM config, GPU always do
+ if (IS_IRONLAKE(dev)) {
+	/* On Ironlake, regardless of DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
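For reference, I915_BIT_6_SWIZZLE_9_10 means address bit 6 is XORed with
bits 9 and 10 when tiled memory is accessed through the CPU. A helper
expressing that (illustrative only, not part of the patch):

	static unsigned long swizzle_addr_9_10(unsigned long addr)
	{
		unsigned long bit9 = (addr >> 9) & 1;
		unsigned long bit10 = (addr >> 10) & 1;

		/* flip bit 6 whenever bits 9 and 10 differ */
		return addr ^ ((bit9 ^ bit10) << 6);
	}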
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aa7fd82aa6e..85f4c5de97e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -43,10 +43,13 @@
* we leave them always unmasked in IMR and then control enabling them through
* PIPESTAT alone.
*/
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX \
+ (I915_ASLE_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -61,7 +64,7 @@
DRM_I915_VBLANK_PIPE_B)
void
-igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
static inline void
-igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
/* For display hotplug interrupt */
void
-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
static inline void
-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
dev_priv->irq_mask_reg |= mask;
@@ -157,6 +160,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
}
/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_display_irq(dev_priv, DE_GSE);
+ else
+ i915_enable_pipestat(dev_priv, 1,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+}
+
+/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %d\n", pipe);
return 0;
}
@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %d\n", pipe);
return 0;
}
@@ -250,12 +269,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_sysfs_hotplug_event(dev);
}
-irqreturn_t igdng_irq_handler(struct drm_device *dev)
+irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier;
- u32 new_de_iir, new_gt_iir;
+ u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 new_de_iir, new_gt_iir, new_pch_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
@@ -265,13 +284,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
+ pch_iir = I915_READ(SDEIIR);
for (;;) {
- if (de_iir == 0 && gt_iir == 0)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
break;
ret = IRQ_HANDLED;
+		/* the PCH hotplug event must be cleared before the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ new_pch_iir = I915_READ(SDEIIR);
+
I915_WRITE(DEIIR, de_iir);
new_de_iir = I915_READ(DEIIR);
I915_WRITE(GTIIR, gt_iir);
@@ -291,8 +315,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
DRM_WAKEUP(&dev_priv->irq_queue);
}
+ if (de_iir & DE_GSE)
+ ironlake_opregion_gse_intr(dev);
+
+ /* check event from PCH */
+ if ((de_iir & DE_PCH_EVENT) &&
+ (pch_iir & SDE_HOTPLUG_MASK)) {
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ }
+
de_iir = new_de_iir;
gt_iir = new_gt_iir;
+ pch_iir = new_pch_iir;
}
I915_WRITE(DEIER, de_ier);
@@ -317,19 +351,19 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
- DRM_DEBUG("generating error event\n");
+ DRM_DEBUG_DRIVER("generating error event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
if (IS_I965G(dev)) {
- DRM_DEBUG("resetting chip\n");
+ DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
if (!i965_reset(dev, GDRST_RENDER)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
} else {
- printk("reboot required\n");
+ DRM_DEBUG_DRIVER("reboot required\n");
}
}
}
@@ -355,7 +389,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
- DRM_DEBUG("out ot memory, not capturing error state\n");
+ DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
goto out;
}
@@ -512,7 +546,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- printk("i915: Waking up sleeping processes\n");
DRM_WAKEUP(&dev_priv->irq_queue);
}
@@ -535,8 +568,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&dev_priv->irq_received);
- if (IS_IGDNG(dev))
- return igdng_irq_handler(dev);
+ if (IS_IRONLAKE(dev))
+ return ironlake_irq_handler(dev);
iir = I915_READ(IIR);
@@ -568,14 +601,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
*/
if (pipea_stats & 0x8000ffff) {
if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG("pipe a underrun\n");
+ DRM_DEBUG_DRIVER("pipe a underrun\n");
I915_WRITE(PIPEASTAT, pipea_stats);
irq_received = 1;
}
if (pipeb_stats & 0x8000ffff) {
if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG("pipe b underrun\n");
+ DRM_DEBUG_DRIVER("pipe b underrun\n");
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
@@ -591,7 +624,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- DRM_DEBUG("hotplug event received, stat 0x%08x\n",
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
queue_work(dev_priv->wq,
@@ -599,27 +632,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
-
- /* EOS interrupts occurs */
- if (IS_IGD(dev) &&
- (hotplug_status & CRT_EOS_INT_STATUS)) {
- u32 temp;
-
- DRM_DEBUG("EOS interrupt occurs\n");
- /* status is already cleared */
- temp = I915_READ(ADPA);
- temp &= ~ADPA_DAC_ENABLE;
- I915_WRITE(ADPA, temp);
-
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp &= ~CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
-
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- }
}
I915_WRITE(IIR, iir);
@@ -641,14 +653,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
+ intel_finish_page_flip(dev, 0);
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
+ intel_finish_page_flip(dev, 1);
}
if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@@ -684,7 +704,7 @@ static int i915_emit_irq(struct drm_device * dev)
i915_kernel_lost_context(dev);
- DRM_DEBUG("\n");
+ DRM_DEBUG_DRIVER("\n");
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +729,8 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (IS_IGDNG(dev))
- igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -725,8 +745,8 @@ void i915_user_irq_put(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IGDNG(dev))
- igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ if (IS_IRONLAKE(dev))
+ ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -749,7 +769,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,7 +852,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return 0;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -854,7 +874,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -868,7 +888,7 @@ void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -976,7 +996,7 @@ void i915_hangcheck_elapsed(unsigned long data)
/* drm_dma.h hooks
*/
-static void igdng_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -992,14 +1012,21 @@ static void igdng_irq_preinstall(struct drm_device *dev)
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
(void) I915_READ(GTIER);
+
+ /* south display irq */
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ (void) I915_READ(SDEIER);
}
-static int igdng_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
u32 render_mask = GT_USER_INTERRUPT;
+ u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
dev_priv->de_irq_enable_reg = display_mask;
@@ -1019,6 +1046,14 @@ static int igdng_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
+ dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+ dev_priv->pch_irq_enable_reg = hotplug_mask;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
+ I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
+ (void) I915_READ(SDEIER);
+
return 0;
}
@@ -1031,8 +1066,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_IGDNG(dev)) {
- igdng_irq_preinstall(dev);
+ if (IS_IRONLAKE(dev)) {
+ ironlake_irq_preinstall(dev);
return;
}
@@ -1059,8 +1094,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- if (IS_IGDNG(dev))
- return igdng_irq_postinstall(dev);
+ if (IS_IRONLAKE(dev))
+ return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -1120,7 +1155,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void igdng_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1178,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
- if (IS_IGDNG(dev)) {
- igdng_irq_uninstall(dev);
+ if (IS_IRONLAKE(dev)) {
+ ironlake_irq_uninstall(dev);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 2d5193556d3..7cc8410239c 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -118,6 +118,10 @@ struct opregion_asle {
#define ASLE_BACKLIGHT_FAIL (2<<12)
#define ASLE_PFIT_FAIL (2<<14)
#define ASLE_PWM_FREQ_FAIL (2<<16)
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
else {
- if (IS_IGD(dev)) {
+ if (IS_PINEVIEW(dev)) {
blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
asle_req = asle->aslc & ASLE_REQ_MSK;
if (!asle_req) {
- DRM_DEBUG("non asle set request??\n");
+ DRM_DEBUG_DRIVER("non asle set request??\n");
return;
}
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
+static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 cpu_pwm_ctl, pch_pwm_ctl2;
+ u32 max_backlight, level;
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
+ pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	/* the PCH PWM period acts as the maximum backlight value */
+	max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
+	/* scale the requested brightness into a PWM duty cycle */
+	level = (bclp * max_backlight) / 255;
+	/* clear the old duty cycle, preserving the high 16 bits */
+	cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
+	/* write the updated PWM duty cycle */
+ I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
+
+ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
+
+void ironlake_opregion_gse_intr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+
+ if (!asle_req) {
+ DRM_DEBUG_DRIVER("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_ALS_ILLUM) {
+ DRM_DEBUG_DRIVER("Illum is not supported\n");
+ asle_stat |= ASLE_ALS_ILLUM_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+
+ if (asle_req & ASLE_SET_PFIT) {
+ DRM_DEBUG_DRIVER("Pfit is not supported\n");
+ asle_stat |= ASLE_PFIT_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_PWM_FREQ) {
+ DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+ asle_stat |= ASLE_PWM_FREQ_FAILED;
+ }
+
+ asle->aslc = asle_stat;
+}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 1,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ intel_enable_asle(dev);
spin_unlock_irqrestore(&dev_priv->user_irq_lock,
irqflags);
}
@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
- DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+ DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
- DRM_DEBUG("ACPI OpRegion not supported!\n");
+ DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
opregion->header = base;
if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG("opregion signature mismatch\n");
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- DRM_DEBUG("Public ACPI methods supported\n");
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_didl_outputs(dev);
} else {
- DRM_DEBUG("Public ACPI methods not supported\n");
+ DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
err = -ENOTSUPP;
goto err_out;
}
opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG("SWSCI supported\n");
+ DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
}
if (mboxes & MBOX_ASLE) {
- DRM_DEBUG("ASLE supported\n");
+ DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion_enable_asle(dev);
}
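
The new GSE path above mirrors the pre-Ironlake ASLE handler: read the request bits from asle->aslc, service the backlight request, and report status back through asle->aslc and asle->cblv. As a reading aid, here is a minimal standalone sketch of the scaling arithmetic in asle_set_backlight_ironlake(); the helper name asle_scale and the sample BLC_PWM_PCH_CTL2 value are illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Standalone model (hypothetical helper, not in the patch) of the scaling
 * in asle_set_backlight_ironlake(): firmware requests a level in 0..255
 * (bclp), the driver rescales it against the max PWM duty cycle read from
 * BLC_PWM_PCH_CTL2 and echoes it back as a 0..100 percentage (cblv). */
static int asle_scale(uint32_t bclp, uint32_t max_duty,
		      uint32_t *level, uint32_t *cblv)
{
	if (bclp > 255)
		return -1;		/* ASLE_BACKLIGHT_FAILED in the driver */

	*level = (bclp * max_duty) / 255;	/* PWM duty cycle to program */
	*cblv = (bclp * 100) / 255;		/* percentage reported to ASLE */
	return 0;
}

int main(void)
{
	uint32_t level, cblv;
	uint32_t max_duty = 0x1fff;	/* example BLC_PWM_PCH_CTL2 field */

	if (asle_scale(128, max_duty, &level, &cblv) == 0)
		printf("duty=%u cblv=%u%%\n", (unsigned)level, (unsigned)cblv);
	return 0;
}
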
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1687edf6879..974b3cf7061 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -140,6 +140,7 @@
#define MI_NOOP MI_INSTR(0, 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +152,13 @@
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -260,6 +267,8 @@
#define HWS_PGA 0x02080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
+#define PWRCTXA 0x2088 /* 965GM+ only */
+#define PWRCTX_EN (1<<0)
#define IPEIR 0x02088
#define IPEHR 0x0208c
#define INSTDONE 0x02090
@@ -405,6 +414,13 @@
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+#define GMBUS0 0x5100
+#define GMBUS1 0x5104
+#define GMBUS2 0x5108
+#define GMBUS3 0x510c
+#define GMBUS4 0x5110
+#define GMBUS5 0x5120
+
/*
* Clock control & power management
*/
@@ -435,7 +451,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -512,7 +528,7 @@
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
/* i830, required in DVO non-gang */
#define PLL_P2_DIVIDE_BY_4 (1 << 23)
#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -522,7 +538,7 @@
#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
#define PLL_REF_INPUT_MASK (3 << 13)
#define PLL_LOAD_PULSE_PHASE_SHIFT 9
-/* IGDNG */
+/* Ironlake */
# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
@@ -586,12 +602,12 @@
#define FPB0 0x06048
#define FPB1 0x0604c
#define FP_N_DIV_MASK 0x003f0000
-#define FP_N_IGD_DIV_MASK 0x00ff0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
#define FP_M1_DIV_MASK 0x00003f00
#define FP_M1_DIV_SHIFT 8
#define FP_M2_DIV_MASK 0x0000003f
-#define FP_M2_IGD_DIV_MASK 0x000000ff
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST 0x606c
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -769,7 +785,8 @@
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
-
+#define RCX_SW_EXIT (1<<23)
+#define RSX_STATUS_MASK 0x00700000
#define PEG_BAND_GAP_DATA 0x14d68
/*
@@ -844,7 +861,6 @@
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
-#define CRT_EOS_INT_EN (1 << 10)
#define CRT_HOTPLUG_INT_EN (1 << 9)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
@@ -868,7 +884,6 @@
HDMID_HOTPLUG_INT_EN | \
SDVOB_HOTPLUG_INT_EN | \
SDVOC_HOTPLUG_INT_EN | \
- TV_HOTPLUG_INT_EN | \
CRT_HOTPLUG_INT_EN)
@@ -879,7 +894,6 @@
#define DPC_HOTPLUG_INT_STATUS (1 << 28)
#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (1 << 27)
-#define CRT_EOS_INT_STATUS (1 << 12)
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -1620,7 +1634,7 @@
#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
#define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
/** limit RGB values to avoid confusing TVs */
#define DP_COLOR_RANGE_16_235 (1 << 8)
@@ -1808,7 +1822,7 @@
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
-#define IGD_SELF_REFRESH_EN (1<<30)
+#define PINEVIEW_SELF_REFRESH_EN (1<<30)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -1824,16 +1838,16 @@
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
-#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
-#define IGD_FIFO_LINE_SIZE 64
-#define IGD_MAX_WM 0x1ff
-#define IGD_DFT_WM 0x3f
-#define IGD_DFT_HPLLOFF_WM 0
-#define IGD_GUARD_WM 10
-#define IGD_CURSOR_FIFO 64
-#define IGD_CURSOR_MAX_WM 0x3f
-#define IGD_CURSOR_DFT_WM 0
-#define IGD_CURSOR_GUARD_WM 5
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
/*
* The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +1921,7 @@
#define DISPPLANE_16BPP (0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_MASK (1<<24)
@@ -1918,7 +1933,7 @@
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define DSPAADDR 0x70184
#define DSPASTRIDE 0x70188
@@ -1971,7 +1986,7 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
-/* IGDNG */
+/* Ironlake */
#define CPU_VGACNTRL 0x41000
@@ -2117,6 +2132,7 @@
#define SDE_PORTC_HOTPLUG (1 << 9)
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (0xf << 8)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2157,6 +2173,13 @@
#define PCH_GPIOE 0xc5020
#define PCH_GPIOF 0xc5024
+#define PCH_GMBUS0 0xc5100
+#define PCH_GMBUS1 0xc5104
+#define PCH_GMBUS2 0xc5108
+#define PCH_GMBUS3 0xc510c
+#define PCH_GMBUS4 0xc5110
+#define PCH_GMBUS5 0xc5120
+
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
@@ -2292,7 +2315,7 @@
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
#define FDI_DP_PORT_WIDTH_X4 (3<<19)
#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
-/* IGDNG: hardwired to 1 */
+/* Ironlake: hardwired to 1 */
#define FDI_TX_PLL_ENABLE (1<<14)
/* both Tx and Rx */
#define FDI_SCRAMBLING_ENABLE (0<<7)
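
For reference on the new MI_DISPLAY_FLIP and MI_OVERLAY_* additions: MI commands pack an opcode and flags into a single dword via MI_INSTR. Assuming the driver's usual encoding (opcode in bits 28:23, flags in the low bits; the MI_INSTR macro itself is outside this hunk), a flip command targeting plane B would be assembled as in this sketch:

#include <stdint.h>
#include <stdio.h>

/* Assumed definition, matching the i915 convention: opcode in bits 28:23,
 * flags/length in the low bits. */
#define MI_INSTR(opcode, flags)	(((opcode) << 23) | (flags))

#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)

int main(void)
{
	/* First dword of a flip command for plane B (n = 1) */
	uint32_t cmd = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(1);

	printf("0x%08x\n", (unsigned)cmd);	/* 0x0a100002 expected */
	return 0;
}
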
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6eec8171a44..d5ebb00a9d4 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -27,14 +27,14 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
-#include "i915_drv.h"
+#include "intel_drv.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev)
}
/* VGA state */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -733,8 +733,10 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
+ if (I915_HAS_RC6(dev)) {
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+ dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
+ }
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -742,7 +744,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
@@ -754,10 +756,6 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- /* Clock gating state */
- dev_priv->saveD_STATE = I915_READ(D_STATE);
- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
-
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -796,8 +794,10 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
+ if (I915_HAS_RC6(dev)) {
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+ I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
+ }
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -817,7 +817,7 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +830,7 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
- I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+ intel_init_clock_gating(dev);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +845,9 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ /* I2C state */
+ intel_i2c_reset_gmbus(dev);
+
return 0;
}
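
A note on the save/restore hunks above: the two sides must stay symmetric, which is why both the save and the restore of the render-standby registers are now gated on the same I915_HAS_RC6() predicate. A hypothetical reduction of that pattern, with the MMIO accessors stubbed out (the register offsets are the real ones from i915_reg.h, everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MCHBAR_RENDER_STANDBY	0x111B8
#define PWRCTXA			0x02088

struct saved { uint32_t standby, pwrctxa; };

/* A register saved under a condition in i915_save_state() must be
 * restored under the *same* condition in i915_restore_state(), otherwise
 * resume writes stale or uninitialized values. */
static void save_state(struct saved *s, int has_rc6,
		       uint32_t (*rd)(uint32_t))
{
	if (has_rc6) {
		s->standby = rd(MCHBAR_RENDER_STANDBY);
		s->pwrctxa = rd(PWRCTXA);
	}
}

static void restore_state(const struct saved *s, int has_rc6,
			  void (*wr)(uint32_t, uint32_t))
{
	if (has_rc6) {
		wr(MCHBAR_RENDER_STANDBY, s->standby);
		wr(PWRCTXA, s->pwrctxa);
	}
}

static uint32_t fake_rd(uint32_t reg) { return reg ^ 0x5a5a; }
static void fake_wr(uint32_t reg, uint32_t v)
{
	printf("W %06x=%08x\n", (unsigned)reg, (unsigned)v);
}

int main(void)
{
	struct saved s;

	save_state(&s, 1, fake_rd);
	restore_state(&s, 1, fake_wr);
	return 0;
}
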
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96cd256e60e..f2756774758 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,6 +114,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
int lfp_data_size, dvo_timing_offset;
+ int i, temp_downclock;
+ struct drm_display_mode *temp_mode;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
@@ -159,9 +161,49 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
+ temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
+ if (!temp_mode)
+ return;
+ temp_downclock = panel_fixed_mode->clock;
+ /*
+ * Enumerate the LVDS panel timing info entries in the VBT to check
+ * whether an LVDS downclock is available.
+ */
+ for (i = 0; i < 16; i++) {
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
+ dvo_timing = (struct lvds_dvo_timing *)
+ ((unsigned char *)entry + dvo_timing_offset);
+
+ fill_detail_timing_data(temp_mode, dvo_timing);
+
+ if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
+ temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
+ temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
+ temp_mode->htotal == panel_fixed_mode->htotal &&
+ temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
+ temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
+ temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
+ temp_mode->vtotal == panel_fixed_mode->vtotal &&
+ temp_mode->clock < temp_downclock) {
+ /*
+ * A downclock was already found, but keep
+ * looking for the lowest one.
+ */
+ temp_downclock = temp_mode->clock;
+ }
+ /* reset temp_mode for the next entry */
+ memset(temp_mode, 0, sizeof(*temp_mode));
+ }
+ kfree(temp_mode);
+ if (temp_downclock < panel_fixed_mode->clock) {
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ temp_downclock, panel_fixed_mode->clock);
+ }
return;
}
@@ -217,7 +259,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IGDNG(dev_priv->dev))
+ else if (IS_IRONLAKE(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
@@ -241,22 +283,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
GPIOF,
};
- /* Set sensible defaults in case we can't find the general block
- or it is the wrong chipset */
- dev_priv->crt_ddc_bus = -1;
-
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
u16 block_size = get_blocksize(general);
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
- DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if ((bus_pin >= 1) && (bus_pin <= 6)) {
dev_priv->crt_ddc_bus =
crt_bus_map_table[bus_pin-1];
}
} else {
- DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+ DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
}
}
@@ -274,7 +312,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -284,7 +322,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
*/
if (p_defs->child_dev_size != sizeof(*p_child)) {
/* different child dev size . Ignore it */
- DRM_DEBUG("different child size is found. Invalid.\n");
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
return;
}
/* get the block size of general definitions */
@@ -310,11 +348,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
if (p_child->dvo_port != DEVICE_PORT_DVOB &&
p_child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG("Incorrect SDVO port. Skip it \n");
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
continue;
}
- DRM_DEBUG("the SDVO device with slave addr %2x is found on "
- "%s port\n",
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
p_child->slave_addr,
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
@@ -325,21 +363,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->initialized = 1;
} else {
- DRM_DEBUG("Maybe one SDVO port is shared by "
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
if (p_child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG("there exists the slave2_addr. Maybe this "
- "is a SDVO device with multiple inputs.\n");
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
}
count++;
}
if (!count) {
/* No SDVO device info is found */
- DRM_DEBUG("No SDVO device info is found in VBT\n");
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
}
return;
}
@@ -366,6 +404,70 @@ parse_driver_features(struct drm_i915_private *dev_priv,
dev_priv->render_reclock_avail = true;
}
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found\n");
+ return;
+ }
+ /* Check that the child device size meets the requirement:
+ * if the size obtained from the general definition block differs
+ * from sizeof(struct child_device_config), skip parsing the
+ * child device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* different child dev size. Ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child device that is present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+ return;
+ }
+ dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child device\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
/**
* intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -417,6 +519,7 @@ intel_init_bios(struct drm_device *dev)
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
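
The downclock scan added to parse_lfp_panel_data() walks all 16 LFP timing entries and keeps the lowest pixel clock among entries whose geometry matches the fixed panel mode. A simplified, self-contained distillation of that loop (comparing fewer timing fields than the driver does) under those assumptions:

#include <stddef.h>
#include <stdio.h>

struct mode { int hdisplay, htotal, vdisplay, vtotal, clock; };

static int same_geometry(const struct mode *a, const struct mode *b)
{
	return a->hdisplay == b->hdisplay && a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay && a->vtotal == b->vtotal;
}

/* Walk the VBT entries; any entry with matching geometry and a lower
 * clock is a downclock candidate, and we keep the lowest one seen. */
static int find_downclock(const struct mode *entries, size_t n,
			  const struct mode *fixed)
{
	int downclock = fixed->clock;
	size_t i;

	for (i = 0; i < n; i++)
		if (same_geometry(&entries[i], fixed) &&
		    entries[i].clock < downclock)
			downclock = entries[i].clock;

	return downclock;	/* < fixed->clock iff a downclock exists */
}

int main(void)
{
	struct mode fixed = { 1366, 1500, 768, 800, 69300 };
	struct mode vbt[2] = { fixed, { 1366, 1500, 768, 800, 48000 } };

	printf("downclock %d KHz\n", find_downclock(vbt, 2, &fixed));
	return 0;
}
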
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 0f8e5f69ac7..425ac9d7f72 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *dev);
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
+/* Device classes for LFP, TV, HDMI, DP and eDP */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
#endif /* _I830_BIOS_H_ */
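
These device-class codes let consumers of parse_device_mapping() tell panel, TV, HDMI, DP and eDP children apart by their VBT device_type field. A hypothetical classifier, purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define DEVICE_TYPE_INT_LFP 0x1022
#define DEVICE_TYPE_INT_TV  0x1009
#define DEVICE_TYPE_HDMI    0x60D2
#define DEVICE_TYPE_DP      0x68C6
#define DEVICE_TYPE_eDP     0x78C6

/* Map a VBT child device_type to a human-readable output class. */
static const char *classify(uint16_t device_type)
{
	switch (device_type) {
	case DEVICE_TYPE_INT_LFP: return "internal LVDS panel";
	case DEVICE_TYPE_INT_TV:  return "integrated TV";
	case DEVICE_TYPE_HDMI:    return "HDMI";
	case DEVICE_TYPE_DP:      return "DisplayPort";
	case DEVICE_TYPE_eDP:     return "embedded DisplayPort";
	default:                  return "unknown";
	}
}

int main(void)
{
	printf("%s\n", classify(DEVICE_TYPE_eDP));
	return 0;
}
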
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e5051446c48..9f3d3e56341 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = PCH_ADPA;
else
reg = ADPA;
@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
}
I915_WRITE(reg, temp);
-
- if (IS_IGD(dev)) {
- if (mode == DRM_MODE_DPMS_OFF) {
- /* turn off DAC */
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp &= ~CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
-
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- } else {
- /* turn on DAC. EOS interrupt must be enabled after DAC
- * is enabled, so it sounds not good to enable it in
- * i915_driver_irq_postinstall()
- * wait 12.5ms after DAC is enabled
- */
- msleep(13);
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp |= CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
- }
- }
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
else
dpll_md_reg = DPLL_B_MD;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, 0);
}
I915_WRITE(adpa_reg, adpa);
}
-static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,7 +166,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
ADPA_CRT_HOTPLUG_ENABLE |
ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
- DRM_DEBUG("pch crt adpa 0x%x", adpa);
+ DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +199,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
u32 hotplug_en;
int i, tries = 0;
- if (IS_IGDNG(dev))
- return intel_igdng_crt_detect_hotplug(connector);
+ if (IS_IRONLAKE(dev))
+ return intel_ironlake_crt_detect_hotplug(connector);
/*
* On 4 series desktop, CRT detect sequence need to be done twice
@@ -549,12 +521,12 @@ void intel_crt_init(struct drm_device *dev)
&intel_output->enc);
/* Set up the DDC bus. */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
i2c_reg = PCH_GPIOA;
else {
i2c_reg = GPIOA;
/* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != -1)
+ if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}
intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
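
The crt_ddc_bus change above drops the old -1 sentinel: dev_priv is zeroed at allocation, and since every valid VBT-provided DDC bus is a nonzero GPIO register offset, zero now means "not set". A sketch of the resulting selection logic (pick_ddc_reg is a hypothetical distillation of intel_crt_init(); the GPIOA offset is the one from i915_reg.h):

#include <stdio.h>

#define GPIOA 0x5010	/* default CRT DDC pin */

static int pick_ddc_reg(int crt_ddc_bus)
{
	int i2c_reg = GPIOA;		/* hardware default */

	if (crt_ddc_bus != 0)		/* nonzero == VBT-provided override */
		i2c_reg = crt_ddc_bus;
	return i2c_reg;
}

int main(void)
{
	printf("0x%x 0x%x\n", pick_ddc_reg(0), pick_ddc_reg(0x5014));
	return 0;
}
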
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 099f420de57..52cd9b006da 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -32,7 +32,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
@@ -102,32 +102,32 @@ struct intel_limit {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
-#define IGD_VCO_MIN 1700000
-#define IGD_VCO_MAX 3500000
+#define PINEVIEW_VCO_MIN 1700000
+#define PINEVIEW_VCO_MAX 3500000
#define I9XX_N_MIN 1
#define I9XX_N_MAX 6
-/* IGD's Ncounter is a ring counter */
-#define IGD_N_MIN 3
-#define IGD_N_MAX 6
+/* Pineview's Ncounter is a ring counter */
+#define PINEVIEW_N_MIN 3
+#define PINEVIEW_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
-#define IGD_M_MIN 2
-#define IGD_M_MAX 256
+#define PINEVIEW_M_MIN 2
+#define PINEVIEW_M_MAX 256
#define I9XX_M1_MIN 10
#define I9XX_M1_MAX 22
#define I9XX_M2_MIN 5
#define I9XX_M2_MAX 9
-/* IGD M1 is reserved, and must be 0 */
-#define IGD_M1_MIN 0
-#define IGD_M1_MAX 0
-#define IGD_M2_MIN 0
-#define IGD_M2_MAX 254
+/* Pineview M1 is reserved, and must be 0 */
+#define PINEVIEW_M1_MIN 0
+#define PINEVIEW_M1_MAX 0
+#define PINEVIEW_M2_MIN 0
+#define PINEVIEW_M2_MAX 254
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
#define I9XX_P_LVDS_MAX 98
-#define IGD_P_LVDS_MIN 7
-#define IGD_P_LVDS_MAX 112
+#define PINEVIEW_P_LVDS_MIN 7
+#define PINEVIEW_P_LVDS_MAX 112
#define I9XX_P1_MIN 1
#define I9XX_P1_MAX 8
#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -234,33 +234,33 @@ struct intel_limit {
#define G4X_P2_DISPLAY_PORT_FAST 10
#define G4X_P2_DISPLAY_PORT_LIMIT 0
-/* IGDNG */
+/* Ironlake */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
*/
-#define IGDNG_DOT_MIN 25000
-#define IGDNG_DOT_MAX 350000
-#define IGDNG_VCO_MIN 1760000
-#define IGDNG_VCO_MAX 3510000
-#define IGDNG_N_MIN 1
-#define IGDNG_N_MAX 5
-#define IGDNG_M_MIN 79
-#define IGDNG_M_MAX 118
-#define IGDNG_M1_MIN 12
-#define IGDNG_M1_MAX 23
-#define IGDNG_M2_MIN 5
-#define IGDNG_M2_MAX 9
-#define IGDNG_P_SDVO_DAC_MIN 5
-#define IGDNG_P_SDVO_DAC_MAX 80
-#define IGDNG_P_LVDS_MIN 28
-#define IGDNG_P_LVDS_MAX 112
-#define IGDNG_P1_MIN 1
-#define IGDNG_P1_MAX 8
-#define IGDNG_P2_SDVO_DAC_SLOW 10
-#define IGDNG_P2_SDVO_DAC_FAST 5
-#define IGDNG_P2_LVDS_SLOW 14 /* single channel */
-#define IGDNG_P2_LVDS_FAST 7 /* double channel */
-#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */
+#define IRONLAKE_DOT_MIN 25000
+#define IRONLAKE_DOT_MAX 350000
+#define IRONLAKE_VCO_MIN 1760000
+#define IRONLAKE_VCO_MAX 3510000
+#define IRONLAKE_N_MIN 1
+#define IRONLAKE_N_MAX 5
+#define IRONLAKE_M_MIN 79
+#define IRONLAKE_M_MAX 118
+#define IRONLAKE_M1_MIN 12
+#define IRONLAKE_M1_MAX 23
+#define IRONLAKE_M2_MIN 5
+#define IRONLAKE_M2_MAX 9
+#define IRONLAKE_P_SDVO_DAC_MIN 5
+#define IRONLAKE_P_SDVO_DAC_MAX 80
+#define IRONLAKE_P_LVDS_MIN 28
+#define IRONLAKE_P_LVDS_MAX 112
+#define IRONLAKE_P1_MIN 1
+#define IRONLAKE_P1_MAX 8
+#define IRONLAKE_P2_SDVO_DAC_SLOW 10
+#define IRONLAKE_P2_SDVO_DAC_FAST 5
+#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
+#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
+#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225MHz */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -272,15 +272,15 @@ static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -453,13 +453,13 @@ static const intel_limit_t intel_limits_g4x_display_port = {
.find_pll = intel_find_pll_g4x_dp,
};
-static const intel_limit_t intel_limits_igd_sdvo = {
+static const intel_limit_t intel_limits_pineview_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
- .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
- .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
- .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
- .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
- .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
+ .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
+ .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
+ .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
+ .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
+ .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
.p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
@@ -468,59 +468,59 @@ static const intel_limit_t intel_limits_igd_sdvo = {
.find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_igd_lvds = {
+static const intel_limit_t intel_limits_pineview_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
- .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
- .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
- .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
- .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
- .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
- .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
+ .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
+ .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
+ .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
+ .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
+ .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
+ .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
- /* IGD only supports single-channel mode. */
+ /* Pineview only supports single-channel mode. */
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
.find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_igdng_sdvo = {
- .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
- .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
- .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
- .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
- .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
- .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
- .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX },
- .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
- .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
- .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
- .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
- .find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_sdvo = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
+ .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
+ .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+ .find_pll = intel_ironlake_find_best_PLL,
};
-static const intel_limit_t intel_limits_igdng_lvds = {
- .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
- .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
- .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
- .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
- .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
- .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
- .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX },
- .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
- .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
- .p2_slow = IGDNG_P2_LVDS_SLOW,
- .p2_fast = IGDNG_P2_LVDS_FAST },
- .find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_lvds = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
+ .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_P2_LVDS_SLOW,
+ .p2_fast = IRONLAKE_P2_LVDS_FAST },
+ .find_pll = intel_ironlake_find_best_PLL,
};
-static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_igdng_lvds;
+ limit = &intel_limits_ironlake_lvds;
else
- limit = &intel_limits_igdng_sdvo;
+ limit = &intel_limits_ironlake_sdvo;
return limit;
}
@@ -557,20 +557,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_IGDNG(dev))
- limit = intel_igdng_limit(crtc);
+ if (IS_IRONLAKE(dev))
+ limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
- } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
+ } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
- } else if (IS_IGD(dev)) {
+ } else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_igd_lvds;
+ limit = &intel_limits_pineview_lvds;
else
- limit = &intel_limits_igd_sdvo;
+ limit = &intel_limits_pineview_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
@@ -580,8 +580,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
return limit;
}
-/* m1 is reserved as 0 in IGD, n is a ring counter */
-static void igd_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -591,8 +591,8 @@ static void igd_clock(int refclk, intel_clock_t *clock)
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
- if (IS_IGD(dev)) {
- igd_clock(refclk, clock);
+ if (IS_PINEVIEW(dev)) {
+ pineview_clock(refclk, clock);
return;
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
@@ -657,7 +657,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid ("m1 out of range\n");
- if (clock->m1 <= clock->m2 && !IS_IGD(dev))
+ if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid ("m out of range\n");
@@ -706,16 +706,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset (best_clock, 0, sizeof (*best_clock));
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in IGD */
- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
- break;
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
intel_clock(dev, refclk, &clock);
@@ -751,8 +752,8 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in IGD */
- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
break;
for (clock.n = limit->n.min; clock.n <= limit->n.max;
clock.n++) {
@@ -833,8 +834,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
@@ -857,8 +858,8 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -871,7 +872,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- return intel_find_pll_igdng_dp(limit, crtc, target,
+ return intel_find_pll_ironlake_dp(limit, crtc, target,
refclk, best_clock);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -949,7 +950,7 @@ void
intel_wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
+ msleep(20);
}
/* Parameters have changed, update FBC info */
@@ -994,7 +995,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
@@ -1017,7 +1018,7 @@ void i8xx_disable_fbc(struct drm_device *dev)
intel_wait_for_vblank(dev);
- DRM_DEBUG("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
}
static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
@@ -1062,7 +1063,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
- DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void g4x_disable_fbc(struct drm_device *dev)
@@ -1076,7 +1077,7 @@ void g4x_disable_fbc(struct drm_device *dev)
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
intel_wait_for_vblank(dev);
- DRM_DEBUG("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
}
static bool g4x_fbc_enabled(struct drm_crtc *crtc)
@@ -1141,25 +1142,27 @@ static void intel_update_fbc(struct drm_crtc *crtc,
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
if (intel_fb->obj->size > dev_priv->cfb_size) {
- DRM_DEBUG("framebuffer too large, disabling compression\n");
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
goto out_disable;
}
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG("mode incompatible with compression, disabling\n");
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
goto out_disable;
}
if ((mode->hdisplay > 2048) ||
(mode->vdisplay > 1536)) {
- DRM_DEBUG("mode too large for compression, disabling\n");
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
- DRM_DEBUG("plane not 0, disabling compression\n");
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
goto out_disable;
}
if (obj_priv->tiling_mode != I915_TILING_X) {
- DRM_DEBUG("framebuffer not tiled, disabling compression\n");
+ DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
goto out_disable;
}
@@ -1181,13 +1184,57 @@ static void intel_update_fbc(struct drm_crtc *crtc,
return;
out_disable:
- DRM_DEBUG("unsupported config, disabling FBC\n");
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
if (dev_priv->display.fbc_enabled(crtc))
dev_priv->display.disable_fbc(dev);
}
static int
+intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ u32 alignment;
+ int ret;
+
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_NONE:
+ alignment = 64 * 1024;
+ break;
+ case I915_TILING_X:
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ break;
+ case I915_TILING_Y:
+ /* FIXME: Is this true? */
+ DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ return -EINVAL;
+ default:
+ BUG();
+ }
+
+ ret = i915_gem_object_pin(obj, alignment);
+ if (ret != 0)
+ return ret;
+
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always install
+ * a fence as the cost is not that onerous.
+ */
+ if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ obj_priv->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -1206,12 +1253,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- u32 dspcntr, alignment;
+ u32 dspcntr;
int ret;
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -1228,24 +1275,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
obj = intel_fb->obj;
obj_priv = obj->driver_private;
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- alignment = 64 * 1024;
- break;
- case I915_TILING_X:
- /* pin() will align the object as required by fence */
- alignment = 0;
- break;
- case I915_TILING_Y:
- /* FIXME: Is this true? */
- DRM_ERROR("Y tiled not allowed for scan out buffers\n");
- return -EINVAL;
- default:
- BUG();
- }
-
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, alignment);
+ ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1258,20 +1289,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
- * whereas 965+ only requires a fence if using framebuffer compression.
- * For simplicity, we always install a fence as the cost is not that onerous.
- */
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
-
dspcntr = I915_READ(dspcntr_reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -1287,7 +1304,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
break;
case 24:
case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ if (crtc->fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
DRM_ERROR("Unknown color depth\n");
@@ -1302,7 +1322,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1311,7 +1331,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start = obj_priv->gtt_offset;
Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
- DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
I915_WRITE(dspbase, Offset);
@@ -1363,7 +1383,7 @@ static void i915_disable_vga (struct drm_device *dev)
u8 sr1;
u32 vga_reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
@@ -1379,19 +1399,19 @@ static void i915_disable_vga (struct drm_device *dev)
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
}
-static void igdng_disable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
}
-static void igdng_enable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1404,13 +1424,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
}
-static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG("eDP PLL enable for clock %d\n", clock);
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
@@ -1440,7 +1460,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
-static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1481,10 +1501,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG("crtc %d dpms on\n", pipe);
+ DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0) {
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ POSTING_READ(PCH_LVDS);
+ }
+ }
+
if (HAS_eDP) {
/* enable eDP PLL */
- igdng_enable_pll_edp(crtc);
+ ironlake_enable_pll_edp(crtc);
} else {
/* enable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
@@ -1501,7 +1530,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_rx_reg);
udelay(200);
- /* Enable CPU FDI TX PLL, always on for IGDNG */
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
temp = I915_READ(fdi_tx_reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
@@ -1568,12 +1597,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK) == 0) {
for (j = 0; j < tries; j++) {
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+ temp);
if (temp & FDI_RX_BIT_LOCK)
break;
udelay(200);
@@ -1582,11 +1612,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_BIT_LOCK);
else
- DRM_DEBUG("train 1 fail\n");
+ DRM_DEBUG_KMS("train 1 fail\n");
} else {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG("train 1 ok 2!\n");
+ DRM_DEBUG_KMS("train 1 ok 2!\n");
}
temp = I915_READ(fdi_tx_reg);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1601,12 +1631,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
for (j = 0; j < tries; j++) {
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+ temp);
if (temp & FDI_RX_SYMBOL_LOCK)
break;
udelay(200);
@@ -1614,15 +1645,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
if (j != tries) {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG("train 2 ok 1!\n");
+ DRM_DEBUG_KMS("train 2 ok 1!\n");
} else
- DRM_DEBUG("train 2 fail\n");
+ DRM_DEBUG_KMS("train 2 fail\n");
} else {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG("train 2 ok 2!\n");
+ DRM_DEBUG_KMS("train 2 ok 2!\n");
}
- DRM_DEBUG("train done\n");
+ DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1664,9 +1695,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
case DRM_MODE_DPMS_OFF:
- DRM_DEBUG("crtc %d dpms off\n", pipe);
-
- i915_disable_vga(dev);
+ DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
@@ -1677,6 +1706,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
+ i915_disable_vga(dev);
+
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -1690,16 +1721,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(500);
continue;
} else {
- DRM_DEBUG("pipe %d off delay\n", pipe);
+ DRM_DEBUG_KMS("pipe %d off delay\n",
+ pipe);
break;
}
}
} else
- DRM_DEBUG("crtc %d is disabled\n", pipe);
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
- if (HAS_eDP) {
- igdng_disable_pll_edp(crtc);
+ udelay(100);
+
+ /* Disable PF */
+ temp = I915_READ(pf_ctl_reg);
+ if ((temp & PF_ENABLE) != 0) {
+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ I915_READ(pf_ctl_reg);
}
+ I915_WRITE(pf_win_size, 0);
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1725,6 +1763,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(100);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
+ I915_READ(PCH_LVDS);
+ udelay(100);
+ }
+
/* disable PCH transcoder */
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
@@ -1738,12 +1783,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(500);
continue;
} else {
- DRM_DEBUG("transcoder %d off delay\n", pipe);
+ DRM_DEBUG_KMS("transcoder %d off "
+ "delay\n", pipe);
break;
}
}
}
+ udelay(100);
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -1751,14 +1799,20 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_rx_reg);
- if ((temp & FDI_RX_PLL_ENABLE) != 0) {
- temp &= ~FDI_SEL_PCDCLK;
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
+ if (HAS_eDP) {
+ ironlake_disable_pll_edp(crtc);
}
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_SEL_PCDCLK;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+
/* Disable CPU FDI TX PLL */
temp = I915_READ(fdi_tx_reg);
if ((temp & FDI_TX_PLL_ENABLE) != 0) {
@@ -1767,20 +1821,43 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(100);
}
- /* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
- }
- I915_WRITE(pf_win_size, 0);
-
/* Wait for the clocks to turn off. */
- udelay(150);
+ udelay(100);
break;
}
}
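Both training phases above follow the same poll-then-ack shape: read the RX IIR register, spin a bounded number of times waiting for the lock bit, then write the bit back to acknowledge it. A standalone sketch of that pattern (read_reg()/write_reg() are hypothetical stand-ins for the driver's I915_READ/I915_WRITE; not the driver's exact code):

/* Hypothetical MMIO accessors standing in for I915_READ/I915_WRITE. */
static u32 read_reg(u32 reg);
static void write_reg(u32 reg, u32 val);

/*
 * Wait for an FDI training lock bit (FDI_RX_BIT_LOCK or
 * FDI_RX_SYMBOL_LOCK) and acknowledge it by writing it back.
 * Returns 0 on lock, -ETIMEDOUT if training failed.
 */
static int fdi_wait_for_lock(u32 fdi_rx_iir_reg, u32 lock_bit, int tries)
{
	u32 temp;
	int j;

	for (j = 0; j < tries; j++) {
		temp = read_reg(fdi_rx_iir_reg);
		if (temp & lock_bit) {
			write_reg(fdi_rx_iir_reg, temp | lock_bit); /* ack */
			return 0;
		}
		udelay(200); /* same settle delay as the loops above */
	}
	return -ETIMEDOUT;
}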
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ struct intel_overlay *overlay;
+ int ret;
+
+ if (!enable && intel_crtc->overlay) {
+ overlay = intel_crtc->overlay;
+ mutex_lock(&overlay->dev->struct_mutex);
+ for (;;) {
+ ret = intel_overlay_switch_off(overlay);
+ if (ret == 0)
+ break;
+
+ ret = intel_overlay_recover_from_interrupt(overlay, 0);
+ if (ret != 0) {
+ /* overlay doesn't react anymore. Usually
+ * results in a black screen and an unkillable
+ * X server. */
+ BUG();
+ overlay->hw_wedged = HW_WEDGED;
+ break;
+ }
+ }
+ mutex_unlock(&overlay->dev->struct_mutex);
+ }
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway. */
+
+ return;
+}
+
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1839,12 +1916,14 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_update_fbc(crtc, &crtc->mode);
/* Give the overlay scaler a chance to enable if it's on this pipe */
- //intel_crtc_dpms_video(crtc, true); TODO
+ intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
intel_update_watermarks(dev);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
- //intel_crtc_dpms_video(crtc, FALSE); TODO
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ drm_vblank_off(dev, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
@@ -1963,7 +2042,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
@@ -2039,7 +2118,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int intel_panel_fitter_pipe (struct drm_device *dev)
+int intel_panel_fitter_pipe (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control;
@@ -2083,9 +2162,8 @@ fdi_reduce_ratio(u32 *num, u32 *den)
#define LINK_N 0x80000
static void
-igdng_compute_m_n(int bits_per_pixel, int nlanes,
- int pixel_clock, int link_clock,
- struct fdi_m_n *m_n)
+ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
+ int link_clock, struct fdi_m_n *m_n)
{
u64 temp;
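The M/N values computed here encode two ratios the hardware divides out: the data ratio (payload bytes per lane per link clock) and the link ratio (pixel clock over link clock). With illustrative numbers, 24 bpp over 4 FDI lanes at a 148500 kHz pixel clock and a 270000 kHz link clock gives (24/8 x 148500) / (4 x 270000) ~= 0.4125 and 148500/270000 = 0.55. A sketch of the arithmetic — the 0x800000 DATA_N scale factor is an assumption, only LINK_N is visible above:

/* Sketch only; div_u64() is from <linux/math64.h>. */
static void compute_m_n_sketch(int bits_per_pixel, int nlanes,
			       int pixel_clock, int link_clock,
			       u32 *data_m, u32 *data_n,
			       u32 *link_m, u32 *link_n)
{
	u64 temp;

	/* data ratio: payload bytes per lane per link clock tick */
	*data_n = 0x800000; /* assumed DATA_N scale factor */
	temp = div_u64((u64)*data_n * pixel_clock, link_clock);
	*data_m = div_u64(temp * bits_per_pixel / 8, nlanes);

	/* link ratio: pixel clock over link clock */
	*link_n = LINK_N; /* 0x80000, defined above */
	*link_m = div_u64((u64)*link_n * pixel_clock, link_clock);
}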
@@ -2113,34 +2191,34 @@ struct intel_watermark_params {
unsigned long cacheline_size;
};
-/* IGD has different values for various configs */
-static struct intel_watermark_params igd_display_wm = {
- IGD_DISPLAY_FIFO,
- IGD_MAX_WM,
- IGD_DFT_WM,
- IGD_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+/* Pineview has different values for various configs */
+static struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params igd_display_hplloff_wm = {
- IGD_DISPLAY_FIFO,
- IGD_MAX_WM,
- IGD_DFT_HPLLOFF_WM,
- IGD_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params igd_cursor_wm = {
- IGD_CURSOR_FIFO,
- IGD_CURSOR_MAX_WM,
- IGD_CURSOR_DFT_WM,
- IGD_CURSOR_GUARD_WM,
- IGD_FIFO_LINE_SIZE,
+static struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params igd_cursor_hplloff_wm = {
- IGD_CURSOR_FIFO,
- IGD_CURSOR_MAX_WM,
- IGD_CURSOR_DFT_WM,
- IGD_CURSOR_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
static struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
@@ -2213,11 +2291,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1000;
entries_required /= wm->cacheline_size;
- DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
+ DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
wm_size = wm->fifo_size - (entries_required + wm->guard_size);
- DRM_DEBUG("FIFO watermark level: %d\n", wm_size);
+ DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
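The watermark arithmetic reduces to: bytes fetched during one memory-latency window, converted to FIFO cachelines, subtracted (plus a guard) from the FIFO size. Worked with illustrative numbers: a 100000 kHz dot clock at 4 bytes/pixel and 5000 ns latency fetches (100000/1000) x 4 x 5000 / 1000 = 2000 bytes, i.e. 31 cachelines of 64 bytes, so wm = fifo_size - (31 + guard_size). As a standalone sketch (the max_wm/default_wm clamping above is omitted):

static long calc_wm_sketch(unsigned long clock_in_khz, int pixel_size,
			   unsigned long latency_ns,
			   const struct intel_watermark_params *wm)
{
	unsigned long entries;

	/* bytes of scanout fetched during one memory-latency window */
	entries = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
	entries /= wm->cacheline_size; /* convert to FIFO cachelines */

	return wm->fifo_size - (entries + wm->guard_size);
}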
@@ -2279,50 +2357,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
return latency;
}
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
return NULL;
}
-static void igd_disable_cxsr(struct drm_device *dev)
+static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
/* deactivate cxsr */
reg = I915_READ(DSPFW3);
- reg &= ~(IGD_SELF_REFRESH_EN);
+ reg &= ~(PINEVIEW_SELF_REFRESH_EN);
I915_WRITE(DSPFW3, reg);
DRM_INFO("Big FIFO is disabled\n");
}
-static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
+static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
unsigned long wm;
struct cxsr_latency *latency;
- latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq,
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
dev_priv->mem_freq);
if (!latency) {
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
- igd_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
return;
}
/* Display SR */
- wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= 0x7fffff;
reg |= wm << 23;
I915_WRITE(DSPFW1, reg);
- DRM_DEBUG("DSPFW1 register is %x\n", reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~(0x3f << 24);
@@ -2330,7 +2408,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &igd_display_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
reg = I915_READ(DSPFW3);
reg &= 0xfffffe00;
@@ -2338,17 +2416,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~(0x3f << 16);
reg |= (wm & 0x3f) << 16;
I915_WRITE(DSPFW3, reg);
- DRM_DEBUG("DSPFW3 register is %x\n", reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
reg = I915_READ(DSPFW3);
- reg |= IGD_SELF_REFRESH_EN;
+ reg |= PINEVIEW_SELF_REFRESH_EN;
I915_WRITE(DSPFW3, reg);
DRM_INFO("Big FIFO is enabled\n");
@@ -2384,8 +2462,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
(dsparb & 0x7f);
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2403,8 +2481,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
(dsparb & 0x1ff);
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2418,7 +2496,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
size);
return size;
@@ -2433,8 +2512,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
size = dsparb & 0x7f;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2509,15 +2588,39 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
- int unused3, int unused4)
+static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long line_time_us;
+ int sr_clock, sr_entries, srwm = 1;
+
+ /* Calc sr entries for one-plane configs */
+ if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ sr_entries = (((sr_latency_ns / line_time_us) + 1) *
+ pixel_size * sr_hdisplay) / 1000;
+ sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
+ DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ srwm = I945_FIFO_SIZE - sr_entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x3f;
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ }
- DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
+ (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
}
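The (sr_latency_ns / line_time_us) mix of nanoseconds and microseconds is deliberate; the trailing /1000 rescales the result while keeping integer precision, as the comment notes. With illustrative numbers: 1920 active pixels at a 148500 kHz clock gives line_time ~= 12 us; a 12000 ns latency then covers (12000/12)+1 = 1001 scaled lines, and 1001 x 4 bytes x 1920 px / 1000 ~= 7687 bytes, about 120 cachelines of 64 bytes. A sketch:

/*
 * Sketch of the self-refresh entry count above; the caller guarantees
 * sr_clock_khz != 0 (single-pipe path). Numbers are illustrative.
 */
static int sr_entries_sketch(int sr_clock_khz, int hdisplay,
			     int pixel_size, int latency_ns, int line_size)
{
	unsigned long line_time_us = (hdisplay * 1000) / sr_clock_khz;
	unsigned long entries;

	/* ns / us leaves a factor of 1000; the final /1000 removes it */
	entries = (((latency_ns / line_time_us) + 1) *
		   pixel_size * hdisplay) / 1000;
	return entries / line_size; /* FIFO cachelines */
}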
@@ -2553,7 +2656,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
pixel_size, latency_ns);
planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
pixel_size, latency_ns);
- DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/*
* Overlay gets an aggressive default since video jitter is bad.
@@ -2573,14 +2676,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = (((sr_latency_ns / line_time_us) + 1) *
pixel_size * sr_hdisplay) / 1000;
sr_entries = roundup(sr_entries / cacheline_size, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
}
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
@@ -2607,7 +2710,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
pixel_size, latency_ns);
fwater_lo |= (3<<8) | planea_wm;
- DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm);
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
I915_WRITE(FW_BLC, fwater_lo);
}
@@ -2661,11 +2764,11 @@ static void intel_update_watermarks(struct drm_device *dev)
if (crtc->enabled) {
enabled++;
if (intel_crtc->plane == 0) {
- DRM_DEBUG("plane A (pipe %d) clock: %d\n",
+ DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
intel_crtc->pipe, crtc->mode.clock);
planea_clock = crtc->mode.clock;
} else {
- DRM_DEBUG("plane B (pipe %d) clock: %d\n",
+ DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
intel_crtc->pipe, crtc->mode.clock);
planeb_clock = crtc->mode.clock;
}
@@ -2682,10 +2785,10 @@ static void intel_update_watermarks(struct drm_device *dev)
return;
/* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_IGD(dev))
- igd_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_IGD(dev))
- igd_disable_cxsr(dev);
+ if (enabled == 1 && IS_PINEVIEW(dev))
+ pineview_enable_cxsr(dev, sr_clock, pixel_size);
+ else if (IS_PINEVIEW(dev))
+ pineview_disable_cxsr(dev);
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
@@ -2779,10 +2882,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
} else if (IS_I9XX(dev)) {
refclk = 96000;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
refclk = 120000; /* 120Mhz refclk */
} else {
refclk = 48000;
@@ -2802,14 +2906,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
- if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
+ if (is_lvds && limit->find_reduced_pll &&
+ dev_priv->lvds_downclock_avail) {
memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
has_reduced_clock = limit->find_reduced_pll(limit, crtc,
- (adjusted_mode->clock*3/4),
+ dev_priv->lvds_downclock,
refclk,
&reduced_clock);
+ if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+ /*
+ * If a different P is found, it means that we can't
+ * switch the display clock by using FP0/FP1.
+ * In that case we will disable the LVDS downclock
+ * feature.
+ */
+ DRM_DEBUG_KMS("Different P is found for "
+ "LVDS clock/downclock\n");
+ has_reduced_clock = 0;
+ }
}
-
/* SDVO TV has fixed PLL values that depend on its clock range,
this mirrors the VBIOS setting. */
if (is_sdvo && is_tv) {
@@ -2831,7 +2946,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* FDI link */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
int lane, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -2873,8 +2988,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
- igdng_compute_m_n(bpp, lane, target_clock,
- link_bw, &m_n);
+ ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
/* Ironlake: try to setup display ref clock before DPLL
@@ -2882,7 +2996,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -2917,7 +3031,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (IS_IGD(dev)) {
+ if (IS_PINEVIEW(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = (1 << reduced_clock.n) << 16 |
@@ -2929,7 +3043,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@@ -2942,19 +3056,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (IS_IGDNG(dev))
+ else if (IS_IRONLAKE(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
if (is_dp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_IGD(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -2973,7 +3087,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3005,9 +3119,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- /* IGDNG's plane is forced to pipe, bit 24 is to
+ /* Ironlake's plane is forced to pipe, bit 24 is to
enable color space conversion */
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -3034,20 +3148,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Disable the panel fitter if it was on our pipe */
- if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
+ if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
- DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- /* assign to IGDNG registers */
- if (IS_IGDNG(dev)) {
+ /* assign to Ironlake registers */
+ if (IS_IRONLAKE(dev)) {
fp_reg = pch_fp_reg;
dpll_reg = pch_dpll_reg;
}
if (is_edp) {
- igdng_disable_pll_edp(crtc);
+ ironlake_disable_pll_edp(crtc);
} else if ((dpll & DPLL_VCO_ENABLE)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3062,7 +3176,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
u32 lvds;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
@@ -3095,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
udelay(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
if (is_sdvo) {
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3115,14 +3229,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(fp_reg + 4, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG("enabling CxSR downclocking\n");
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(fp_reg + 4, fp);
intel_crtc->lowfreq_avail = false;
if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG("disabling CxSR downclocking\n");
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
@@ -3142,21 +3256,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* pipesrc and dspsize control the size that is scaled from, which should
* always be the user's requested size.
*/
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(dsppos_reg, 0);
}
I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
I915_WRITE(link_m1_reg, m_n.link_m);
I915_WRITE(link_n1_reg, m_n.link_n);
if (is_edp) {
- igdng_set_pll_edp(crtc, adjusted_mode->clock);
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
@@ -3170,7 +3284,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3204,8 +3318,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- /* use legacy palette for IGDNG */
- if (IS_IGDNG(dev))
+ /* use legacy palette for Ironlake */
+ if (IS_IRONLAKE(dev))
palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
LGC_PALETTE_B;
@@ -3234,11 +3348,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
size_t addr;
int ret;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
- DRM_DEBUG("cursor off\n");
+ DRM_DEBUG_KMS("cursor off\n");
if (IS_MOBILE(dev) || IS_I9XX(dev)) {
temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
temp |= CURSOR_MODE_DISABLE;
@@ -3546,18 +3660,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_IGD(dev)) {
- clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
- clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ if (IS_PINEVIEW(dev)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
if (IS_I9XX(dev)) {
- if (IS_IGD(dev))
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+ if (IS_PINEVIEW(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -3572,7 +3686,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
7 : 14;
break;
default:
- DRM_DEBUG("Unknown DPLL mode %08x in programmed "
+ DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return 0;
}
@@ -3658,7 +3772,7 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("idle timer fired, downclocking\n");
+ DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
dev_priv->busy = false;
@@ -3669,11 +3783,11 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG("not reclocking render clock\n");
+ DRM_DEBUG_DRIVER("not reclocking render clock\n");
return;
}
@@ -3682,7 +3796,7 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
else if (IS_I85X(dev))
pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
- DRM_DEBUG("increasing render clock frequency\n");
+ DRM_DEBUG_DRIVER("increasing render clock frequency\n");
/* Schedule downclock */
if (schedule)
@@ -3694,11 +3808,11 @@ void intel_decrease_renderclock(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG("not reclocking render clock\n");
+ DRM_DEBUG_DRIVER("not reclocking render clock\n");
return;
}
@@ -3758,7 +3872,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
}
- DRM_DEBUG("decreasing render clock frequency\n");
+ DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
}
/* Note that no increase function is needed for this - increase_renderclock()
@@ -3766,7 +3880,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
*/
void intel_decrease_displayclock(struct drm_device *dev)
{
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
@@ -3792,7 +3906,7 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG("idle timer fired, downclocking\n");
+ DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
intel_crtc->busy = false;
@@ -3808,14 +3922,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->lvds_downclock_avail)
return;
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG("upclocking LVDS\n");
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3826,7 +3940,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG("failed to upclock LVDS!\n");
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3847,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3858,7 +3972,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
- DRM_DEBUG("downclocking LVDS\n");
+ DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3869,7 +3983,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
- DRM_DEBUG("failed to downclock LVDS!\n");
+ DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3936,8 +4050,13 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- dev_priv->busy = true;
- intel_increase_renderclock(dev, true);
+ if (!dev_priv->busy) {
+ dev_priv->busy = true;
+ intel_increase_renderclock(dev, true);
+ } else {
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -3967,6 +4086,158 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+};
+
+static void intel_unpin_work_fn(struct work_struct *__work)
+{
+ struct intel_unpin_work *work =
+ container_of(__work, struct intel_unpin_work, work);
+
+ mutex_lock(&work->dev->struct_mutex);
+ i915_gem_object_unpin(work->obj);
+ drm_gem_object_unreference(work->obj);
+ mutex_unlock(&work->dev->struct_mutex);
+ kfree(work);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ if (work == NULL || !work->pending) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ intel_crtc->unpin_work = NULL;
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ if (work->event) {
+ e = work->event;
+ do_gettimeofday(&now);
+ e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ obj_priv = work->obj->driver_private;
+ if (atomic_dec_and_test(&obj_priv->pending_flip))
+ DRM_WAKEUP(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work)
+ intel_crtc->unpin_work->pending = 1;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ int ret;
+ RING_LOCALS;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&dev->struct_mutex);
+
+ work->event = event;
+ work->dev = crtc->dev;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ work->obj = intel_fb->obj;
+ INIT_WORK(&work->work, intel_unpin_work_fn);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(work);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+ intel_crtc->unpin_work = work;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj);
+ if (ret != 0) {
+ kfree(work);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* Reference the old fb object for the scheduled work. */
+ drm_gem_object_reference(work->obj);
+
+ crtc->fb = fb;
+ i915_gem_object_flush_write_domain(obj);
+ drm_vblank_get(dev, intel_crtc->pipe);
+ obj_priv = obj->driver_private;
+ atomic_inc(&obj_priv->pending_flip);
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ if (IS_I965G(dev)) {
+ OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING((fb->width << 16) | fb->height);
+ } else {
+ OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(MI_NOOP);
+ }
+ ADVANCE_LP_RING();
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
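Userspace reaches intel_crtc_page_flip() through the KMS page-flip ioctl, and intel_finish_page_flip() posts the completion event it waits on. A minimal libdrm-style consumer sketch (API names assumed from libdrm; error handling elided):

#include <xf86drm.h>
#include <xf86drmMode.h>

static void on_page_flip(int fd, unsigned int sequence, unsigned int tv_sec,
			 unsigned int tv_usec, void *user_data)
{
	/* delivered from the vblank that completed the flip */
}

static void flip_once(int fd, uint32_t crtc_id, uint32_t next_fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = on_page_flip,
	};

	/* queue the flip; it completes on the next vblank */
	drmModePageFlip(fd, crtc_id, next_fb_id,
			DRM_MODE_PAGE_FLIP_EVENT, NULL);

	/* block until the kernel posts the completion event */
	drmHandleEvent(fd, &evctx);
}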
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
@@ -3983,11 +4254,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = intel_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
};
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
@@ -4010,10 +4283,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
- DRM_DEBUG("swapping pipes & planes for FBC\n");
+ DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = ((pipe == 0) ? 1 : 0);
}
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -4090,7 +4368,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
int found;
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4118,7 +4396,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D);
- } else if (IS_I9XX(dev)) {
+ } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
@@ -4145,10 +4423,10 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D);
- } else
+ } else if (IS_I8XX(dev))
intel_dvo_init(dev);
- if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
+ if (SUPPORTS_TV(dev))
intel_tv_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -4257,7 +4535,7 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4291,11 +4569,52 @@ void intel_init_clock_gating(struct drm_device *dev)
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
- } else if (IS_I855(dev) || IS_I865G(dev)) {
+ } else if (IS_I85X(dev) || IS_I865G(dev)) {
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
} else if (IS_I830(dev)) {
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ if (I915_HAS_RC6(dev)) {
+ struct drm_gem_object *pwrctx;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (dev_priv->pwrctx) {
+ obj_priv = dev_priv->pwrctx->driver_private;
+ } else {
+ pwrctx = drm_gem_object_alloc(dev, 4096);
+ if (!pwrctx) {
+ DRM_DEBUG("failed to alloc power context, "
+ "RC6 disabled\n");
+ goto out;
+ }
+
+ ret = i915_gem_object_pin(pwrctx, 4096);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n",
+ ret);
+ drm_gem_object_unreference(pwrctx);
+ goto out;
+ }
+
+ i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+
+ dev_priv->pwrctx = pwrctx;
+ obj_priv = pwrctx->driver_private;
+ }
+
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ }
+
+out:
+ return;
}
/* Set up chip specific display functions */
@@ -4304,8 +4623,8 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_IGDNG(dev))
- dev_priv->display.dpms = igdng_crtc_dpms;
+ if (IS_IRONLAKE(dev))
+ dev_priv->display.dpms = ironlake_crtc_dpms;
else
dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4324,13 +4643,13 @@ static void intel_init_display(struct drm_device *dev)
}
/* Returns the core display clock speed */
- if (IS_I945G(dev))
+ if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_I915GM(dev))
@@ -4339,7 +4658,7 @@ static void intel_init_display(struct drm_device *dev)
else if (IS_I865G(dev))
dev_priv->display.get_display_clock_speed =
i865_get_display_clock_speed;
- else if (IS_I855(dev))
+ else if (IS_I85X(dev))
dev_priv->display.get_display_clock_speed =
i855_get_display_clock_speed;
else /* 852, 830 */
@@ -4347,7 +4666,7 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->display.update_wm = NULL;
else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
@@ -4403,7 +4722,7 @@ void intel_modeset_init(struct drm_device *dev)
num_pipe = 2;
else
num_pipe = 1;
- DRM_DEBUG("%d display pipe%s available.\n",
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
num_pipe, num_pipe > 1 ? "s" : "");
if (IS_I85X(dev))
@@ -4422,6 +4741,15 @@ void intel_modeset_init(struct drm_device *dev)
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
+
+ intel_setup_overlay(dev);
+
+ if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq))
+ DRM_INFO("failed to find known CxSR latency "
+ "(found fsb freq %d, mem freq %d), disabling CxSR\n",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4445,11 +4773,21 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
- mutex_unlock(&dev->struct_mutex);
-
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ if (dev_priv->pwrctx) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = dev_priv->pwrctx->driver_private;
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset & ~PWRCTX_EN);
+ I915_READ(PWRCTXA);
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(dev_priv->pwrctx);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
drm_mode_config_cleanup(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d83447557f9..4e7aa8b7b93 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -33,7 +33,8 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
+
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -223,8 +224,8 @@ intel_dp_aux_ch(struct intel_output *intel_output,
*/
if (IS_eDP(intel_output))
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (IS_IGDNG(dev))
- aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */
+ else if (IS_IRONLAKE(dev))
+ aux_clock_divider = 62; /* Ironlake: input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -282,7 +283,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
return -ETIMEDOUT;
}
@@ -382,17 +383,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
}
static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_bytes)
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct intel_dp_priv *dp_priv = container_of(adapter,
struct intel_dp_priv,
adapter);
struct intel_output *intel_output = dp_priv->intel_output;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
- return intel_dp_aux_ch(intel_output,
- send, send_bytes, recv, recv_bytes);
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_ch defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
}
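The message assembled above follows the DisplayPort I2C-over-AUX framing: a command nibble (with MOT held while the I2C transaction stays open), a 16-bit slave address, and for data-bearing transfers a length-minus-one byte. A condensed sketch of the one-byte write case, mirroring the code above:

static int build_i2c_aux_write(uint8_t *msg, uint16_t address,
			       uint8_t data, int middle_of_transaction)
{
	msg[0] = AUX_I2C_WRITE << 4;
	if (middle_of_transaction)
		msg[0] |= AUX_I2C_MOT << 4; /* keep the transaction open */
	msg[1] = address >> 8; /* 16-bit I2C slave address, high byte */
	msg[2] = address; /* low byte */
	msg[3] = 0; /* length - 1: one data byte follows */
	msg[4] = data;
	return 5; /* msg_bytes */
}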
static int
@@ -435,7 +496,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
- DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
+ DRM_DEBUG_KMS("Display port link bw %02x lane "
+ "count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
@@ -514,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(3, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -606,23 +668,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
-static void igdng_edp_backlight_on (struct drm_device *dev)
+static void ironlake_edp_backlight_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
}
-static void igdng_edp_backlight_off (struct drm_device *dev)
+static void ironlake_edp_backlight_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
@@ -641,13 +703,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_output, dp_priv->DP);
if (IS_eDP(intel_output))
- igdng_edp_backlight_off(dev);
+ ironlake_edp_backlight_off(dev);
}
} else {
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
if (IS_eDP(intel_output))
- igdng_edp_backlight_on(dev);
+ ironlake_edp_backlight_on(dev);
}
}
dp_priv->dpms_mode = mode;
@@ -1010,7 +1072,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (IS_eDP(intel_output)) {
DP &= ~DP_PLL_ENABLE;
@@ -1071,7 +1133,7 @@ intel_dp_check_link_status(struct intel_output *intel_output)
}
static enum drm_connector_status
-igdng_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
@@ -1106,8 +1168,8 @@ intel_dp_detect(struct drm_connector *connector)
dp_priv->has_audio = false;
- if (IS_IGDNG(dev))
- return igdng_dp_detect(connector);
+ if (IS_IRONLAKE(dev))
+ return ironlake_dp_detect(connector);
temp = I915_READ(PORT_HOTPLUG_EN);
@@ -1227,7 +1289,53 @@ intel_dp_hot_plug(struct intel_output *intel_output)
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_output);
}
-
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the given DP is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the given
+ * DP is present.
+ */
+static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, dp_port, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ dp_port = 0;
+ if (dp_reg == DP_B || dp_reg == PCH_DP_B)
+ dp_port = PORT_IDPB;
+ else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
+ dp_port = PORT_IDPC;
+ else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
+ dp_port = PORT_IDPD;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not DP, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_DP &&
+ p_child->device_type != DEVICE_TYPE_eDP)
+ continue;
+ /* Find the eDP port */
+ if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
+ ret = 1;
+ break;
+ }
+ /* Find the DP port */
+ if (p_child->dvo_port == dp_port) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1237,6 +1345,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
struct intel_dp_priv *dp_priv;
const char *name = NULL;
+ if (!dp_is_present_in_vbt(dev, output_reg)) {
+ DRM_DEBUG_KMS("DP is not present. Ignore it\n");
+ return;
+ }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_output)
@@ -1254,11 +1366,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else
intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
- if (output_reg == DP_B)
+ if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C)
+ else if (output_reg == DP_C || output_reg == PCH_DP_C)
intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D)
+ else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
if (IS_eDP(intel_output)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ef61fe9507e..a51573da1ff 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,32 @@ struct intel_output {
int clone_mask;
};
+struct intel_crtc;
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* fixed-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ void *virt_addr;
+ /* flip handling */
+ uint32_t last_flip_req;
+ int hw_wedged;
+#define HW_WEDGED 1
+#define NEEDS_WAIT_FOR_FLIP 2
+#define RELEASE_OLD_VID 3
+#define SWITCH_OFF_STAGE_1 4
+#define SWITCH_OFF_STAGE_2 5
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -121,6 +147,8 @@ struct intel_crtc {
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
+ struct intel_overlay *overlay;
+ struct intel_unpin_work *unpin_work;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -134,6 +162,8 @@ void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
+void intel_i2c_reset_gmbus(struct drm_device *dev);
+
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -148,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern void intel_edp_link_config (struct intel_output *, int *, int *);
+extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -177,10 +208,23 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
+extern void intel_init_clock_gating(struct drm_device *dev);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_framebuffer **fb,
struct drm_gem_object *obj);
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ int interruptible);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 40fcf6fdef3..371d753e362 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
par->intel_fb = intel_fb;
/* To allow resizing without swapping buffers */
- DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
- intel_fb->base.height, obj_priv->gtt_offset, fbo);
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ intel_fb->base.width, intel_fb->base.height,
+ obj_priv->gtt_offset, fbo);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
{
int ret;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c33451aec1b..f04dbbe7d40 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway, as it proves more stable in testing.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to write this twice for an issue that may result
* in the first write getting masked.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -225,7 +225,52 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_hdmi_enc_destroy,
};
-
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the given HDMI is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the given
+ * HDMI is present.
+ */
+static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, hdmi_port, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ if (hdmi_reg == SDVOB)
+ hdmi_port = DVO_B;
+ else if (hdmi_reg == SDVOC)
+ hdmi_port = DVO_C;
+ else if (hdmi_reg == HDMIB)
+ hdmi_port = DVO_B;
+ else if (hdmi_reg == HDMIC)
+ hdmi_port = DVO_C;
+ else if (hdmi_reg == HDMID)
+ hdmi_port = DVO_D;
+ else
+ return 0;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not HDMI, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_HDMI)
+ continue;
+ /* Find the HDMI port */
+ if (p_child->dvo_port == hdmi_port) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -233,6 +278,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_output *intel_output;
struct intel_hdmi_priv *hdmi_priv;
+ if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
+ DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
+ return;
+ }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c7eab724c41..8673c735b8a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
struct drm_i915_private *dev_priv = dev->dev_private;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_IGD(dev))
+ if (!IS_PINEVIEW(dev))
return;
if (enable)
I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +118,23 @@ static void set_data(void *data, int state_high)
udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
}
+/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
+ * engine, but if the BIOS leaves it enabled, then that can break our use
+ * of the bit-banging I2C interfaces. This is notably the case with the
+ * Mac Mini in EFI mode.
+ */
+void
+intel_i2c_reset_gmbus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(PCH_GMBUS0, 0);
+ } else {
+ I915_WRITE(GMBUS0, 0);
+ }
+}
+
/**
* intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
if(i2c_bit_add_bus(&chan->adapter))
goto out_free;
+ intel_i2c_reset_gmbus(dev);
+
/* JJJ: raise SCL and SDA? */
intel_i2c_quirk_set(dev, true);
set_data(chan, 1);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index eb365021bb5..3118ce274e6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl, reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_PCH_CTL2;
else
reg = BLC_PWM_CTL;
@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_status, ctl_reg, status_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
ctl_reg = PCH_PP_CONTROL;
status_reg = PCH_PP_STATUS;
} else {
@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* full screen scale for now */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
goto out;
/* 965+ wants fuzzy fitting */
@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
/*
@@ -914,6 +914,101 @@ static int intel_lid_present(void)
#endif
/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *scan, *panel_fixed_mode;
+ int temp_downclock;
+
+ panel_fixed_mode = dev_priv->panel_fixed_mode;
+ temp_downclock = panel_fixed_mode->clock;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If a mode has the same timings as the fixed panel mode but a
+ * lower clock, we have found a reduced downclock for the LVDS.
+ * In that case we can program different FPx0/1 dividers to
+ * dynamically select between the low and high frequencies.
+ */
+ if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+ scan->hsync_start == panel_fixed_mode->hsync_start &&
+ scan->hsync_end == panel_fixed_mode->hsync_end &&
+ scan->htotal == panel_fixed_mode->htotal &&
+ scan->vdisplay == panel_fixed_mode->vdisplay &&
+ scan->vsync_start == panel_fixed_mode->vsync_start &&
+ scan->vsync_end == panel_fixed_mode->vsync_end &&
+ scan->vtotal == panel_fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * A downclock was already found, but keep
+ * looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ }
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ if (temp_downclock < panel_fixed_mode->clock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+ "Normal clock %dKhz, downclock %dKhz\n",
+ panel_fixed_mode->clock, temp_downclock);
+ }
+ return;
+}
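For intuition on the downclock match: with identical timings, a lower pixel clock means a proportionally lower refresh rate, since vrefresh = clock (kHz) * 1000 / (htotal * vtotal). A standalone sketch of that arithmetic, with made-up panel timings:

#include <stdio.h>

static int vrefresh(int clock_khz, int htotal, int vtotal)
{
        /* the pixel clock is in kHz, so scale to Hz before dividing */
        return clock_khz * 1000 / (htotal * vtotal);
}

int main(void)
{
        int htotal = 1440, vtotal = 823; /* hypothetical 1280x800 panel */

        printf("normal:    %d Hz\n", vrefresh(71000, htotal, vtotal)); /* ~60 Hz */
        printf("downclock: %d Hz\n", vrefresh(59000, htotal, vtotal)); /* ~50 Hz */
        return 0;
}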
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * Returns 1 if it is present, 0 if it is not.
+ * If no child dev was parsed from VBT, the LVDS is assumed present.
+ * Note: the addin_offset must also be checked for an LVDS panel;
+ * only when it is non-zero is the panel regarded as present.
+ */
+static int lvds_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not LFP, continue.
+ * If the device type is 0x22, it is also regarded as LFP.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+ p_child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ /* The addin_offset must be checked: only when it is
+ * non-zero is the panel regarded as present.
+ */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
*
@@ -936,21 +1031,20 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- /* Assume that any device without an ACPI LID device also doesn't
- * have an integrated LVDS. We would be better off parsing the BIOS
- * to get a reliable indicator, but that code isn't written yet.
- *
- * In the case of all-in-one desktops using LVDS that we've seen,
- * they're using SDVO LVDS.
+ /*
+ * Assume LVDS is present if there's an ACPI lid device or if the
+ * device is present in the VBT.
*/
- if (!intel_lid_present())
+ if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
return;
+ }
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
if (dev_priv->edp_support) {
- DRM_DEBUG("disable LVDS for eDP support\n");
+ DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
gpio = PCH_GPIOC;
@@ -1023,6 +1117,7 @@ void intel_lvds_init(struct drm_device *dev)
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
mutex_unlock(&dev->mode_config.mutex);
+ intel_find_lvds_downclock(dev, connector);
goto out;
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1142,8 @@ void intel_lvds_init(struct drm_device *dev)
* correct mode.
*/
- /* IGDNG: FIXME if still fail, not try pipe mode now */
- if (IS_IGDNG(dev))
+ /* Ironlake: FIXME: if this still fails, don't try pipe mode for now */
+ if (IS_IRONLAKE(dev))
goto failed;
lvds = I915_READ(LVDS);
@@ -1069,7 +1164,7 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
u32 pwm;
/* make sure PWM is enabled */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,7 +1177,7 @@ out:
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
- DRM_DEBUG("lid notifier registration failed\n");
+ DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
drm_sysfs_connector_add(connector);
@@ -1093,5 +1188,6 @@ failed:
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
kfree(intel_output);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 00000000000..2639591c72e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1416 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to intel doc, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But the xorg driver assumes 2048 for both
+ * width and height. Use the minimum of both. */
+#define IMAGE_MAX_WIDTH 2048
+#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY 1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE (0x1<<19)
+#define OCMD_MIRROR_MASK (0x3<<17)
+#define OCMD_MIRROR_MODE (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL (0x2<<17)
+#define OCMD_MIRROR_BOTH (0x3<<17)
+#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED (0x8<<10)
+#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR (0xc<<10)
+#define OCMD_YUV_422_PLANAR (0xd<<10)
+#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD (0x1<<5)
+#define OCMD_TEST_MODE (0x1<<4)
+#define OCMD_BUFFER_SELECT (0x3<<2)
+#define OCMD_BUFFER0 (0x0<<2)
+#define OCMD_BUFFER1 (0x1<<2)
+#define OCMD_FIELD_SELECT (0x1<<2)
+#define OCMD_FIELD0 (0x0<<1)
+#define OCMD_FIELD1 (0x1<<1)
+#define OCMD_ENABLE (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK (0x1<<18)
+#define OCONF_PIPE_A (0x0<<18)
+#define OCONF_PIPE_B (0x1<<18)
+#define OCONF_GAMMA2_ENABLE (0x1<<16)
+#define OCONF_CSC_MODE_BT601 (0x0<<5)
+#define OCONF_CSC_MODE_BT709 (0x1<<5)
+#define OCONF_CSC_BYPASS (0x1<<4)
+#define OCONF_CC_OUT_8BIT (0x1<<3)
+#define OCONF_TEST_MODE (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE (0x1<<31)
+#define CLK_RGB24_MASK 0x0
+#define CLK_RGB16_MASK 0x070307
+#define CLK_RGB15_MASK 0x070707
+#define CLK_RGB8I_MASK 0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+ (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+ (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS 5
+#define N_VERT_Y_TAPS 3
+#define N_HORIZ_UV_TAPS 3
+#define N_VERT_UV_TAPS 3
+#define N_PHASES 17
+#define MAX_TAPS 5
+
+/* memory-buffered overlay registers */
+struct overlay_registers {
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+
+
+static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ /* no recursive mappings */
+ BUG_ON(overlay->virt_addr);
+
+ if (OVERLAY_NONPHYSICAL(overlay->dev)) {
+ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
+
+ if (!regs) {
+ DRM_ERROR("failed to map overlay regs in GTT\n");
+ return NULL;
+ }
+ } else
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+
+ return overlay->virt_addr = regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (OVERLAY_NONPHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(overlay->virt_addr);
+
+ overlay->virt_addr = NULL;
+
+ I915_READ(OVADD); /* flush wc caches */
+
+ return;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ RING_LOCALS;
+
+ BUG_ON(overlay->active);
+
+ overlay->active = 1;
+ overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ OUT_RING(overlay->flip_addr | OFC_UPDATE);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static void intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp;
+ RING_LOCALS;
+
+ BUG_ON(!overlay->active);
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ /* check for underruns */
+ tmp = I915_READ(DOVSTA);
+ if (tmp & (1 << 17))
+ DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+}
+
+static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ u32 tmp;
+ RING_LOCALS;
+
+ if (overlay->last_flip_req != 0) {
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret == 0) {
+ overlay->last_flip_req = 0;
+
+ tmp = I915_READ(ISR);
+
+ if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
+ return 0;
+ }
+ }
+
+ /* synchronous slowpath */
+ overlay->hw_wedged = RELEASE_OLD_VID;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+ u32 flip_addr = overlay->flip_addr;
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ RING_LOCALS;
+
+ BUG_ON(!overlay->active);
+
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
+ /* wait for overlay to go idle */
+ overlay->hw_wedged = SWITCH_OFF_STAGE_1;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ /* turn overlay off */
+ overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return ret;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_gem_object *obj;
+
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+ obj = overlay->vid_bo->obj;
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->vid_bo = NULL;
+
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
+}
+
+/* Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and to make forward
+ * progress. */
+int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ int interruptible)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ u32 flip_addr;
+ int ret;
+ RING_LOCALS;
+
+ if (overlay->hw_wedged == HW_WEDGED)
+ return -EIO;
+
+ if (overlay->last_flip_req == 0) {
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+ }
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+ if (ret != 0)
+ return ret;
+
+ switch (overlay->hw_wedged) {
+ case RELEASE_OLD_VID:
+ obj = overlay->old_vid_bo->obj;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->old_vid_bo = NULL;
+ break;
+ case SWITCH_OFF_STAGE_1:
+ flip_addr = overlay->flip_addr;
+ flip_addr |= OFC_UPDATE;
+
+ overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ interruptible);
+ if (ret != 0)
+ return ret;
+
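+ /* fall through: the stage 1 wait is done, finish with the stage 2 teardown */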
+ case SWITCH_OFF_STAGE_2:
+ intel_overlay_off_tail(overlay);
+ break;
+ default:
+ BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
+ }
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs_atomic */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+ int ret;
+ struct drm_gem_object *obj;
+
+ /* only wait if there is actually an old frame to release to
+ * guarantee forward progress */
+ if (!overlay->old_vid_bo)
+ return 0;
+
+ ret = intel_overlay_wait_flip(overlay);
+ if (ret != 0)
+ return ret;
+
+ obj = overlay->old_vid_bo->obj;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->old_vid_bo = NULL;
+
+ return 0;
+}
+
+struct put_image_params {
+ int format;
+ short dst_x;
+ short dst_y;
+ short dst_w;
+ short dst_h;
+ short src_w;
+ short src_scan_h;
+ short src_scan_w;
+ short src_h;
+ short stride_Y;
+ short stride_UV;
+ int offset_Y;
+ int offset_U;
+ int offset_V;
+};
+
+static int packed_depth_bytes(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_hsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_vsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
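To make the subsampling helpers concrete: for planar YUV420 both return 2, so the U and V planes each cover a quarter of the Y plane. A minimal standalone sketch (dimensions are illustrative):

#include <stdio.h>

int main(void)
{
        int w = 640, h = 480;
        int uv_hscale = 2, uv_vscale = 2; /* YUV420, per the helpers above */

        printf("Y plane: %d bytes\n", w * h);                             /* 307200 */
        printf("U plane: %d bytes\n", (w / uv_hscale) * (h / uv_vscale)); /* 76800 */
        printf("V plane: %d bytes\n", (w / uv_hscale) * (h / uv_vscale)); /* 76800 */
        return 0;
}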
+
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+ u32 mask, shift, ret;
+ if (IS_I9XX(dev)) {
+ mask = 0x3f;
+ shift = 6;
+ } else {
+ mask = 0x1f;
+ shift = 5;
+ }
+ ret = ((offset + width + mask) >> shift) - (offset >> shift);
+ if (IS_I9XX(dev))
+ ret <<= 1;
+ ret -= 1;
+ return ret << 2;
+}
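calc_swidthsw() counts how many cache lines the span [offset, offset + width) touches, doubles the count on i9xx, then encodes it as (n - 1) << 2 for the SWIDTHSW register. A hedged standalone replay of the i9xx branch (reading the doubling as half-cache-line units is an assumption):

#include <stdio.h>

/* mirrors the i9xx branch of calc_swidthsw() above (64-byte lines) */
static unsigned swidthsw_i9xx(unsigned offset, unsigned width)
{
        unsigned ret = ((offset + width + 0x3f) >> 6) - (offset >> 6);

        ret <<= 1; /* assumed: i9xx counts half-cache-line units */
        ret -= 1;
        return ret << 2;
}

int main(void)
{
        /* 640 bytes from offset 0 touch 10 cache lines -> prints 76 */
        printf("%u\n", swidthsw_i9xx(0, 640));
        return 0;
}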
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+ 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+ 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+ 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+ 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+ 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+ 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+ 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+ 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+ 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+ 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+ 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+ 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+ 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+ 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+ 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+ 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+ 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+ 0x3000, 0x0800, 0x3000};
+
+static void update_polyphase_filter(struct overlay_registers *regs)
+{
+ memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+ struct overlay_registers *regs,
+ struct put_image_params *params)
+{
+ /* fixed point with a 12 bit shift */
+ u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+ bool scale_changed = false;
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+
+ if (params->dst_w > 1)
+ xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+ /(params->dst_w);
+ else
+ xscale = 1 << FP_SHIFT;
+
+ if (params->dst_h > 1)
+ yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+ /(params->dst_h);
+ else
+ yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale to UV scale ratio an exact multiple */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
+ /*} else {
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
+ regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
+ | ((xscale >> FP_SHIFT) << 16)
+ | ((xscale & FRACT_MASK) << 3);
+ regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
+ | ((xscale_UV >> FP_SHIFT) << 16)
+ | ((xscale_UV & FRACT_MASK) << 3);
+ regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
+ | ((yscale_UV >> FP_SHIFT) << 0);
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+
+ return scale_changed;
+}
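The scale factors are 12-bit fixed point (FP_SHIFT/FRACT_MASK above): the integer part lands in the upper register field, the fraction in the lower. A standalone replay of the xscale computation with made-up sizes:

#include <stdio.h>

#define FP_SHIFT 12
#define FRACT_MASK 0xfff

int main(void)
{
        int src_scan_w = 1280, dst_w = 640; /* illustrative 2x downscale */
        unsigned xscale = ((src_scan_w - 1) << FP_SHIFT) / dst_w;

        /* 1279 * 4096 / 640 = 8185 -> integer part 1, fraction 0xFF9 */
        printf("xscale = %u (int %u, frac 0x%03X)\n",
               xscale, xscale >> FP_SHIFT, xscale & FRACT_MASK);
        return 0;
}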
+
+static void update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ u32 key = overlay->color_key;
+ switch (overlay->crtc->base.fb->bits_per_pixel) {
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ break;
+ }
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+ u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
+ }
+ } else { /* YUV packed */
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
+ }
+
+ switch (params->format & I915_OVERLAY_SWAP_MASK) {
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
+ }
+ }
+
+ return cmd;
+}
+
+int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_gem_object *new_bo,
+ struct put_image_params *params)
+{
+ int ret, tmp_width;
+ struct overlay_registers *regs;
+ bool scale_changed = false;
+ struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+ struct drm_device *dev = overlay->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!overlay);
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
+ if (ret != 0)
+ goto out_unpin;
+
+ if (!overlay->active) {
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ if (IS_I965GM(overlay->dev))
+ regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+ regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+ regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+ if (params->format & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->format, params->src_w);
+ else
+ tmp_width = params->src_w;
+
+ regs->SWIDTH = params->src_w;
+ regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+ params->offset_Y, tmp_width);
+ regs->SHEIGHT = params->src_h;
+ regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
+ regs->OSTRIDE = params->stride_Y;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+ u32 tmp_U, tmp_V;
+ regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ params->src_w/uv_hscale);
+ tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ params->src_w/uv_hscale);
+ regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
+ regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+ regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
+ regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+ regs->OSTRIDE |= params->stride_UV << 16;
+ }
+
+ scale_changed = update_scaling_factors(overlay, regs, params);
+
+ update_colorkey(overlay, regs);
+
+ regs->OCMD = overlay_cmd_reg(params);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ intel_overlay_continue(overlay, scale_changed);
+
+ overlay->old_vid_bo = overlay->vid_bo;
+ overlay->vid_bo = new_bo->driver_private;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(new_bo);
+ return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+ int ret;
+ struct overlay_registers *regs;
+ struct drm_device *dev = overlay->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ if (overlay->hw_wedged) {
+ ret = intel_overlay_recover_from_interrupt(overlay, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (!overlay->active)
+ return 0;
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ regs->OCMD = 0;
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ ret = intel_overlay_off(overlay);
+ if (ret != 0)
+ return ret;
+
+ intel_overlay_off_tail(overlay);
+
+ return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ u32 pipeconf;
+ int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+
+ if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+ return -EINVAL;
+
+ pipeconf = I915_READ(pipeconf_reg);
+
+ /* can't use the overlay with double wide pipe */
+ if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 ratio;
+ u32 pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+ * line with the intel documentation for the i965 */
+ if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
+ ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
+ } else { /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ if (IS_I965G(dev))
+ ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+ else
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+ struct drm_intel_overlay_put_image *rec)
+{
+ struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+ if ((rec->dst_x < mode->crtc_hdisplay)
+ && (rec->dst_x + rec->dst_width
+ <= mode->crtc_hdisplay)
+ && (rec->dst_y < mode->crtc_vdisplay)
+ && (rec->dst_y + rec->dst_height
+ <= mode->crtc_vdisplay))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+ u32 tmp;
+
+ /* downscaling limit is 8.0 */
+ tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+ tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_overlay_src(struct drm_device *dev,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_gem_object *new_bo)
+{
+ u32 stride_mask;
+ int depth;
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+ size_t tmp;
+
+ /* check src dimensions */
+ if (IS_845G(dev) || IS_I830(dev)) {
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
+ || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+ if (rec->src_height > IMAGE_MAX_HEIGHT
+ || rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+ if (rec->src_height < N_VERT_Y_TAPS*4
+ || rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+ /* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+ case I915_OVERLAY_YUV_PACKED:
+ depth = packed_depth_bytes(rec->flags);
+ if (uv_vscale != 1)
+ return -EINVAL;
+ if (depth < 0)
+ return depth;
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+ return -EINVAL;
+
+ /* stride checking */
+ stride_mask = 63;
+
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+ if (IS_I965G(dev) && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+ 4 : 8;
+ if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* packed formats: the stride must cover a full line of packed pixels */
+ if (packed_width_bytes(rec->flags, rec->src_width)
+ > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ tmp = rec->stride_UV*rec->src_height;
+ tmp /= uv_vscale;
+ if (rec->offset_U + tmp > new_bo->size
+ || rec->offset_V + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_put_image *put_image_rec = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+ struct drm_gem_object *new_bo;
+ struct put_image_params *params;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_overlay_switch_off(overlay);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+ }
+
+ params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!drmmode_obj) {
+ kfree(params);
+ return -ENOENT;
+ }
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+ new_bo = drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle);
+ if (!new_bo) {
+ kfree(params);
+ return -ENOENT;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (overlay->hw_wedged) {
+ ret = intel_overlay_recover_from_interrupt(overlay, 1);
+ if (ret != 0)
+ goto out_unlock;
+ }
+
+ if (overlay->crtc != crtc) {
+ struct drm_display_mode *mode = &crtc->base.mode;
+ ret = intel_overlay_switch_off(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = check_overlay_possible_on_crtc(overlay, crtc);
+ if (ret != 0)
+ goto out_unlock;
+
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+ if (intel_panel_fitter_pipe(dev) == crtc->pipe
+ /* and the line is too wide, i.e. one-line mode */
+ && mode->hdisplay > 1024) {
+ overlay->pfit_active = 1;
+ update_pfit_vscale_ratio(overlay);
+ } else
+ overlay->pfit_active = 0;
+ }
+
+ ret = check_overlay_dst(overlay, put_image_rec);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->pfit_active) {
+ params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+ overlay->pfit_vscale_ratio) + 1;
+ } else {
+ params->dst_y = put_image_rec->dst_y;
+ params->dst_h = put_image_rec->dst_height;
+ }
+ params->dst_x = put_image_rec->dst_x;
+ params->dst_w = put_image_rec->dst_width;
+
+ params->src_w = put_image_rec->src_width;
+ params->src_h = put_image_rec->src_height;
+ params->src_scan_w = put_image_rec->src_scan_width;
+ params->src_scan_h = put_image_rec->src_scan_height;
+ if (params->src_scan_h > params->src_h
+ || params->src_scan_w > params->src_w) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = check_overlay_src(dev, put_image_rec, new_bo);
+ if (ret != 0)
+ goto out_unlock;
+ params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+ params->stride_Y = put_image_rec->stride_Y;
+ params->stride_UV = put_image_rec->stride_UV;
+ params->offset_Y = put_image_rec->offset_Y;
+ params->offset_U = put_image_rec->offset_U;
+ params->offset_V = put_image_rec->offset_V;
+
+ /* Check scaling after src size to prevent a divide-by-zero. */
+ ret = check_overlay_scaling(params);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ if (ret != 0)
+ goto out_unlock;
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ kfree(params);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_gem_object_unreference(new_bo);
+ kfree(params);
+
+ return ret;
+}
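When the panel fitter is active, intel_overlay_put_image() pre-divides the destination rectangle by the fitter's 12-bit fixed-point vertical ratio (adding 1 to dst_h to compensate for the downward rounding of the shift). A standalone replay with a made-up ratio:

#include <stdio.h>

int main(void)
{
        unsigned dst_y = 100, dst_h = 400;
        unsigned ratio = 0x1800; /* hypothetical 1.5x in 12-bit fixed point */

        printf("dst_y = %u\n", (dst_y << 12) / ratio);       /* prints 66 */
        printf("dst_h = %u\n", ((dst_h << 12) / ratio) + 1); /* prints 267 */
        return 0;
}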
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+ regs->OCLRC1 = overlay->saturation;
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+ int i;
+
+ if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+ return true;
+}
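check_gamma_bounds() requires each 8-bit channel of every gamma point to be strictly increasing from one point to the next; the check_gamma() helper below then chains it across gamma0..gamma5 with 0 and 0x00ffffff as end stops. A small illustrative reimplementation (values are made up):

#include <stdio.h>

static int gamma_bounds_ok(unsigned g1, unsigned g2)
{
        int i;

        if ((g1 & 0xff000000) || (g2 & 0xff000000))
                return 0;
        /* each of the three 8-bit channels must strictly increase */
        for (i = 0; i < 3; i++)
                if (((g1 >> i * 8) & 0xff) >= ((g2 >> i * 8) & 0xff))
                        return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", gamma_bounds_ok(0x101010, 0x202020)); /* 1: monotonic */
        printf("%d\n", gamma_bounds_ok(0x202020, 0x201010)); /* 0: regresses */
        return 0;
}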
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma5 >> i*8) & 0xff) == 0x80)
+ return false;
+ }
+
+ return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+ if (!check_gamma_bounds(0, attrs->gamma0)
+ || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
+ || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
+ || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
+ || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
+ || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
+ || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
+ return 0;
+}
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_attrs *attrs = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+ if (IS_I9XX(dev)) {
+ attrs->gamma0 = I915_READ(OGAMC0);
+ attrs->gamma1 = I915_READ(OGAMC1);
+ attrs->gamma2 = I915_READ(OGAMC2);
+ attrs->gamma3 = I915_READ(OGAMC3);
+ attrs->gamma4 = I915_READ(OGAMC4);
+ attrs->gamma5 = I915_READ(OGAMC5);
+ }
+ ret = 0;
+ } else {
+ overlay->color_key = attrs->color_key;
+ if (attrs->brightness >= -128 && attrs->brightness <= 127) {
+ overlay->brightness = attrs->brightness;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (attrs->contrast <= 255) {
+ overlay->contrast = attrs->contrast;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (attrs->saturation <= 1023) {
+ overlay->saturation = attrs->saturation;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+ if (!IS_I9XX(dev)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (overlay->active) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = check_gamma(attrs);
+ if (ret != 0)
+ goto out_unlock;
+
+ I915_WRITE(OGAMC0, attrs->gamma0);
+ I915_WRITE(OGAMC1, attrs->gamma1);
+ I915_WRITE(OGAMC2, attrs->gamma2);
+ I915_WRITE(OGAMC3, attrs->gamma3);
+ I915_WRITE(OGAMC4, attrs->gamma4);
+ I915_WRITE(OGAMC5, attrs->gamma5);
+ }
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_gem_object *reg_bo;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!OVERLAY_EXISTS(dev))
+ return;
+
+ overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+ if (!overlay)
+ return;
+ overlay->dev = dev;
+
+ reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ if (!reg_bo)
+ goto out_free;
+ overlay->reg_bo = reg_bo->driver_private;
+
+ if (OVERLAY_NONPHYSICAL(dev)) {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ } else {
+ ret = i915_gem_attach_phys_object(dev, reg_bo,
+ I915_GEM_PHYS_OVERLAY_REGS);
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ }
+
+ /* init all values */
+ overlay->color_key = 0x0101fe;
+ overlay->brightness = -19;
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto out_free_bo;
+
+ memset(regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(regs);
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ dev_priv->overlay = overlay;
+ DRM_INFO("initialized overlay support\n");
+ return;
+
+out_free_bo:
+ drm_gem_object_unreference(reg_bo);
+out_free:
+ kfree(overlay);
+ return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->overlay) {
+ /* The BOs should already be freed by the generic code.
+ * Furthermore, modesetting teardown happens beforehand, so
+ * the hardware should already be off. */
+ BUG_ON(dev_priv->overlay->active);
+
+ kfree(dev_priv->overlay);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7fa3279e2f..24a3dc99716 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,8 +36,6 @@
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-#undef SDVO_DEBUG
-
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name {
#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
-#ifdef SDVO_DEBUG
static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
void *args, int args_len)
{
@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
DRM_LOG_KMS("(%02X)", cmd);
DRM_LOG_KMS("\n");
}
-#else
-#define intel_sdvo_debug_write(o, c, a, l)
-#endif
static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
void *args, int args_len)
@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
}
-#ifdef SDVO_DEBUG
static const char *cmd_status_names[] = {
"Power on",
"Success",
@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
DRM_LOG_KMS("(??? %d)", status);
DRM_LOG_KMS("\n");
}
-#else
-#define intel_sdvo_debug_response(o, r, l, s)
-#endif
static u8 intel_sdvo_read_response(struct intel_output *intel_output,
void *response, int response_len)
@@ -1627,6 +1617,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
intel_sdvo_write_cmd(intel_output,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+ if (sdvo_priv->is_tv) {
+ /* add 30ms delay when the output type is SDVO-TV */
+ mdelay(30);
+ }
status = intel_sdvo_read_response(intel_output, &response, 2);
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9ca917931af..552ec110b74 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl |= TV_TRILEVEL_SYNC;
if (tv_mode->pal_burst)
tv_ctl |= TV_PAL_BURST;
+
scctl1 = 0;
- /* dda1 implies valid video levels */
- if (tv_mode->dda1_inc) {
+ if (tv_mode->dda1_inc)
scctl1 |= TV_SC_DDA1_EN;
- }
-
if (tv_mode->dda2_inc)
scctl1 |= TV_SC_DDA2_EN;
-
if (tv_mode->dda3_inc)
scctl1 |= TV_SC_DDA3_EN;
-
scctl1 |= tv_mode->sc_reset;
- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ if (video_levels)
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG("Detected Composite TV connection\n");
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG("Detected S-Video TV connection\n");
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG("Detected Component TV connection\n");
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- DRM_DEBUG("No TV connection detected\n");
+ DRM_DEBUG_KMS("No TV connection detected\n");
type = -1;
}
@@ -1702,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_tv_enc_destroy,
};
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the integrated TV is present.
+ * Returns 1 if it is present, 0 if it is not.
+ * If no child dev was parsed from VBT, the TV is assumed present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+ p_child->device_type != DEVICE_TYPE_TV)
+ continue;
+ /* Only when the addin_offset is non-zero is the
+ * device regarded as present.
+ */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void
intel_tv_init(struct drm_device *dev)
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
+ if (!tv_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ return;
+ }
/* Even if we have an encoder we may not have a connector */
if (!dev_priv->int_tv_support)
return;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5713eedd6e..feb52eee431 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d67c42555ab..6578d19dff9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -263,10 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
+ val = gctx->scratch[((gctx->fb_base + idx) / 4)];
if (print)
DEBUG("FB[0x%02X]", idx);
- printk(KERN_INFO "FB access is not implemented.\n");
- return 0;
+ break;
case ATOM_ARG_IMM:
switch (align) {
case ATOM_SRC_DWORD:
@@ -488,9 +488,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
+ gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
DEBUG("FB[0x%02X]", idx);
- printk(KERN_INFO "FB access is not implemented.\n");
- return;
+ break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
@@ -1214,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
*crev = CU8(idx + 3);
return;
}
+
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+ int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+ uint16_t data_offset;
+ int usage_bytes;
+ struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+ atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+ firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+ firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+
+ usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ if (usage_bytes == 0)
+ usage_bytes = 20 * 1024;
+ /* allocate some scratch memory */
+ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+ if (!ctx->scratch)
+ return -ENOMEM;
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index e6eb38f2bca..6671848e5ea 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -132,6 +132,7 @@ struct atom_context {
uint8_t shift;
int cs_equal, cs_above;
int io_mode;
+ uint32_t *scratch;
};
extern int atom_debug;
@@ -142,6 +143,7 @@ int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"
#include "ObjectID.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6643afc36ce..5f48515c77a 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
typedef struct _ATOM_HPD_INT_RECORD {
ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucPluggged_PinState;
+ UCHAR ucPlugged_PinState;
} ATOM_HPD_INT_RECORD;
typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c15287a590f..260fcf59f00 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
atombios_blank_crtc(crtc, 0);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, 1);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
atombios_enable_crtc(crtc, 0);
break;
}
-
- if (mode != DRM_MODE_DPMS_OFF) {
- radeon_crtc_load_lut(crtc);
- }
}
static void
@@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type !=
DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (!ASIC_IS_AVIVO(rdev)
- && (encoder->encoder_type ==
- DRM_MODE_ENCODER_LVDS))
+ if (encoder->encoder_type ==
+ DRM_MODE_ENCODER_LVDS)
pll_flags |= RADEON_PLL_USE_REF_DIV;
}
radeon_encoder = to_radeon_encoder(encoder);
@@ -500,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
else
pll = &rdev->clock.p2pll;
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll)
+ radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+ else
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+ } else
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -574,21 +583,32 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
- struct drm_radeon_gem_object *obj_priv;
+ struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
- if (!crtc->fb)
- return -EINVAL;
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
+ /* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- obj_priv = obj->driver_private;
-
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
switch (crtc->fb->bits_per_pixel) {
case 8:
@@ -618,8 +638,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
@@ -674,7 +692,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
/* Bytes per pixel may have changed */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 00000000000..0d63c4436e7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "drm_dp_helper.h"
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_LINK_STATUS_SIZE 6
+#define DP_DPCD_SIZE 8
+
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+
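+/*
+ * Editor's note (assumption): each entry appears to be the maximum
+ * 24 bpp pixel clock in kHz for the given lane count and link rate
+ * (symbol rate * lanes * 8/10 bit coding / 3 bytes per pixel), which
+ * is what mode_clock is compared against in the helpers below.
+ */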
+static const int dp_clocks[] = {
+ 54000, /* 1 lane, 1.62 GHz */
+ 90000, /* 1 lane, 2.70 GHz */
+ 108000, /* 2 lane, 1.62 GHz */
+ 180000, /* 2 lane, 2.70 GHz */
+ 216000, /* 4 lane, 1.62 GHz */
+ 360000, /* 4 lane, 2.70 GHz */
+};
+
+static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);
+
+/* common helper functions */
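+/*
+ * Walk the dp_clocks table and return the smallest lane count whose
+ * bandwidth covers mode_clock, honouring the sink's maximum link rate
+ * (odd entries are the 2.70 GHz rates and are skipped for 1.62-only
+ * sinks) and its maximum lane count.  Returns 0 if no combination
+ * can carry the mode.
+ */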
+static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int i;
+ u8 max_link_bw;
+ u8 max_lane_count;
+
+ if (!dpcd)
+ return 0;
+
+ max_link_bw = dpcd[DP_MAX_LINK_RATE];
+ max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ for (i = 0; i < num_dp_clocks; i++) {
+ if (i % 2)
+ continue;
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock) {
+ if (i < 2)
+ return 1;
+ else if (i < 4)
+ return 2;
+ else
+ return 4;
+ }
+ }
+ break;
+ case DP_LINK_BW_2_7:
+ for (i = 0; i < num_dp_clocks; i++) {
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock) {
+ if (i < 2)
+ return 1;
+ else if (i < 4)
+ return 2;
+ else
+ return 4;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int i;
+ u8 max_link_bw;
+ u8 max_lane_count;
+
+ if (!dpcd)
+ return 0;
+
+ max_link_bw = dpcd[DP_MAX_LINK_RATE];
+ max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ for (i = 0; i < num_dp_clocks; i++) {
+ if (i % 2)
+ continue;
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock)
+ return 162000;
+ }
+ break;
+ case DP_LINK_BW_2_7:
+ for (i = 0; i < num_dp_clocks; i++) {
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock)
+ return (i % 2) ? 270000 : 162000;
+ }
+ }
+
+ return 0;
+}
+
+int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
+ int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
+
+ if ((lanes == 0) || (bw == 0))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
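+/*
+ * DPCD packs the status of two lanes into each byte starting at
+ * DP_LANE0_1_STATUS; extract the 4-bit field for the requested lane.
+ */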
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
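+/*
+ * Higher voltage swings leave less headroom for pre-emphasis: the DP
+ * spec caps the combined drive level, so the maximum allowed
+ * pre-emphasis shrinks as the swing grows.
+ */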
+/* XXX fix me -- chip specific */
+#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
+static u8 dp_pre_emphasis_max(u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+
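+/*
+ * Take the highest voltage swing and pre-emphasis requested by any
+ * lane, clamp both to the supported maxima, and apply the result to
+ * all four lanes.
+ */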
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ u8 train_set[4])
+{
+ u8 v = 0;
+ u8 p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+ DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+ lane,
+ voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= DP_VOLTAGE_MAX)
+ v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= dp_pre_emphasis_max(v))
+ p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
+ voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
+
+
+/* radeon aux chan functions */
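+/*
+ * AUX transactions go through an atom table call: the request bytes
+ * are staged at offset 0 of the atom scratch buffer and any reply is
+ * read back from offset 16 (the lpAuxRequest/lpDataOut byte offsets
+ * passed in the args block below).
+ */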
+bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
+ int num_bytes, u8 *read_byte,
+ u8 read_buf_len, u8 delay)
+{
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+ unsigned char *base;
+
+ memset(&args, 0, sizeof(args));
+
+ base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+ memcpy(base, req_bytes, num_bytes);
+
+ args.lpAuxRequest = 0;
+ args.lpDataOut = 16;
+ args.ucDataOutLen = 0;
+ args.ucChannelID = chan->rec.i2c_id;
+ args.ucDelay = delay / 10;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ if (args.ucReplyStatus) {
+ DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+ req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
+ chan->rec.i2c_id, args.ucReplyStatus);
+ return false;
+ }
+
+ if (args.ucDataOutLen && read_byte && read_buf_len) {
+ if (read_buf_len < args.ucDataOutLen) {
+ DRM_ERROR("Buffer too small for return answer %d %d\n",
+ read_buf_len, args.ucDataOutLen);
+ return false;
+ }
+ {
+ int len = min(read_buf_len, args.ucDataOutLen);
+ memcpy(read_byte, base + 16, len);
+ }
+ }
+ return true;
+}
+
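+/*
+ * Native AUX request layout: bytes 0-1 carry the 16-bit DPCD address,
+ * byte 2 the command in its high nibble, byte 3 the total message
+ * length (high nibble) and the payload length minus one (low nibble).
+ */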
+bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
+ uint8_t send_bytes, uint8_t *send)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[20];
+ u8 msg_len, dp_msg_len;
+ bool ret;
+
+ dp_msg_len = 4;
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_WRITE << 4;
+ dp_msg_len += send_bytes;
+ msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
+
+ if (send_bytes > 16)
+ return false;
+
+ memcpy(&msg[4], send, send_bytes);
+ msg_len = 4 + send_bytes;
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
+ return ret;
+}
+
+bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
+ uint8_t delay, uint8_t expected_bytes,
+ uint8_t *read_p)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[20];
+ u8 msg_len, dp_msg_len;
+ bool ret = false;
+ msg_len = 4;
+ dp_msg_len = 4;
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_READ << 4;
+ msg[3] = (dp_msg_len) << 4;
+ msg[3] |= expected_bytes - 1;
+
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
+ return ret;
+}
+
+/* radeon dp functions */
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
+ uint8_t ucconfig, uint8_t lane_num)
+{
+ DP_ENCODER_SERVICE_PARAMETERS args;
+ int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+ memset(&args, 0, sizeof(args));
+ args.ucLinkClock = dp_clock / 10;
+ args.ucConfig = ucconfig;
+ args.ucAction = action;
+ args.ucLaneNum = lane_num;
+ args.ucStatus = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+ dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[25];
+ int ret;
+
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
+ if (ret) {
+ memcpy(dig_connector->dpcd, msg, 8);
+ {
+ int i;
+ DRM_DEBUG("DPCD: ");
+ for (i = 0; i < 8; i++)
+ DRM_DEBUG("%02x ", msg[i]);
+ DRM_DEBUG("\n");
+ }
+ return true;
+ }
+ dig_connector->dpcd[0] = 0;
+ return false;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ dig_connector->dp_clock =
+ dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
+ dig_connector->dp_lane_count =
+ dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
+}
+
+int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+ return dp_mode_valid(dig_connector->dpcd, mode->clock);
+}
+
+static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
+ DP_LINK_STATUS_SIZE, link_status);
+ if (!ret) {
+ DRM_ERROR("displayport link status failed\n");
+ return false;
+ }
+
+ DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+ return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ return false;
+ if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
+ return false;
+ return true;
+}
+
+static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+ if (dig_connector->dpcd[0] >= 0x11) {
+ radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
+ &power_state);
+ }
+}
+
+static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
+ &downspread);
+}
+
+static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
+ u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
+ link_configuration);
+}
+
+static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
+ struct drm_encoder *encoder,
+ u8 train_set[4])
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int i;
+
+ for (i = 0; i < dig_connector->dp_lane_count; i++)
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+ i, train_set[i]);
+
+ radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
+ dig_connector->dp_lane_count, train_set);
+}
+
+static void dp_set_training(struct radeon_connector *radeon_connector,
+ u8 training)
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
+ 1, &training);
+}
+
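+/*
+ * Link training, following the DP spec: phase one drives training
+ * pattern 1 and adjusts voltage swing / pre-emphasis until every lane
+ * reports clock recovery; phase two drives training pattern 2 until
+ * inter-lane alignment and channel equalization succeed.
+ */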
+void dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+ int enc_id = 0;
+ bool clock_recovery, channel_eq;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ u8 tries, voltage;
+ u8 train_set[4];
+ int i;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+ dig = radeon_encoder->enc_priv;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ if (ASIC_IS_DCE32(rdev)) {
+ if (dig->dig_block)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_LINK_A;
+ } else {
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
+ }
+
+ memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ if (dig_connector->dp_clock == 270000)
+ link_configuration[0] = DP_LINK_BW_2_7;
+ else
+ link_configuration[0] = DP_LINK_BW_1_62;
+ link_configuration[1] = dig_connector->dp_lane_count;
+ if (dig_connector->dpcd[0] >= 0x11)
+ link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ /* power up the sink */
+ dp_set_power(radeon_connector, DP_SET_POWER_D0);
+ /* disable the training pattern on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+ /* set link bw and lanes on the sink */
+ dp_set_link_bw_lanes(radeon_connector, link_configuration);
+ /* disable downspread on the sink */
+ dp_set_downspread(radeon_connector, 0);
+ /* start training on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+ dig_connector->dp_clock, enc_id, 0);
+ /* set training pattern 1 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 0);
+
+ /* set initial vs/emph */
+ memset(train_set, 0, 4);
+ udelay(400);
+ /* set training pattern 1 on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
+
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+ /* clock recovery loop */
+ clock_recovery = false;
+ tries = 0;
+ voltage = 0xff;
+ for (;;) {
+ udelay(100);
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ break;
+
+ if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ for (i = 0; i < dig_connector->dp_lane_count; i++) {
+ if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ }
+ if (i == dig_connector->dp_lane_count) {
+ DRM_ERROR("clock recovery reached max voltage\n");
+ break;
+ }
+
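+ /* DP spec: abandon clock recovery if the sink keeps requesting
+ * the same voltage swing five times in a row. */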
+ if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5) {
+ DRM_ERROR("clock recovery tried 5 times\n");
+ break;
+ }
+ } else
+ tries = 0;
+
+ voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+ }
+ if (!clock_recovery)
+ DRM_ERROR("clock recovery failed\n");
+ else
+ DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
+ train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+
+ /* set training pattern 2 on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
+ /* set training pattern 2 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 1);
+
+ /* channel equalization loop */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+ udelay(400);
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ break;
+
+ if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times */
+ if (tries > 5) {
+ DRM_ERROR("channel eq failed: 5 tries\n");
+ break;
+ }
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+ tries++;
+ }
+
+ if (!channel_eq)
+ DRM_ERROR("channel eq failed\n");
+ else
+ DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
+ train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+ >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+ /* disable the training pattern on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dig_connector->dp_clock, enc_id, 0);
+}
+
+int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+ int ret = 0;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_len, dp_msg_len;
+ int reply_bytes;
+
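+ /* I2C-over-AUX: the MOT (middle-of-transaction) bit keeps the
+ * I2C transaction open across AUX transfers until a STOP is sent. */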
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[2] = AUX_I2C_READ << 4;
+ else
+ msg[2] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[2] |= AUX_I2C_MOT << 4;
+
+ msg[0] = address;
+ msg[1] = address >> 8;
+
+ reply_bytes = 1;
+
+ msg_len = 4;
+ dp_msg_len = 3;
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[4] = write_byte;
+ msg_len++;
+ dp_msg_len += 2;
+ break;
+ case MODE_I2C_READ:
+ dp_msg_len += 1;
+ break;
+ default:
+ break;
+ }
+
+ msg[3] = (dp_msg_len) << 4;
+ ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
+
+ if (ret) {
+ if (read_byte)
+ *read_byte = reply[0];
+ return reply_bytes;
+ }
+ return -EREMOTEIO;
+}
+
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9e93eabcf1..824cc6480a0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+/* hpd for digital panel detect/disconnect */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
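+/*
+ * Flip the interrupt polarity to match the current pin state so the
+ * next hotplug transition (connect when disconnected, disconnect when
+ * connected) raises an interrupt.
+ */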
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r100_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(RADEON_FP_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP_DETECT_INT_POL;
+ WREG32(RADEON_FP_GEN_CNTL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(RADEON_FP2_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP2_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP2_DETECT_INT_POL;
+ WREG32(RADEON_FP2_GEN_CNTL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+void r100_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ rdev->irq.hpd[1] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ r100_irq_set(rdev);
+}
+
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ rdev->irq.hpd[1] = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
/*
* PCI GART
*/
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return radeon_gart_table_ram_alloc(rdev);
}
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+}
+
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
WREG32(RADEON_AIC_HI_ADDR, tmp);
- /* Enable bus mastering */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.crtc_vblank_int[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
+ if (rdev->irq.hpd[0]) {
+ tmp |= RADEON_FP_DETECT_MASK;
+ }
+ if (rdev->irq.hpd[1]) {
+ tmp |= RADEON_FP2_DETECT_MASK;
+ }
WREG32(RADEON_GEN_INT_CNTL, tmp);
return 0;
}
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev)
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
- uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
- RADEON_CRTC2_VBLANK_STAT;
+ uint32_t irq_mask = RADEON_SW_INT_TEST |
+ RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+ RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
int r100_irq_process(struct radeon_device *rdev)
{
uint32_t status, msi_rearm;
+ bool queue_hotplug = false;
status = r100_irq_ack(rdev);
if (!status) {
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
}
+ if (status & RADEON_FP_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (status & RADEON_FP2_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
status = r100_irq_ack(rdev);
}
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
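+ /* The new radeon_bo API requires the buffer to be reserved
+ * around pin/kmap and released with unreserve afterwards. */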
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
return r;
}
}
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev)
void r100_wb_fini(struct radeon_device *rdev)
{
+ int r;
+
r100_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
- radeon_object_unref(&rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+ return;
+ }
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -1288,17 +1412,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj)
+ struct radeon_bo *robj)
{
unsigned idx;
u32 value;
idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2);
- if ((value + 1) > radeon_object_size(robj)) {
+ if ((value + 1) > radeon_bo_size(robj)) {
DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
"(need %u have %lu) !\n",
value + 1,
- radeon_object_size(robj));
+ radeon_bo_size(robj));
return -EINVAL;
}
return 0;
@@ -1583,6 +1707,14 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
}
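+/*
+ * Invalidate the HDP (host data path) read buffer so reads through
+ * the host path see up-to-date VRAM contents.
+ */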
+void r100_hdp_flush(struct radeon_device *rdev)
+{
+ u32 tmp;
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
+ WREG32(RADEON_HOST_PATH_CNTL, tmp);
+}
+
void r100_hdp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -1650,6 +1782,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
return 0;
}
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+ /* set these so they don't interfere with anything */
+ WREG32(RADEON_OV0_SCALE_CNTL, 0);
+ WREG32(RADEON_SUBPIC_CNTL, 0);
+ WREG32(RADEON_VIPH_CONTROL, 0);
+ WREG32(RADEON_I2C_CNTL_1, 0);
+ WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+ WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+ WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+}
/*
* VRAM info
@@ -2594,7 +2737,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
- struct radeon_object *cube_robj;
+ struct radeon_bo *cube_robj;
unsigned long size;
for (face = 0; face < 5; face++) {
@@ -2607,9 +2750,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
size += track->textures[idx].cube_info[face].offset;
- if (size > radeon_object_size(cube_robj)) {
+ if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_object_size(cube_robj));
+ size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
@@ -2620,7 +2763,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h;
int ret;
@@ -2676,9 +2819,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
- if (size > radeon_object_size(robj)) {
+ if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_object_size(robj));
+ "%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
@@ -2700,10 +2843,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
- if (size > radeon_object_size(track->cb[i].robj)) {
+ if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
- radeon_object_size(track->cb[i].robj));
+ radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
@@ -2717,10 +2860,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
- if (size > radeon_object_size(track->zb.robj)) {
+ if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
- radeon_object_size(track->zb.robj));
+ radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
@@ -2738,11 +2881,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i,
- size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
@@ -2756,10 +2900,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i, size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
return -EINVAL;
}
}
@@ -3101,6 +3247,9 @@ static int r100_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
@@ -3108,13 +3257,13 @@ static int r100_startup(struct radeon_device *rdev)
r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
+ r100_enable_bm(rdev);
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r100_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r100_startup(rdev);
}
@@ -3174,7 +3325,7 @@ void r100_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -3242,10 +3393,8 @@ int r100_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r100_errata(rdev);
/* Initialize clocks */
@@ -3264,7 +3413,7 @@ int r100_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCI) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d76a89..ca50903dd2b 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,26 @@
* CS functions
*/
struct r100_cs_track_cb {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned pitch;
unsigned cpp;
unsigned offset;
};
struct r100_cs_track_array {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned esize;
};
struct r100_cs_cube_info {
- struct radeon_object *robj;
- unsigned offset;
+ struct radeon_bo *robj;
+ unsigned offset;
unsigned width;
unsigned height;
};
struct r100_cs_track_texture {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
unsigned pitch;
unsigned width;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2f43ee8e404..83378c39d0e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
@@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350 ||
+ rdev->family == CHIP_RV350)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -1237,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r300_startup(rdev);
}
@@ -1265,7 +1280,7 @@ void r300_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1303,10 +1318,8 @@ int r300_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r300_errata(rdev);
/* Initialize clocks */
@@ -1325,7 +1338,7 @@ int r300_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1cefdbcc085..c05a7270cf0 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r420_clock_resume(rdev);
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
}
r420_pipes_init(rdev);
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -229,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
}
/* Resume clock after posting */
r420_clock_resume(rdev);
-
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r420_startup(rdev);
}
@@ -258,7 +261,7 @@ void r420_fini(struct radeon_device *rdev)
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
if (rdev->is_atom_bios) {
radeon_atombios_fini(rdev);
} else {
@@ -301,14 +304,9 @@ int r420_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -331,10 +329,13 @@ int r420_init(struct radeon_device *rdev)
return r;
}
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r) {
return r;
}
+ if (rdev->family == CHIP_R420)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCIE) {
r = rv370_pcie_gart_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 7baa7395556..74ad89bdf2b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -716,6 +716,8 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
+#define AVIVO_DC_GPIO_HPD_A 0x7e94
+
#define AVIVO_GPIO_0 0x7e30
#define AVIVO_GPIO_1 0x7e40
#define AVIVO_GPIO_2 0x7e50
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f7435185c0a..0f3843b6dac 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -221,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r520_startup(rdev);
}
@@ -254,6 +255,9 @@ int r520_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
if (!radeon_card_posted(rdev) && rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
@@ -277,7 +281,7 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6740ed24358..36656bd110b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ }
+ return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r600_hpd_sense(rdev, hpd);
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+ if (ASIC_IS_DCE32(rdev))
+ tmp |= DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[2] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ r600_irq_set(rdev);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
/*
* R600 PCIE GART
*/
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -1101,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA);
}
+void r600_hdp_flush(struct radeon_device *rdev)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
/*
* CP & Ring
@@ -1110,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev)
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
-int r600_cp_init_microcode(struct radeon_device *rdev)
+int r600_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
- size_t pfp_req_size, me_req_size;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size;
char fw_name[30];
int err;
@@ -1128,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
}
switch (rdev->family) {
- case CHIP_R600: chip_name = "R600"; break;
- case CHIP_RV610: chip_name = "RV610"; break;
- case CHIP_RV630: chip_name = "RV630"; break;
- case CHIP_RV620: chip_name = "RV620"; break;
- case CHIP_RV635: chip_name = "RV635"; break;
- case CHIP_RV670: chip_name = "RV670"; break;
+ case CHIP_R600:
+ chip_name = "R600";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV610:
+ chip_name = "RV610";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV630:
+ chip_name = "RV630";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV620:
+ chip_name = "RV620";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV635:
+ chip_name = "RV635";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV670:
+ chip_name = "RV670";
+ rlc_chip_name = "R600";
+ break;
case CHIP_RS780:
- case CHIP_RS880: chip_name = "RS780"; break;
- case CHIP_RV770: chip_name = "RV770"; break;
+ case CHIP_RS880:
+ chip_name = "RS780";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV770:
+ chip_name = "RV770";
+ rlc_chip_name = "R700";
+ break;
case CHIP_RV730:
- case CHIP_RV740: chip_name = "RV730"; break;
- case CHIP_RV710: chip_name = "RV710"; break;
+ case CHIP_RV740:
+ chip_name = "RV730";
+ rlc_chip_name = "R700";
+ break;
+ case CHIP_RV710:
+ chip_name = "RV710";
+ rlc_chip_name = "R700";
+ break;
default: BUG();
}
if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
+ rlc_req_size = R700_RLC_UCODE_SIZE * 4;
} else {
pfp_req_size = PFP_UCODE_SIZE * 4;
me_req_size = PM4_UCODE_SIZE * 12;
+ rlc_req_size = RLC_UCODE_SIZE * 4;
}
- DRM_INFO("Loading %s CP Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1175,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
rdev->me_fw->size, fw_name);
err = -EINVAL;
}
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
out:
platform_device_unregister(pdev);
@@ -1187,6 +1519,8 @@ out:
rdev->pfp_fw = NULL;
release_firmware(rdev->me_fw);
rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
}
return err;
}
@@ -1381,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev)
void r600_wb_disable(struct radeon_device *rdev)
{
+ int r;
+
WREG32(SCRATCH_UMSK, 0);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
}
}
@@ -1392,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev)
{
r600_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_unref(&rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -1403,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ r600_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
- dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
@@ -1433,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ /* Also consider EVENT_WRITE_EOP; it handles interrupts, timestamps and events */
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
}
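The fence emit is now five dwords (three for the scratch-register write plus two for the packet-0 interrupt), which is why r600_blit_prepare_copy() in r600_blit_kms.c below grows its per-fence ring budget from 3 to 5 dwords.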
int r600_copy_dma(struct radeon_device *rdev,
@@ -1459,18 +1810,6 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}
-int r600_irq_process(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
-int r600_irq_set(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -1506,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
{
int r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
@@ -1516,13 +1863,26 @@ int r600_startup(struct radeon_device *rdev)
}
r600_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
@@ -1583,13 +1943,19 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
@@ -1627,7 +1993,11 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -1650,31 +2020,31 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
r = r600_pcie_gart_init(rdev);
if (r)
return r;
- rdev->accel_working = true;
r = r600_blit_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled blitter (%d).\n", r);
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
return r;
}
+ rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
r600_suspend(rdev);
@@ -1686,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
@@ -1704,6 +2074,8 @@ void r600_fini(struct radeon_device *rdev)
r600_suspend(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
@@ -1712,7 +2084,7 @@ void r600_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1798,8 +2170,657 @@ int r600_ib_test(struct radeon_device *rdev)
return r;
}
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
+ * much the same as the CP ring buffer, but in reverse. Rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and host consumes. As the host irq handler processes interrupts, it
+ * increments the rptr. When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
+ */
+
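A minimal sketch of that consumer model, assuming 16-byte vectors and a hypothetical handle_entry() helper (the real loop is r600_irq_process() below):

	static void ih_drain(volatile u32 *ring, u32 *rptr, u32 wptr, u32 ring_size)
	{
		/* rptr and wptr are byte offsets into the ring */
		while (*rptr != wptr) {
			handle_entry(&ring[*rptr / 4]);	/* hypothetical per-vector handler */
			*rptr += 16;			/* one 128-bit vector */
			if (*rptr >= ring_size)
				*rptr = 0;		/* wrap to the start */
		}
	}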
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ u32 rb_bufsz;
+
+ /* Align ring size */
+ rb_bufsz = drm_order(ring_size / 4);
+ ring_size = (1 << rb_bufsz) * 4;
+ rdev->ih.ring_size = ring_size;
+ rdev->ih.align_mask = 4 - 1;
+}
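Worked example: for the 64 KiB ring requested from r600_init() below, rb_bufsz = drm_order(65536 / 4) = 14 and (1 << 14) * 4 = 65536, so the round-up is a no-op; a 48 KiB request would be rounded up to 64 KiB, since drm_order() returns the smallest order whose power of two covers its argument.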
+
+static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+{
+ int r;
+
+ rdev->ih.ring_size = ring_size;
+ /* Allocate ring buffer */
+ if (rdev->ih.ring_obj == NULL) {
+ r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ih.ring_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->ih.ring_obj,
+ (void **)&rdev->ih.ring);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+ return r;
+ }
+ }
+ rdev->ih.ptr_mask = (rdev->ih.ring_size / 4) - 1;
+ rdev->ih.rptr = 0;
+
+ return 0;
+}
+
+static void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+ int r;
+ if (rdev->ih.ring_obj) {
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ih.ring_obj);
+ radeon_bo_unpin(rdev->ih.ring_obj);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ }
+ radeon_bo_unref(&rdev->ih.ring_obj);
+ rdev->ih.ring = NULL;
+ rdev->ih.ring_obj = NULL;
+ }
+}
+
+static void r600_rlc_stop(struct radeon_device *rdev)
+{
+ if (rdev->family >= CHIP_RV770) {
+ /* r7xx asics need to soft reset RLC before halting */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(15000);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+ }
+
+ WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+ u32 i;
+ const __be32 *fw_data;
+
+ if (!rdev->rlc_fw)
+ return -EINVAL;
+
+ r600_rlc_stop(rdev);
+
+ WREG32(RLC_HB_BASE, 0);
+ WREG32(RLC_HB_CNTL, 0);
+ WREG32(RLC_HB_RPTR, 0);
+ WREG32(RLC_HB_WPTR, 0);
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->family >= CHIP_RV770) {
+ for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else {
+ for (i = 0; i < RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ r600_rlc_start(rdev);
+
+ return 0;
+}
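The ucode upload above is the usual indirect address/data idiom; generically, for any such register pair (the register names here are hypothetical):

	for (i = 0; i < n_dwords; i++) {
		WREG32(UCODE_ADDR_REG, i);			/* select slot i */
		WREG32(UCODE_DATA_REG, be32_to_cpup(fw_data++));/* store one dword */
	}
	WREG32(UCODE_ADDR_REG, 0);	/* rewind before releasing the engine */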
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ rdev->ih.enabled = true;
+}
+
+static void r600_disable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ rdev->ih.enabled = false;
+ rdev->ih.wptr = 0;
+ rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(DxMODE_INT_MASK, 0);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ if (ASIC_IS_DCE32(rdev)) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ } else {
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ }
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+ /* allocate ring */
+ ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+ if (ret)
+ return ret;
+
+ /* disable irqs */
+ r600_disable_interrupts(rdev);
+
+ /* init rlc */
+ ret = r600_rlc_init(rdev);
+ if (ret) {
+ r600_ih_ring_fini(rdev);
+ return ret;
+ }
+
+ /* setup interrupt control */
+ /* set dummy read address to ring address */
+ WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+ ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1));
+ /* WPTR writeback, not yet */
+ /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
+ WREG32(IH_RB_WPTR_ADDR_LO, 0);
+ WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ /* Default settings for IH_CNTL (disabled at first) */
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+ /* RPTR_REARM only works if msi's are enabled */
+ if (rdev->msi_enabled)
+ ih_cntl |= RPTR_REARM;
+
+#ifdef __BIG_ENDIAN
+ ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
+#endif
+ WREG32(IH_CNTL, ih_cntl);
+
+ /* force the active interrupt state to all disabled */
+ r600_disable_interrupt_state(rdev);
+
+ /* enable irqs */
+ r600_enable_interrupts(rdev);
+
+ return ret;
+}
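With the default 64 KiB ring this programs IH_RB_CNTL = IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | IH_IB_SIZE(14) — rb_bufsz = drm_order(65536 / 4) = 14, and (rb_bufsz << 1) is exactly IH_IB_SIZE() — i.e. 0x80000000 | 0x10000 | 0x1c = 0x8001001c; the defines are added to r600d.h later in this patch.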
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_disable_interrupts(rdev);
+ r600_rlc_stop(rdev);
+ r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 mode_int = 0;
+ u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled)
+ return 0;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ if (ASIC_IS_DCE32(rdev)) {
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
+ } else {
+ hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("r600_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("r600_irq_set: vblank 0\n");
+ mode_int |= D1MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("r600_irq_set: vblank 1\n");
+ mode_int |= D2MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("r600_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("r600_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("r600_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("r600_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("r600_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("r600_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DxMODE_INT_MASK, mode_int);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ }
+ } else {
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+ }
+
+ return 0;
+}
+
+static inline void r600_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2)
+{
+ u32 tmp;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ } else {
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = 0;
+ }
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+ WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (*disp_int & LB_D2_VLINE_INTERRUPT)
+ WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int & DC_HPD2_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (ASIC_IS_DCE32(rdev)) {
+ if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ }
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_disable_interrupt_state(rdev);
+}
+
+static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+ if (wptr & RB_OVERFLOW) {
+ WARN_ON(1);
+ /* XXX deal with overflow */
+ DRM_ERROR("IH RB overflow\n");
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ wptr = wptr & WPTR_OFFSET_MASK;
+
+ return wptr;
+}
+
+/* r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0] - interrupt source id
+ * [31:8] - reserved
+ * [59:32] - interrupt source data
+ * [127:60] - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id src_data description
+ * 1 0 D1 Vblank
+ * 1 1 D1 Vline
+ * 5 0 D2 Vblank
+ * 5 1 D2 Vline
+ * 19 0 FP Hot plug detection A
+ * 19 1 FP Hot plug detection B
+ * 19 2 DAC A auto-detection
+ * 19 3 DAC B auto-detection
+ * 176 - CP_INT RB
+ * 177 - CP_INT IB1
+ * 178 - CP_INT IB2
+ * 181 - EOP Interrupt
+ * 233 - GUI Idle
+ *
+ * Note, these are based on r600 and may need to be
+ * adjusted or added to on newer asics
+ */
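Pulling one vector apart therefore looks like this (exactly what the loop in r600_irq_process() below does):

	ring_index = rptr / 4;					/* byte offset -> dword index */
	src_id = rdev->ih.ring[ring_index] & 0xff;		/* bits [7:0] of dword 0 */
	src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;	/* bits [59:32] = bits [27:0] of dword 1 */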
+
+int r600_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = r600_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 last_entry = rdev->ih.ring_size - 16;
+ u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int & LB_D2_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 19: /* HPD/DAC hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int & DC_HPD2_INTERRUPT) {
+ disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont & DC_HPD3_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont & DC_HPD4_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 10:
+ if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 12:
+ if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ if (rptr == last_entry)
+ rptr = 0;
+ else
+ rptr += 16;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
/*
* Debugfs info
@@ -1811,21 +2832,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t rdp, wdp;
unsigned count, i, j;
radeon_ring_free_size(rdev);
- rdp = RREG32(CP_RB_RPTR);
- wdp = RREG32(CP_RB_WPTR);
- count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
- seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
- seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+ seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+ seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
+ i = rdev->cp.rptr;
for (j = 0; j <= count; j++) {
- i = (rdp + j) & rdev->cp.ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (i + 1) & rdev->cp.ptr_mask;
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index dbf716e1fbf..9aecafb51b6 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_object_create(rdev, NULL, obj_size,
- true, RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->r600_blit.shader_obj);
+ r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size,
rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
- r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
if (r) {
DRM_ERROR("failed to map blit object %d\n", r);
return r;
}
-
if (rdev->family >= CHIP_RV770)
memcpy_toio(ptr + rdev->r600_blit.state_offset,
r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
-
-
memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
- radeon_object_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
void r600_blit_fini(struct radeon_device *rdev)
{
- radeon_object_unpin(rdev->r600_blit.shader_obj);
- radeon_object_unref(&rdev->r600_blit.shader_obj);
+ int r;
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+ goto out_unref;
+ }
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 3; /* fence emit for VB IB */
+ ring_size += 5; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 3; /* fence emit for done copy */
+ ring_size += 5; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
WARN_ON(r);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 27ab428b149..05894edadab 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -456,7 +456,215 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
-
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define RLC_CNTL 0x3f00
+# define RLC_ENABLE (1 << 0)
+#define RLC_HB_BASE 0x3f10
+#define RLC_HB_CNTL 0x3f0c
+#define RLC_HB_RPTR 0x3f20
+#define RLC_HB_WPTR 0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR 0x3f14
+#define RLC_HB_WPTR_MSB_ADDR 0x3f18
+#define RLC_MC_CNTL 0x3f44
+#define RLC_UCODE_CNTL 0x3f48
+#define RLC_UCODE_ADDR 0x3f2c
+#define RLC_UCODE_DATA 0x3f30
+
+#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_RLC (1 << 13)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define INTERRUPT_CNTL 0x5468
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x546c
+
+#define D1MODE_VBLANK_STATUS 0x6534
+#define D2MODE_VBLANK_STATUS 0x6d34
+# define DxMODE_VBLANK_OCCURRED (1 << 0)
+# define DxMODE_VBLANK_ACK (1 << 4)
+# define DxMODE_VBLANK_STAT (1 << 12)
+# define DxMODE_VBLANK_INTERRUPT (1 << 16)
+# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
+#define D1MODE_VLINE_STATUS 0x653c
+#define D2MODE_VLINE_STATUS 0x6d3c
+# define DxMODE_VLINE_OCCURRED (1 << 0)
+# define DxMODE_VLINE_ACK (1 << 4)
+# define DxMODE_VLINE_STAT (1 << 12)
+# define DxMODE_VLINE_INTERRUPT (1 << 16)
+# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
+#define DxMODE_INT_MASK 0x6540
+# define D1MODE_VBLANK_INT_MASK (1 << 0)
+# define D1MODE_VLINE_INT_MASK (1 << 4)
+# define D2MODE_VBLANK_INT_MASK (1 << 8)
+# define D2MODE_VLINE_INT_MASK (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
+# define DC_HPD1_INTERRUPT (1 << 18)
+# define DC_HPD2_INTERRUPT (1 << 19)
+#define DISP_INTERRUPT_STATUS 0x7edc
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VLINE_INTERRUPT (1 << 3)
+# define LB_D1_VBLANK_INTERRUPT (1 << 4)
+# define LB_D2_VBLANK_INTERRUPT (1 << 5)
+# define DACA_AUTODETECT_INTERRUPT (1 << 16)
+# define DACB_AUTODETECT_INTERRUPT (1 << 17)
+# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
+# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
+# define DC_HPD4_INTERRUPT (1 << 14)
+# define DC_HPD4_RX_INTERRUPT (1 << 15)
+# define DC_HPD3_INTERRUPT (1 << 28)
+# define DC_HPD1_RX_INTERRUPT (1 << 29)
+# define DC_HPD2_RX_INTERRUPT (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
+# define DC_HPD3_RX_INTERRUPT (1 << 0)
+# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
+# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
+# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
+# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
+# define AUX1_SW_DONE_INTERRUPT (1 << 5)
+# define AUX1_LS_DONE_INTERRUPT (1 << 6)
+# define AUX2_SW_DONE_INTERRUPT (1 << 7)
+# define AUX2_LS_DONE_INTERRUPT (1 << 8)
+# define AUX3_SW_DONE_INTERRUPT (1 << 9)
+# define AUX3_LS_DONE_INTERRUPT (1 << 10)
+# define AUX4_SW_DONE_INTERRUPT (1 << 11)
+# define AUX4_LS_DONE_INTERRUPT (1 << 12)
+# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
+# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
+/* DCE 3.2 */
+# define AUX5_SW_DONE_INTERRUPT (1 << 15)
+# define AUX5_LS_DONE_INTERRUPT (1 << 16)
+# define AUX6_SW_DONE_INTERRUPT (1 << 17)
+# define AUX6_LS_DONE_INTERRUPT (1 << 18)
+# define DC_HPD5_INTERRUPT (1 << 19)
+# define DC_HPD5_RX_INTERRUPT (1 << 20)
+# define DC_HPD6_INTERRUPT (1 << 21)
+# define DC_HPD6_RX_INTERRUPT (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL 0x7828
+#define DACB_AUTO_DETECT_CONTROL 0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128
+# define DACx_AUTODETECT_MODE(x) ((x) << 0)
+# define DACx_AUTODETECT_MODE_NONE 0
+# define DACx_AUTODETECT_MODE_CONNECT 1
+# define DACx_AUTODETECT_MODE_DISCONNECT 2
+# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
+#define DACA_AUTODETECT_INT_CONTROL 0x7838
+#define DACB_AUTODETECT_INT_CONTROL 0x7a38
+# define DACx_AUTODETECT_ACK (1 << 0)
+# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24
+# define DC_HOT_PLUG_DETECTx_EN (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28
+# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0)
+# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS 0x7d00
+#define DC_HPD2_INT_STATUS 0x7d0c
+#define DC_HPD3_INT_STATUS 0x7d18
+#define DC_HPD4_INT_STATUS 0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS 0x7dc0
+#define DC_HPD6_INT_STATUS 0x7df4
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
+# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
+# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
+# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL 0x7d04
+#define DC_HPD2_INT_CONTROL 0x7d10
+#define DC_HPD3_INT_CONTROL 0x7d1c
+#define DC_HPD4_INT_CONTROL 0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL 0x7dc4
+#define DC_HPD6_INT_CONTROL 0x7df8
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL 0x7d08
+#define DC_HPD2_CONTROL 0x7d14
+#define DC_HPD3_CONTROL 0x7d20
+#define DC_HPD4_CONTROL 0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL 0x7dc8
+#define DC_HPD6_CONTROL 0x7dfc
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+/* DCE 3.2 */
+# define DC_HPDx_EN (1 << 28)
/*
* PM4
@@ -500,7 +708,6 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
-#define PACKET3_CP_INTERRUPT 0x40
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
@@ -674,4 +881,5 @@
#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 224506a2f7b..c938bb54123 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
#ifndef __RADEON_H__
#define __RADEON_H__
-#include "radeon_object.h"
-
/* TODO: Here are things that need to be done :
* - surface allocator & initializer : (bit like scratch reg) should
* initialize HDP_ stuff on RS600, R600, R700 hw, well anything
@@ -67,6 +65,11 @@
#include <linux/list.h>
#include <linux/kref.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
@@ -85,6 +88,7 @@ extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
+extern int radeon_new_pll;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -186,76 +190,62 @@ void radeon_fence_unref(struct radeon_fence **fence);
* Tiling registers
*/
struct radeon_surface_reg {
- struct radeon_object *robj;
+ struct radeon_bo *bo;
};
#define RADEON_GEM_MAX_SURFACES 8
/*
- * Radeon buffer.
+ * TTM.
*/
-struct radeon_object;
+struct radeon_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_global_reference mem_global_ref;
+ bool mem_global_referenced;
+ struct ttm_bo_device bdev;
+};
+
+struct radeon_bo {
+ /* Protected by gem.mutex */
+ struct list_head list;
+ /* Protected by tbo.reserved */
+ u32 placements[3];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ void *kptr;
+ u32 tiling_flags;
+ u32 pitch;
+ int surface_reg;
+ /* Constant after initialization */
+ struct radeon_device *rdev;
+ struct drm_gem_object *gobj;
+};
-struct radeon_object_list {
+struct radeon_bo_list {
struct list_head list;
- struct radeon_object *robj;
+ struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
- uint32_t tiling_flags;
+ u32 tiling_flags;
};
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
- struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
/*
* GEM objects.
*/
struct radeon_gem {
+ struct mutex mutex;
struct list_head objects;
};
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj);
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -271,7 +261,7 @@ struct radeon_gart_table_ram {
};
struct radeon_gart_table_vram {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
volatile uint32_t *ptr;
};
@@ -352,11 +342,16 @@ struct radeon_irq {
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[2];
+ /* FIXME: use defines for max hpd/dacs */
+ bool hpd[6];
+ spinlock_t sw_lock;
+ int sw_refcount;
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
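These pair with the sw_lock/sw_refcount fields added to struct radeon_irq above. A plausible sketch of the get side, assuming the usual enable-on-first-user scheme (the real bodies live in radeon_irq_kms.c, outside this hunk):

	void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
	{
		unsigned long flags;

		spin_lock_irqsave(&rdev->irq.sw_lock, flags);
		if (++rdev->irq.sw_refcount == 1) {	/* first user: turn the sw int on */
			rdev->irq.sw_int = true;
			radeon_irq_set(rdev);
		}
		spin_unlock_irqrestore(&rdev->irq.sw_lock, flags);
	}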
/*
* CP & ring.
@@ -376,7 +371,7 @@ struct radeon_ib {
*/
struct radeon_ib_pool {
struct mutex mutex;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
@@ -384,7 +379,7 @@ struct radeon_ib_pool {
};
struct radeon_cp {
- struct radeon_object *ring_obj;
+ struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned wptr;
@@ -399,8 +394,25 @@ struct radeon_cp {
bool ready;
};
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+ struct radeon_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ uint64_t gpu_addr;
+ uint32_t align_mask;
+ uint32_t ptr_mask;
+ spinlock_t lock;
+ bool enabled;
+};
+
struct r600_blit {
- struct radeon_object *shader_obj;
+ struct radeon_bo *shader_obj;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
@@ -430,8 +442,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
*/
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- struct radeon_object_list lobj;
+ struct radeon_bo *robj;
+ struct radeon_bo_list lobj;
uint32_t handle;
uint32_t flags;
};
@@ -527,7 +539,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
* Writeback
*/
struct radeon_wb {
- struct radeon_object *wb_obj;
+ struct radeon_bo *wb_obj;
volatile uint32_t *wb;
uint64_t gpu_addr;
};
@@ -639,6 +651,11 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
+ void (*hdp_flush)(struct radeon_device *rdev);
+ void (*hpd_init)(struct radeon_device *rdev);
+ void (*hpd_fini)(struct radeon_device *rdev);
+ bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
};
/*
@@ -751,9 +768,9 @@ struct radeon_device {
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
- struct radeon_object *stollen_vga_memory;
+ struct radeon_bo *stollen_vga_memory;
struct fb_info *fbdev_info;
- struct radeon_object *fbdev_robj;
+ struct radeon_bo *fbdev_rbo;
struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
@@ -791,8 +808,12 @@ struct radeon_device {
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
+ const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
int msi_enabled; /* msi enabled */
+ struct r600_ih ih; /* r6/700 interrupt ring */
+ struct workqueue_struct *wq;
+ struct work_struct hotplug_work;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -829,6 +850,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
}
}
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
/*
* Registers read & write functions.
@@ -965,18 +990,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
/* Common functions */
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
@@ -984,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1021,7 +1053,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
extern void r100_vga_render_disable(struct radeon_device *rdev);
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj);
+ struct radeon_bo *robj);
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
@@ -1029,6 +1061,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1104,7 +1138,14 @@ extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c18fbee387d..636116bedcb 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -76,6 +76,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
+void r100_hdp_flush(struct radeon_device *rdev);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
static struct radeon_asic r100_asic = {
.init = &r100_init,
@@ -107,6 +113,11 @@ static struct radeon_asic r100_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -162,6 +173,11 @@ static struct radeon_asic r300_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
/*
@@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -245,6 +266,11 @@ static struct radeon_asic rs400_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+
static struct radeon_asic rs600_asic = {
.init = &rs600_init,
.fini = &rs600_fini,
@@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
/*
@@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
+void r600_hdp_flush(struct radeon_device *rdev);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -470,6 +528,7 @@ static struct radeon_asic r600_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
@@ -484,6 +543,11 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
};
/*
@@ -509,6 +573,7 @@ static struct radeon_asic rv770_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
@@ -523,6 +588,11 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2ed88a82093..12a0c760e7f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb, uint32_t igp_lane_info,
- uint16_t connector_object_id);
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@@ -60,16 +61,16 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
- *dev, uint8_t id)
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+ uint8_t id)
{
- struct radeon_device *rdev = dev->dev_private;
struct atom_context *ctx = rdev->mode_info.atom_context;
- ATOM_GPIO_I2C_ASSIGMENT gpio;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset;
+ int i;
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
@@ -78,34 +79,121 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
- gpio = i2c_info->asGPIO_Info[id];
-
- i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
- i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
- i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
- i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
- i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
- i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
- i2c.put_data_mask = (1 << gpio.ucDataEnShift);
- i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
- i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
- i2c.valid = true;
+
+ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
+ if (gpio->sucI2cId.ucAccess == id) {
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ i2c.valid = true;
+ }
+ }
return i2c;
}
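+/* Look up a GPIO pin in the ATOM GPIO_Pin_LUT by its ucGPIO_ID.  The
+ * entry count is derived from the data-table size, and the register
+ * index is again scaled from dwords to a byte offset.
+ */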
+static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+ u8 id)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ struct radeon_gpio_rec gpio;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+ struct _ATOM_GPIO_PIN_LUT *gpio_info;
+ ATOM_GPIO_PIN_ASSIGNMENT *pin;
+ u16 data_offset, size;
+ int i, num_indices;
+
+ memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+ gpio.valid = false;
+
+ atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+
+ gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ pin = &gpio_info->asGPIO_Pin[i];
+ if (id == pin->ucGPIO_ID) {
+ gpio.id = pin->ucGPIO_ID;
+ gpio.reg = pin->usGpioPin_AIndex * 4;
+ gpio.mask = (1 << pin->ucGpioPinBitShift);
+ gpio.valid = true;
+ break;
+ }
+ }
+
+ return gpio;
+}
+
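+/* Translate an HPD GPIO into one of the six hot-plug detect pins.  On
+ * these parts the sense bits apparently all live in DC_GPIO_HPD_A, one
+ * bit per pin, so the mask alone identifies the pin; any other register
+ * or mask falls back to RADEON_HPD_NONE.
+ */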
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+ struct radeon_gpio_rec *gpio)
+{
+ struct radeon_hpd hpd;
+ hpd.gpio = *gpio;
+ if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+ switch (gpio->mask) {
+ case (1 << 0):
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ case (1 << 8):
+ hpd.hpd = RADEON_HPD_2;
+ break;
+ case (1 << 16):
+ hpd.hpd = RADEON_HPD_3;
+ break;
+ case (1 << 24):
+ hpd.hpd = RADEON_HPD_4;
+ break;
+ case (1 << 26):
+ hpd.hpd = RADEON_HPD_5;
+ break;
+ case (1 << 28):
+ hpd.hpd = RADEON_HPD_6;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else
+ hpd.hpd = RADEON_HPD_NONE;
+ return hpd;
+}
+
static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint32_t supported_device,
int *connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t *line_mux)
+ uint16_t *line_mux,
+ struct radeon_hpd *hpd)
{
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -135,6 +223,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* HIS X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7146) &&
+ (dev->pdev->subsystem_vendor == 0x17af) &&
+ (dev->pdev->subsystem_device == 0x2058)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
+ /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7142) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x2134)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
@@ -172,6 +277,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* Acer laptop reports DVI-D as DVI-I */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1025) &&
+ (dev->pdev->subsystem_device == 0x013c)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
return true;
}
@@ -240,16 +354,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
struct radeon_mode_info *mode_info = &rdev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
- uint16_t size, data_offset;
- uint8_t frev, crev, line_mux = 0;
+ u16 size, data_offset;
+ u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
int i, j, path_size, device_support;
int connector_type;
- uint16_t igp_lane_info, conn_id, connector_object_id;
+ u16 igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_gpio_rec gpio;
+ struct radeon_hpd hpd;
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -276,7 +392,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
linkb = false;
-
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@@ -377,10 +492,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
}
- /* look up gpio for ddc */
+ /* look up gpio for ddc, hpd */
if ((le16_to_cpu(path->usDeviceTag) &
- (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- == 0) {
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
if (le16_to_cpu(path->usConnObjectId) ==
le16_to_cpu(con_obj->asObjects[j].
@@ -394,21 +508,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
asObjects[j].
usRecordOffset));
ATOM_I2C_RECORD *i2c_record;
+ ATOM_HPD_INT_RECORD *hpd_record;
+ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+ hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->
ucRecordType <=
ATOM_MAX_OBJECT_RECORD_NUMBER) {
- switch (record->
- ucRecordType) {
+ switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
i2c_record =
- (ATOM_I2C_RECORD
- *) record;
- line_mux =
- i2c_record->
- sucI2cId.
- bfI2C_LineMux;
+ (ATOM_I2C_RECORD *)
+ record;
+ i2c_config =
+ (ATOM_I2C_ID_CONFIG_ACCESS *)
+ &i2c_record->sucI2cId;
+ ddc_bus = radeon_lookup_i2c_gpio(rdev,
+ i2c_config->
+ ucAccess);
+ break;
+ case ATOM_HPD_INT_RECORD_TYPE:
+ hpd_record =
+ (ATOM_HPD_INT_RECORD *)
+ record;
+ gpio = radeon_lookup_gpio(rdev,
+ hpd_record->ucHPDIntGPIOID);
+ hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ hpd.plugged_state = hpd_record->ucPlugged_PinState;
break;
}
record =
@@ -421,24 +548,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else
- line_mux = 0;
-
- if ((le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_TV1_SUPPORT)
- || (le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_TV2_SUPPORT)
- || (le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_CV_SUPPORT))
+ } else {
+ hpd.hpd = RADEON_HPD_NONE;
ddc_bus.valid = false;
- else
- ddc_bus = radeon_lookup_gpio(dev, line_mux);
+ }
conn_id = le16_to_cpu(path->usConnObjectId);
if (!radeon_atom_apply_quirks
(dev, le16_to_cpu(path->usDeviceTag), &connector_type,
- &ddc_bus, &conn_id))
+ &ddc_bus, &conn_id, &hpd))
continue;
radeon_add_atom_connector(dev,
@@ -447,7 +566,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
usDeviceTag),
connector_type, &ddc_bus,
linkb, igp_lane_info,
- connector_object_id);
+ connector_object_id,
+ &hpd);
}
}
@@ -502,6 +622,7 @@ struct bios_connector {
uint16_t devices;
int connector_type;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_hpd hpd;
};
bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@@ -517,7 +638,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint16_t device_support;
uint8_t dac;
union atom_supported_devices *supported_devices;
- int i, j;
+ int i, j, max_device;
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -527,7 +648,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ if (frev > 1)
+ max_device = ATOM_MAX_SUPPORTED_DEVICE;
+ else
+ max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+ for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
@@ -553,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
- if ((rdev->family == CHIP_RS690) ||
- (rdev->family == CHIP_RS740)) {
- if ((i == ATOM_DEVICE_DFP2_INDEX)
- && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
- else if ((i == ATOM_DEVICE_DFP3_INDEX)
- && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
- else
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux;
- } else
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.ucAccess;
/* give tv unique connector ids */
if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -582,8 +694,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].line_mux = 52;
} else
bios_connectors[i].ddc_bus =
- radeon_lookup_gpio(dev,
- bios_connectors[i].line_mux);
+ radeon_lookup_i2c_gpio(rdev,
+ bios_connectors[i].line_mux);
+
+ if ((crev > 1) && (frev > 1)) {
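+ /* ucIntSrcBitmap maps the device to an hpd interrupt source;
+  * only the two bitmap values decoded below are recognized,
+  * everything else falls back to RADEON_HPD_NONE.
+  */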
+ u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+ switch (isb) {
+ case 0x4:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ break;
+ case 0xa:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ break;
+ default:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else {
+ if (i == ATOM_DEVICE_DFP1_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ else if (i == ATOM_DEVICE_DFP2_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ else
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ }
/* Always set the connector type to VGA for CRT1/CRT2. if they are
* shared with a DVI port, we'll pick up the DVI connector when we
@@ -595,7 +729,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (!radeon_atom_apply_quirks
(dev, (1 << i), &bios_connectors[i].connector_type,
- &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
+ &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+ &bios_connectors[i].hpd))
continue;
bios_connectors[i].valid = true;
@@ -617,9 +752,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
/* combine shared connectors */
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
- for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+ for (j = 0; j < max_device; j++) {
if (bios_connectors[j].valid && (i != j)) {
if (bios_connectors[i].line_mux ==
bios_connectors[j].line_mux) {
@@ -643,6 +778,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].
connector_type =
DRM_MODE_CONNECTOR_DVII;
+ if (bios_connectors[j].devices &
+ (ATOM_DEVICE_DFP_SUPPORT))
+ bios_connectors[i].hpd =
+ bios_connectors[j].hpd;
bios_connectors[j].
valid = false;
}
@@ -653,7 +792,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
/* add the connectors */
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
uint16_t connector_object_id =
atombios_get_connector_object_id(dev,
@@ -666,7 +805,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
connector_type,
&bios_connectors[i].ddc_bus,
false, 0,
- connector_object_id);
+ connector_object_id,
+ &bios_connectors[i].hpd);
}
}
@@ -731,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family.
*/
- p1pll->pll_out_min = 64800;
+ if (!radeon_new_pll)
+ p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -861,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev;
struct radeon_atom_ss *ss = NULL;
+ int i;
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
@@ -878,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
if (!ss)
return NULL;
- ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
- ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
- ss->step = ss_info->asSS_Info[id].ucSS_Step;
- ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
- ss->range = ss_info->asSS_Info[id].ucSS_Range;
- ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+ for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+ if (ss_info->asSS_Info[i].ucSS_Id == id) {
+ ss->percentage =
+ le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+ ss->step = ss_info->asSS_Info[i].ucSS_Step;
+ ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+ ss->range = ss_info->asSS_Info[i].ucSS_Range;
+ ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ }
+ }
}
return ss;
}
@@ -901,7 +1048,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
- uint16_t data_offset;
+ uint16_t data_offset, misc;
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
@@ -940,6 +1087,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+ misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10bd50a7db8..4ddfd4b5bc5 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
unsigned sdomain, unsigned ddomain)
{
- struct radeon_object *dobj = NULL;
- struct radeon_object *sobj = NULL;
+ struct radeon_bo *dobj = NULL;
+ struct radeon_bo *sobj = NULL;
struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+ r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(sobj, sdomain, &saddr);
+ r = radeon_bo_reserve(sobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(sobj, sdomain, &saddr);
+ radeon_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+ r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(dobj, ddomain, &daddr);
+ r = radeon_bo_reserve(dobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(dobj, ddomain, &daddr);
+ radeon_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
}
out_cleanup:
if (sobj) {
- radeon_object_unpin(sobj);
- radeon_object_unref(&sobj);
+ r = radeon_bo_reserve(sobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(sobj);
+ radeon_bo_unreserve(sobj);
+ }
+ radeon_bo_unref(&sobj);
}
if (dobj) {
- radeon_object_unpin(dobj);
- radeon_object_unref(&dobj);
+ r = radeon_bo_reserve(dobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(dobj);
+ radeon_bo_unreserve(dobj);
+ }
+ radeon_bo_unref(&dobj);
}
if (fence) {
radeon_fence_unref(&fence);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index a8135416762..b062109efbe 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
sclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
mclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
ret = radeon_combios_get_clock_info(dev);
if (ret) {
- if (p1pll->reference_div < 2)
- p1pll->reference_div = 12;
+ if (p1pll->reference_div < 2) {
+ if (!ASIC_IS_AVIVO(rdev)) {
+ u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+ if (ASIC_IS_R300(rdev))
+ p1pll->reference_div =
+ (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+ else
+ p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ } else
+ p1pll->reference_div = 12;
+ }
if (p2pll->reference_div < 2)
p2pll->reference_div = 12;
if (rdev->family < CHIP_RS600) {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5253cbf6db1..c5021a3445d 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id);
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@@ -442,38 +443,70 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
-struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+ int ddc_line)
{
struct radeon_i2c_bus_rec i2c;
- i2c.mask_clk_mask = RADEON_GPIO_EN_1;
- i2c.mask_data_mask = RADEON_GPIO_EN_0;
- i2c.a_clk_mask = RADEON_GPIO_A_1;
- i2c.a_data_mask = RADEON_GPIO_A_0;
- i2c.put_clk_mask = RADEON_GPIO_EN_1;
- i2c.put_data_mask = RADEON_GPIO_EN_0;
- i2c.get_clk_mask = RADEON_GPIO_Y_1;
- i2c.get_data_mask = RADEON_GPIO_Y_0;
- if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
- (ddc_line == RADEON_MDGPIO_EN_REG)) {
- i2c.mask_clk_reg = ddc_line;
- i2c.mask_data_reg = ddc_line;
- i2c.a_clk_reg = ddc_line;
- i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line + 4;
- i2c.get_data_reg = ddc_line + 4;
+ if (ddc_line == RADEON_GPIOPAD_MASK) {
+ i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c.a_data_reg = RADEON_GPIOPAD_A;
+ i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c.y_data_reg = RADEON_GPIOPAD_Y;
+ } else if (ddc_line == RADEON_MDGPIO_MASK) {
+ i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+ i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+ i2c.a_clk_reg = RADEON_MDGPIO_A;
+ i2c.a_data_reg = RADEON_MDGPIO_A;
+ i2c.en_clk_reg = RADEON_MDGPIO_EN;
+ i2c.en_data_reg = RADEON_MDGPIO_EN;
+ i2c.y_clk_reg = RADEON_MDGPIO_Y;
+ i2c.y_data_reg = RADEON_MDGPIO_Y;
} else {
+ i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+ i2c.mask_data_mask = RADEON_GPIO_EN_0;
+ i2c.a_clk_mask = RADEON_GPIO_A_1;
+ i2c.a_data_mask = RADEON_GPIO_A_0;
+ i2c.en_clk_mask = RADEON_GPIO_EN_1;
+ i2c.en_data_mask = RADEON_GPIO_EN_0;
+ i2c.y_clk_mask = RADEON_GPIO_Y_1;
+ i2c.y_data_mask = RADEON_GPIO_Y_0;
+
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line;
- i2c.get_data_reg = ddc_line;
+ i2c.en_clk_reg = ddc_line;
+ i2c.en_data_reg = ddc_line;
+ i2c.y_clk_reg = ddc_line;
+ i2c.y_data_reg = ddc_line;
+ }
+
+ if (rdev->family < CHIP_R200)
+ i2c.hw_capable = false;
+ else {
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ case RADEON_GPIO_MONID:
+ /* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+ * reliably on some pre-r4xx hardware; not sure why.
+ */
+ i2c.hw_capable = false;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
}
+ i2c.mm_i2c = false;
+ i2c.i2c_id = 0;
if (ddc_line)
i2c.valid = true;
@@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
uint16_t sclk, mclk;
if (rdev->bios == NULL)
- return NULL;
+ return false;
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
if (pll_info) {
@@ -993,8 +1026,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */
};
bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
@@ -1028,7 +1061,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
-
ver = RBIOS8(tmds_info);
DRM_INFO("DFP table revision: %d\n", ver);
if (ver == 3) {
@@ -1063,51 +1095,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds->tmds_pll[i].value);
}
}
- } else
+ } else {
DRM_INFO("No TMDS info found in BIOS\n");
+ return false;
+ }
return true;
}
-struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
- struct radeon_encoder_int_tmds *tmds = NULL;
- bool ret;
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_bus_rec i2c_bus;
- tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+ /* default for macs */
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- if (!tmds)
- return NULL;
-
- ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
- radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+ /* XXX some macs have duallink chips */
+ switch (rdev->mode_info.connector_table) {
+ case CT_POWERBOOK_EXTERNAL:
+ case CT_MINI_EXTERNAL:
+ default:
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
- return tmds;
+ return true;
}
-void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
- uint16_t ext_tmds_info;
- uint8_t ver;
+ uint16_t offset;
+ uint8_t ver, id, blocks, clk, data;
+ int i;
+ enum radeon_combios_ddc gpio;
+ struct radeon_i2c_bus_rec i2c_bus;
if (rdev->bios == NULL)
- return;
+ return false;
- ext_tmds_info =
- combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
- if (ext_tmds_info) {
- ver = RBIOS8(ext_tmds_info);
- DRM_INFO("External TMDS Table revision: %d\n", ver);
- // TODO
+ tmds->i2c_bus = NULL;
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("GPIO Table revision: %d\n", ver);
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ i2c_bus.valid = true;
+ i2c_bus.mask_clk_mask = (1 << clk);
+ i2c_bus.mask_data_mask = (1 << data);
+ i2c_bus.a_clk_mask = (1 << clk);
+ i2c_bus.a_data_mask = (1 << data);
+ i2c_bus.en_clk_mask = (1 << clk);
+ i2c_bus.en_data_mask = (1 << data);
+ i2c_bus.y_clk_mask = (1 << clk);
+ i2c_bus.y_data_mask = (1 << data);
+ i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
+ i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("External TMDS Table revision: %d\n", ver);
+ tmds->slave_addr = RBIOS8(offset + 4 + 2);
+ tmds->slave_addr >>= 1; /* 7 bit addressing */
+ gpio = RBIOS8(offset + 4 + 3);
+ switch (gpio) {
+ case DDC_MONID:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_DVI:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_VGA:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_CRT2:
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if (rdev->family >= CHIP_R300)
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ else
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_LCD: /* MM i2c */
+ DRM_ERROR("MM i2c requires hw i2c engine\n");
+ break;
+ default:
+ DRM_ERROR("Unsupported gpio %d\n", gpio);
+ break;
+ }
+ }
}
+
+ if (!tmds->i2c_bus) {
+ DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+ return false;
+ }
+
+ return true;
}
bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
rdev->mode_info.connector_table = radeon_connector_table;
if (rdev->mode_info.connector_table == CT_NONE) {
@@ -1168,7 +1288,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
/* these are the most common settings */
if (rdev->flags & RADEON_SINGLE_CRTC) {
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1178,10 +1299,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
+ ddc_i2c = combios_setup_i2c_bus(rdev, 0);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1191,10 +1314,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1204,10 +1329,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else {
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1223,10 +1350,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1236,11 +1365,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
}
if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
/* TV - tv dac */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1250,14 +1382,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
}
break;
case CT_IBOOK:
DRM_INFO("Connector Table: %d (ibook)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1265,9 +1399,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - TV DAC */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1275,8 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1285,13 +1424,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_EXTERNAL:
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1299,9 +1440,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* DVI-I - primary dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
@@ -1317,8 +1460,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1327,13 +1473,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_INTERNAL:
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1341,9 +1489,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* DVI-I - primary dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1358,8 +1508,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1368,13 +1521,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_VGA:
DRM_INFO("Connector Table: %d (powerbook vga)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1382,9 +1537,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1392,8 +1549,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1402,13 +1562,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_MINI_EXTERNAL:
DRM_INFO("Connector Table: %d (mini external tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
@@ -1424,8 +1586,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1434,13 +1599,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_MINI_INTERNAL:
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1455,8 +1622,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1465,13 +1635,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_IMAC_G5_ISIGHT:
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
rdev->mode_info.connector_table);
/* DVI-D - int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1479,9 +1651,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1489,8 +1663,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1499,13 +1676,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_EMAC:
DRM_INFO("Connector Table: %d (emac)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1513,9 +1692,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1523,8 +1704,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1533,7 +1717,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
@@ -1550,7 +1735,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
int bios_index,
enum radeon_combios_connector
*legacy_connector,
- struct radeon_i2c_bus_rec *ddc_i2c)
+ struct radeon_i2c_bus_rec *ddc_i2c,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
@@ -1558,29 +1744,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
- *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
else if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
- ddc_i2c->valid = true;
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
ddc_i2c->mask_clk_mask = (0x20 << 8);
ddc_i2c->mask_data_mask = 0x80;
ddc_i2c->a_clk_mask = (0x20 << 8);
ddc_i2c->a_data_mask = 0x80;
- ddc_i2c->put_clk_mask = (0x20 << 8);
- ddc_i2c->put_data_mask = 0x80;
- ddc_i2c->get_clk_mask = (0x20 << 8);
- ddc_i2c->get_data_mask = 0x80;
- ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
- ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
- ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
- ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
- ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
- ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
+ ddc_i2c->en_clk_mask = (0x20 << 8);
+ ddc_i2c->en_data_mask = 0x80;
+ ddc_i2c->y_clk_mask = (0x20 << 8);
+ ddc_i2c->y_data_mask = 0x80;
}
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if ((rdev->family >= CHIP_R300) &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
   one with VGA DDC and one with CRT2 DDC; kill the CRT2 DDC one */
if (dev->pdev->device == 0x515e &&
@@ -1624,6 +1807,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
dev->pdev->subsystem_device == 0x280a)
return false;
+ /* MSI S270 has non-existent TV port */
+ if (dev->pdev->device == 0x5955 &&
+ dev->pdev->subsystem_vendor == 0x1462 &&
+ dev->pdev->subsystem_device == 0x0131)
+ return false;
+
return true;
}
@@ -1671,6 +1860,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
enum radeon_combios_connector connector;
int i = 0;
struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
if (rdev->bios == NULL)
return false;
@@ -1691,26 +1881,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
switch (ddc_type) {
case DDC_MONID:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
break;
default:
break;
}
+ switch (connector) {
+ case CONNECTOR_PROPRIETARY_LEGACY:
+ case CONNECTOR_DVI_I_LEGACY:
+ case CONNECTOR_DVI_D_LEGACY:
+ if ((tmp >> 4) & 0x1)
+ hpd.hpd = RADEON_HPD_2;
+ else
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+
if (!radeon_apply_legacy_quirks(dev, i, &connector,
- &ddc_i2c))
+ &ddc_i2c, &hpd))
continue;
switch (connector) {
@@ -1727,7 +1931,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
break;
case CONNECTOR_CRT_LEGACY:
if (tmp & 0x1) {
@@ -1753,7 +1958,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
break;
case CONNECTOR_DVI_I_LEGACY:
devices = 0;
@@ -1799,7 +2005,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- connector_object_id);
+ connector_object_id,
+ &hpd);
break;
case CONNECTOR_DVI_D_LEGACY:
if ((tmp >> 4) & 0x1) {
@@ -1817,7 +2024,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- connector_object_id);
+ connector_object_id,
+ &hpd);
break;
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
@@ -1832,7 +2040,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
default:
DRM_ERROR("Unknown connector type: %d\n",
@@ -1858,14 +2067,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
0),
ATOM_DEVICE_DFP1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
} else {
uint16_t crt_info =
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
@@ -1876,13 +2087,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else {
DRM_DEBUG("No connector info found\n");
return false;
@@ -1910,27 +2123,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
case DDC_MONID:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_MONID);
+ (rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_DVI_DDC);
+ (rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_VGA_DDC);
+ (rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_CRT2_DDC);
+ (rdev, RADEON_GPIO_CRT2_DDC);
break;
case DDC_LCD:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_LCD_GPIO_MASK);
+ (rdev, RADEON_GPIOPAD_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
@@ -1939,19 +2152,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
case DDC_GPIO:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_MDGPIO_EN_REG);
+ (rdev, RADEON_MDGPIO_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
@@ -1960,13 +2173,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
default:
@@ -1977,12 +2190,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else
ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
5,
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
}
}
@@ -1993,6 +2208,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
if (radeon_apply_legacy_tv_quirks(dev)) {
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev,
@@ -2003,7 +2219,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
}
}
}
@@ -2014,6 +2231,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
return true;
}
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (!tmds)
+ return;
+
+ switch (tmds->dvo_chip) {
+ case DVO_SIL164:
+ /* sil 164 */
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x09, 0x00);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x3b);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ case DVO_SIL1178:
+ /* sil 1178 - untested */
+ /*
+ * 0x0f, 0x44
+ * 0x0f, 0x4c
+ * 0x0e, 0x01
+ * 0x0a, 0x80
+ * 0x09, 0x30
+ * 0x0c, 0xc9
+ * 0x0d, 0x70
+ * 0x08, 0x32
+ * 0x08, 0x33
+ */
+ break;
+ default:
+ break;
+ }
+
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint16_t offset;
+ uint8_t blocks, slave_addr, rev;
+ uint32_t index, id;
+ uint32_t reg, val, and_mask, or_mask;
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (rdev->bios == NULL)
+ return false;
+
+ if (!tmds)
+ return false;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ if (rev > 1) {
+ blocks = RBIOS8(offset + 3);
+ index = offset + 4;
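+ /* Each block starts with a 16-bit id whose top three bits
+  * select the opcode, decoded below: 0 = MMIO write,
+  * 2 = MMIO read-modify-write, 3 = delay in usecs,
+  * 4 = delay in msecs, 6 = i2c register write.
+  */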
+ while (blocks > 0) {
+ id = RBIOS16(index);
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 3:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val * 1000);
+ break;
+ case 6:
+ slave_addr = id & 0xff;
+ slave_addr >>= 1; /* 7 bit addressing */
+ index++;
+ reg = RBIOS8(index);
+ index++;
+ val = RBIOS8(index);
+ index++;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ blocks--;
+ }
+ return true;
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ index = offset + 10;
+ id = RBIOS16(index);
+ while (id != 0xffff) {
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 5:
+ reg = id & 0x1fff;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32_PLL(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32_PLL(reg, val);
+ break;
+ case 6:
+ reg = id & 0x1fff;
+ val = RBIOS8(index);
+ index += 1;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ id = RBIOS16(index);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
{
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 29763ceae3a..5eece186e03 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
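+/* Hot-plug handler for a connector: re-program the HPD pin polarity so
+ * the next state change raises an interrupt, and retrain DisplayPort
+ * links that report they need it while the sink is still attached.
+ */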
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_needs_link_train(radeon_connector)) {
+ if (connector->encoder)
+ dp_link_train(connector->encoder, connector);
+ }
+ }
+ }
+
+}
+
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
@@ -445,10 +465,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
@@ -553,17 +573,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -708,17 +728,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -735,6 +755,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
ret = connector_status_disconnected;
} else
ret = connector_status_connected;
+
+ /* Multiple connectors can share the same encoder and DDC line;
+ * this tends to be HDMI and DVI on the same encoder. If the
+ * EDID says the sink is HDMI, consider the HDMI port connected
+ * and the DVI port disconnected; if it doesn't, vice versa.
+ */
+ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+ struct drm_device *dev = connector->dev;
+ struct drm_connector *list_connector;
+ struct radeon_connector *list_radeon_connector;
+ list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+ if (connector == list_connector)
+ continue;
+ list_radeon_connector = to_radeon_connector(list_connector);
+ if (radeon_connector->devices == list_radeon_connector->devices) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ } else {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ }
+ }
+ }
+ }
}
}
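With the corrected condition, the shared-DDC tie-break reduces to one predicate: a connector keeps its EDID only when its type matches what the EDID claims the sink to be. In isolation (edid_is_hdmi stands in for drm_detect_hdmi_monitor()):

	#include <assert.h>
	#include <stdbool.h>

	enum ctype { DVID, HDMIA, HDMIB };

	/* A connector on a shared DDC line stays "connected" only when
	 * its type agrees with what the EDID says the sink is. */
	static bool keeps_edid(enum ctype type, bool edid_is_hdmi)
	{
		if (edid_is_hdmi)
			return type == HDMIA || type == HDMIB;
		return type == DVID;
	}

	int main(void)
	{
		assert(keeps_edid(HDMIA, true));	/* HDMI EDID: HDMI port wins */
		assert(!keeps_edid(DVID, true));
		assert(keeps_edid(DVID, false));	/* non-HDMI EDID: DVI wins   */
		return 0;
	}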
@@ -863,6 +916,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
.force = radeon_dvi_force,
};
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ if (radeon_connector->ddc_bus)
+ radeon_i2c_destroy(radeon_connector->ddc_bus);
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
+ if (radeon_dig_connector->dp_i2c_bus)
+ radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+ kfree(radeon_connector->con_priv);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+ return ret;
+}
+
+static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ u8 sink_type;
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+
+ sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_getdpcd(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
+ ret = connector_status_connected;
+ }
+ } else {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_ddc_probe(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
+ ret = connector_status_connected;
+ }
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
+
+ return ret;
+}
+
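radeon_dp_detect splits on the sink type reported over the aux channel: a true DP sink must answer a DPCD read to count as connected, while anything else behind the port falls back to an ordinary DDC probe. A minimal sketch of the two paths, with trivially-true stub probes:

	#include <stdbool.h>
	#include <stdio.h>

	enum status { DISCONNECTED, CONNECTED };

	static bool read_dpcd(void) { return true; }	/* aux-channel probe   */
	static bool probe_ddc(void) { return true; }	/* plain i2c EDID ping */

	static enum status dp_detect(bool sink_is_dp)
	{
		if (sink_is_dp)		/* real DP sink: must answer a DPCD read */
			return read_dpcd() ? CONNECTED : DISCONNECTED;
		/* anything else behind the port gets a classic DDC probe,
		 * with the i2c bus locked around it as in the hunk above */
		return probe_ddc() ? CONNECTED : DISCONNECTED;
	}

	int main(void)
	{
		printf("%d %d\n", dp_detect(true), dp_detect(false));
		return 0;
	}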
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ /* XXX check mode bandwidth */
+
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return radeon_dp_mode_valid_helper(radeon_connector, mode);
+ else
+ return MODE_OK;
+}
+
+struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+ .get_modes = radeon_dp_get_modes,
+ .mode_valid = radeon_dp_mode_valid,
+ .best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_connector_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
@@ -871,7 +1009,8 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
uint32_t igp_lane_info,
- uint16_t connector_object_id)
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -911,6 +1050,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -963,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- 1);
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@@ -997,16 +1139,23 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+ ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (ret)
goto failed;
if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (!radeon_dig_connector->dp_i2c_bus)
+ goto failed;
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.coherent_mode_property,
+ 1);
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1020,6 +1169,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ 1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -1038,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
goto failed;
}
- drm_mode_create_scaling_mode_property(dev);
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1063,7 +1214,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id)
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -1093,6 +1245,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1160,6 +1313,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ 1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 4f7afc79dd8..0b2f9c2ad2c 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
for (t = 0; t < dev_priv->usec_timeout; t++) {
u32 done_age = GET_SCRATCH(dev_priv, 1);
DRM_DEBUG("done_age = %d\n", done_age);
- for (i = start; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[start];
buf_priv = buf->dev_private;
if (buf->file_priv == NULL || (buf->pending &&
buf_priv->age <=
@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
buf->pending = 0;
return buf;
}
- start = 0;
+ if (++start >= dma->buf_count)
+ start = 0;
}
if (t) {
@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
}
}
- DRM_DEBUG("returning NULL!\n");
return NULL;
}
-#if 0
-struct drm_buf *radeon_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_buf_priv_t *buf_priv;
- struct drm_buf *buf;
- int i, t;
- int start;
- u32 done_age;
-
- done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
- if (++dev_priv->last_buf >= dma->buf_count)
- dev_priv->last_buf = 0;
-
- start = dev_priv->last_buf;
- dev_priv->stats.freelist_loops++;
-
- for (t = 0; t < 2; t++) {
- for (i = start; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
- if (buf->file_priv == 0 || (buf->pending &&
- buf_priv->age <=
- done_age)) {
- dev_priv->stats.requested_bufs++;
- buf->pending = 0;
- return buf;
- }
- }
- start = 0;
- }
-
- return NULL;
-}
-#endif
-
void radeon_freelist_reset(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
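The freelist fix above is subtle: the old loop began at start but never wrapped, so buffers below the starting index were skipped within a pass. The new form makes exactly buf_count probes from any starting point. A standalone model of the corrected scan:

	#include <stdio.h>

	#define NBUF 8

	/* Probe all NBUF slots exactly once, beginning at 'start' and
	 * wrapping, which is what the corrected loop guarantees. */
	static int scan(int start, const int free_slot[NBUF])
	{
		int i;

		for (i = 0; i < NBUF; i++) {
			if (free_slot[start])
				return start;
			if (++start >= NBUF)
				start = 0;
		}
		return -1;	/* nothing free this pass */
	}

	int main(void)
	{
		const int free_slot[NBUF] = { 0, 1, 0, 0, 0, 0, 0, 0 };

		/* Starting above the only free slot still finds it. */
		printf("found %d\n", scan(5, free_slot));
		return 0;
	}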
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf96a26..65590a0f1d9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
- p->relocs[i].lobj.robj = p->relocs[i].robj;
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
INIT_LIST_HEAD(&p->relocs[i].lobj.list);
- radeon_object_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
}
}
- return radeon_object_list_validate(&p->validated, p->ib->fence);
+ return radeon_bo_list_validate(&p->validated, p->ib->fence);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
if (error) {
- radeon_object_list_unvalidate(&parser->validated);
+ radeon_bo_list_unvalidate(&parser->validated,
+ parser->ib->fence);
} else {
- radeon_object_list_clean(&parser->validated);
+ radeon_bo_list_unreserve(&parser->validated);
}
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 41bb76fbe73..02bcdb1240c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
if (rdev->family < CHIP_R600) {
int i;
- for (i = 0; i < 8; i++) {
- WREG32(RADEON_SURFACE0_INFO +
- i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
- 0);
+ for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+ if (rdev->surface_regs[i].bo)
+ radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+ else
+ radeon_clear_surface_reg(rdev, i);
}
/* enable surfaces */
WREG32(RADEON_SURFACE_CNTL, 0);
@@ -208,6 +209,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+ if (radeon_card_posted(rdev))
+ return true;
+
+ if (rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ if (rdev->is_atom_bios)
+ atom_asic_init(rdev->mode_info.atom_context);
+ else
+ radeon_combios_asic_init(rdev->ddev);
+ return true;
+ } else {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return false;
+ }
+}
+
int radeon_dummy_page_init(struct radeon_device *rdev)
{
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@@ -463,12 +482,16 @@ int radeon_atombios_init(struct radeon_device *rdev)
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+ atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
}
void radeon_atombios_fini(struct radeon_device *rdev)
{
- kfree(rdev->mode_info.atom_context);
+ if (rdev->mode_info.atom_context) {
+ kfree(rdev->mode_info.atom_context->scratch);
+ kfree(rdev->mode_info.atom_context);
+ }
kfree(rdev->mode_info.atom_card_info);
}
@@ -544,16 +567,24 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
+ if (rdev->family >= CHIP_R600)
+ spin_lock_init(&rdev->ih.lock);
+ mutex_init(&rdev->gem.mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
+ /* setup workqueue */
+ rdev->wq = create_workqueue("radeon");
+ if (rdev->wq == NULL)
+ return -ENOMEM;
+
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r) {
return r;
}
- if (radeon_agpmode == -1) {
+ if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
@@ -620,6 +651,7 @@ void radeon_device_fini(struct radeon_device *rdev)
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
radeon_fini(rdev);
+ destroy_workqueue(rdev->wq);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
@@ -633,6 +665,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *crtc;
+ int r;
if (dev == NULL || rdev == NULL) {
return -ENODEV;
@@ -643,26 +676,31 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
- struct radeon_object *robj;
+ struct radeon_bo *robj;
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_robj) {
- radeon_object_unpin(robj);
+ if (robj != rdev->fbdev_rbo) {
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
}
/* evict vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
radeon_fence_wait_last(rdev);
radeon_save_bios_scratch_regs(rdev);
radeon_suspend(rdev);
+ radeon_hpd_fini(rdev);
/* evict remaining vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
@@ -695,6 +733,8 @@ int radeon_resume_kms(struct drm_device *dev)
fb_set_suspend(rdev->fbdev_info, 0);
release_console_sem();
+ /* reset hpd state */
+ radeon_hpd_init(rdev);
/* blat the mode back in */
drm_helper_resume_force_mode(dev);
return 0;
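The recurring shape in these radeon_bo conversions is the new TTM contract: reserve the buffer before pin/unpin/kmap, and drop the reservation on every path. A toy rendering of the unpin site from the suspend hunk above (the bo_* helpers are stand-ins, not the driver API):

	#include <stdbool.h>
	#include <stdio.h>

	struct bo { bool reserved; bool pinned; };

	static int  bo_reserve(struct bo *b)   { b->reserved = true;  return 0; }
	static void bo_unreserve(struct bo *b) { b->reserved = false; }
	static void bo_unpin(struct bo *b)     { b->pinned = false; }

	/* The pattern every converted call site follows: reserve, operate,
	 * unreserve -- the driver wraps the check in unlikely()/likely(). */
	static void unpin_front_buffer(struct bo *b)
	{
		int r = bo_reserve(b);

		if (r == 0) {
			bo_unpin(b);
			bo_unreserve(b);
		}
	}

	int main(void)
	{
		struct bo b = { .pinned = true };

		unpin_front_buffer(&b);
		printf("pinned=%d reserved=%d\n", b.pinned, b.reserved);
		return 0;
	}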
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c85df4afcb7..a133b833e45 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -250,6 +250,16 @@ static const char *connector_names[13] = {
"HDMI-B",
};
+static const char *hpd_names[7] = {
+ "NONE",
+ "HPD1",
+ "HPD2",
+ "HPD3",
+ "HPD4",
+ "HPD5",
+ "HPD6",
+};
+
static void radeon_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -264,16 +274,18 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus)
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
radeon_connector->ddc_bus->rec.a_clk_reg,
radeon_connector->ddc_bus->rec.a_data_reg,
- radeon_connector->ddc_bus->rec.put_clk_reg,
- radeon_connector->ddc_bus->rec.put_data_reg,
- radeon_connector->ddc_bus->rec.get_clk_reg,
- radeon_connector->ddc_bus->rec.get_data_reg);
+ radeon_connector->ddc_bus->rec.en_clk_reg,
+ radeon_connector->ddc_bus->rec.en_data_reg,
+ radeon_connector->ddc_bus->rec.y_clk_reg,
+ radeon_connector->ddc_bus->rec.y_data_reg);
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -324,6 +336,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
if (ret) {
+ radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
@@ -336,12 +349,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
int ret = 0;
+ if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
+ }
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
if (radeon_connector->edid) {
@@ -361,9 +379,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
@@ -542,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags)
+{
+ fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+ fixed20_12 pll_out_max, pll_out_min;
+ fixed20_12 pll_in_max, pll_in_min;
+ fixed20_12 reference_freq;
+ fixed20_12 error, ffreq, a, b;
+
+ pll_out_max.full = rfixed_const(pll->pll_out_max);
+ pll_out_min.full = rfixed_const(pll->pll_out_min);
+ pll_in_max.full = rfixed_const(pll->pll_in_max);
+ pll_in_min.full = rfixed_const(pll->pll_in_min);
+ reference_freq.full = rfixed_const(pll->reference_freq);
+ do_div(freq, 10);
+ ffreq.full = rfixed_const(freq);
+ error.full = rfixed_const(100 * 100);
+
+ /* max p */
+ p.full = rfixed_div(pll_out_max, ffreq);
+ p.full = rfixed_floor(p);
+
+ /* min m */
+ m.full = rfixed_div(reference_freq, pll_in_max);
+ m.full = rfixed_ceil(m);
+
+ while (1) {
+ n.full = rfixed_div(ffreq, reference_freq);
+ n.full = rfixed_mul(n, m);
+ n.full = rfixed_mul(n, p);
+
+ f_vco.full = rfixed_div(n, m);
+ f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+ f_pclk.full = rfixed_div(f_vco, p);
+
+ if (f_pclk.full > ffreq.full)
+ error.full = f_pclk.full - ffreq.full;
+ else
+ error.full = ffreq.full - f_pclk.full;
+ error.full = rfixed_div(error, f_pclk);
+ a.full = rfixed_const(100 * 100);
+ error.full = rfixed_mul(error, a);
+
+ a.full = rfixed_mul(m, p);
+ a.full = rfixed_div(n, a);
+ best_freq.full = rfixed_mul(reference_freq, a);
+
+ if (rfixed_trunc(error) < 25)
+ break;
+
+ a.full = rfixed_const(1);
+ m.full = m.full + a.full;
+ a.full = rfixed_div(reference_freq, m);
+ if (a.full >= pll_in_min.full)
+ continue;
+
+ m.full = rfixed_div(reference_freq, pll_in_max);
+ m.full = rfixed_ceil(m);
+ a.full = rfixed_const(1);
+ p.full = p.full - a.full;
+ a.full = rfixed_mul(p, ffreq);
+ if (a.full >= pll_out_min.full)
+ continue;
+ else {
+ DRM_ERROR("Unable to find pll dividers\n");
+ break;
+ }
+ }
+
+ a.full = rfixed_const(10);
+ b.full = rfixed_mul(n, a);
+
+ frac_n.full = rfixed_floor(n);
+ frac_n.full = rfixed_mul(frac_n, a);
+ frac_n.full = b.full - frac_n.full;
+
+ *dot_clock_p = rfixed_trunc(best_freq);
+ *fb_div_p = rfixed_trunc(n);
+ *frac_fb_div_p = rfixed_trunc(frac_n);
+ *ref_div_p = rfixed_trunc(m);
+ *post_div_p = rfixed_trunc(p);
+
+ DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
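The search above runs in 20.12 fixed point, but the identity it iterates on is plain ratio arithmetic: f_pclk = ref * n / (m * p), accepted once the relative error is below 25 hundredths of a percent. The same check in ordinary integer math for one candidate divider set (values are illustrative, not from a real mode):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* reference clock and target pixel clock in 10 kHz units,
		 * matching the do_div(freq, 10) scaling in the code above */
		const uint64_t ref = 2700;	/* 27 MHz  */
		const uint64_t target = 10800;	/* 108 MHz */
		const uint64_t m = 2, p = 4;	/* ref and post dividers */

		/* n = target * m * p / ref, rounded to nearest */
		uint64_t n = (target * m * p + ref / 2) / ref;
		uint64_t f_pclk = ref * n / (m * p);

		/* error in hundredths of a percent; the loop accepts < 25 */
		uint64_t err = (f_pclk > target ? f_pclk - target : target - f_pclk)
			       * 100 * 100 / f_pclk;

		printf("n=%llu f_pclk=%llu err=%llu/10000\n",
		       (unsigned long long)n, (unsigned long long)f_pclk,
		       (unsigned long long)err);
		return 0;
	}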
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -642,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
return -ENOMEM;
rdev->mode_info.coherent_mode_property->values[0] = 0;
- rdev->mode_info.coherent_mode_property->values[0] = 1;
+ rdev->mode_info.coherent_mode_property->values[1] = 1;
}
if (!ASIC_IS_AVIVO(rdev)) {
@@ -666,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
rdev->mode_info.load_detect_property->values[0] = 0;
- rdev->mode_info.load_detect_property->values[0] = 1;
+ rdev->mode_info.load_detect_property->values[1] = 1;
drm_mode_create_scaling_mode_property(rdev->ddev);
@@ -723,6 +833,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
if (!ret) {
return ret;
}
+ /* initialize hpd */
+ radeon_hpd_init(rdev);
drm_helper_initial_config(rdev->ddev);
return 0;
}
@@ -730,6 +842,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.mode_config_initialized) {
+ radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
@@ -750,9 +863,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
if (first) {
- radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ /* set scaling */
+ if (radeon_encoder->rmx_type == RMX_OFF)
+ radeon_crtc->rmx_type = RMX_OFF;
+ else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+ mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+ radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ else
+ radeon_crtc->rmx_type = RMX_OFF;
+ /* copy native mode */
memcpy(&radeon_crtc->native_mode,
- &radeon_encoder->native_mode,
+ &radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
first = false;
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7f50fb864af..28077247f4f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
+int radeon_new_pll = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 350962e0f34..e13785282a8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
# define R600_IT_WAIT_REG_MEM 0x00003C00
# define R600_IT_MEM_WRITE 0x00003D00
# define R600_IT_INDIRECT_BUFFER 0x00003200
-# define R600_IT_CP_INTERRUPT 0x00004000
# define R600_IT_SURFACE_SYNC 0x00004300
# define R600_CB0_DEST_BASE_ENA (1 << 6)
# define R600_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d42bc512d75..b4f23ec9320 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -35,6 +35,51 @@ extern int atom_debug;
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *clone_encoder;
+ uint32_t index_mask = 0;
+ int count;
+
+ /* DIG routing gets problematic */
+ if (rdev->family >= CHIP_R600)
+ return index_mask;
+ /* LVDS/TV are too wacky */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return index_mask;
+ /* DVO requires 2x ppll clocks depending on tmds chip */
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ return index_mask;
+
+ count = -1;
+ list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+ count++;
+
+ if (clone_encoder == encoder)
+ continue;
+ if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ continue;
+ if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ continue;
+ else
+ index_mask |= (1 << count);
+ }
+ return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->possible_clones = radeon_encoder_clones(encoder);
+ }
+}
+
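possible_clones is a bitmask over the encoder list indexed by position: bit i set means encoder i may be driven with the same content. A toy version of the mask computed above, with the LVDS and DFP2 exclusions folded into one flags test:

	#include <stdint.h>
	#include <stdio.h>

	#define LCD  0x1
	#define DFP2 0x2

	/* Mask of encoders that may be cloned with encoder 'self';
	 * encoders carrying LCD or DFP2 devices are excluded, as above. */
	static uint32_t clone_mask(const unsigned devs[], int n, int self)
	{
		uint32_t mask = 0;
		int i;

		if (devs[self] & (LCD | DFP2))
			return 0;
		for (i = 0; i < n; i++)
			if (i != self && !(devs[i] & (LCD | DFP2)))
				mask |= 1u << i;
		return mask;
	}

	int main(void)
	{
		const unsigned devs[] = { 0, LCD, 0, DFP2 };

		printf("clones of 0: 0x%x\n", clone_mask(devs, 4, 0)); /* 0x4 */
		return 0;
	}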
uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
@@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-
- if (mode->hdisplay < native_mode->hdisplay ||
- mode->vdisplay < native_mode->vdisplay) {
- int mode_id = adjusted_mode->base.id;
- *adjusted_mode = *native_mode;
- if (!ASIC_IS_AVIVO(rdev)) {
- adjusted_mode->hdisplay = mode->hdisplay;
- adjusted_mode->vdisplay = mode->vdisplay;
- }
- adjusted_mode->base.id = mode_id;
- }
-}
-
-
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
-
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ if (!ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ }
+ adjusted_mode->base.id = mode_id;
+ }
+
+ /* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
@@ -218,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
}
+ if (ASIC_IS_DCE3(rdev) &&
+ (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ radeon_dp_set_link_config(connector, mode);
+ }
+
return true;
}
@@ -392,7 +430,7 @@ union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
-static void
+void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
@@ -522,6 +560,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *radeon_dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -551,10 +590,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- /*if (radeon_output->MonType == MT_DP)
- return ATOM_ENCODER_MODE_DP;
- else*/
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ radeon_dig_connector = radeon_connector->con_priv;
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return ATOM_ENCODER_MODE_DP;
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -573,6 +612,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
}
}
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link B -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
+ */
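In code terms the routing rules used by the hunks below reduce to: LVTMA is always fed by DIG2, and for UNIPHY the driver keys the encoder pick off the link, B taking DIG2 and A taking DIG1. A hypothetical condensation:

	/* Hypothetical one-liner for the selection repeated below. */
	static int pick_dig_encoder(int is_lvtma, int linkb)
	{
		return (is_lvtma || linkb) ? 2 : 1;	/* DIG2 : DIG1 */
	}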
static void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
@@ -614,10 +677,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
num = 2;
break;
@@ -652,18 +722,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
}
}
- if (radeon_encoder->pixel_clock > 165000) {
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
+ args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dig_connector->dp_clock == 270000)
+ args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.ucLaneNum = dig_connector->dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
args.ucLaneNum = 8;
- } else {
- if (dig_connector->linkb)
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ else
args.ucLaneNum = 4;
- }
- args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+ if (dig_connector->linkb)
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -674,8 +747,8 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
};
-static void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -687,6 +760,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
+ bool is_dp = false;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -704,6 +778,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
dig_connector = radeon_connector->con_priv;
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
+ is_dp = true;
+
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE32(rdev))
@@ -724,17 +801,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = radeon_connector->connector_object_id;
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v1.asMode.ucLaneSel = lane_num;
+ args.v1.asMode.ucLaneSet = lane_set;
} else {
- if (radeon_encoder->pixel_clock > 165000)
+ if (is_dp)
+ args.v1.usPixelClock =
+ cpu_to_le16(dig_connector->dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE32(rdev)) {
- if (radeon_encoder->pixel_clock > 165000)
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
if (dig->dig_block)
args.v2.acConfig.ucEncoderSel = 1;
+ if (dig_connector->linkb)
+ args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -751,7 +834,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
break;
}
- if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (is_dp)
+ args.v2.acConfig.fCoherentMode = 1;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
}
@@ -760,17 +845,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B);
if (dig_connector->igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (dig_connector->igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
} else {
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
if (dig_connector->igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (dig_connector->igp_lane_info & 0x2)
@@ -780,35 +868,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
else if (dig_connector->igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
}
- } else {
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B |
- ATOM_TRANSMITTER_CONFIG_LANE_0_7);
- else {
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- }
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B |
- ATOM_TRANSMITTER_CONFIG_LANE_0_7);
- else {
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- }
break;
}
- if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+ if (is_dp)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
}
@@ -918,12 +996,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ dp_link_train(encoder, connector);
+ }
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
break;
}
} else {
@@ -1025,13 +1107,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- } else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ } else {
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return;
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1104,11 +1206,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- if (radeon_encoder->enc_priv) {
- struct radeon_encoder_atom_dig *dig;
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_atom_dig *dig;
- dig = radeon_encoder->enc_priv;
- dig->dig_block = radeon_crtc->crtc_id;
+ dig = radeon_encoder->enc_priv;
+ dig->dig_block = radeon_crtc->crtc_id;
+ }
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1134,14 +1239,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1354,7 +1459,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d10eb43645c..3ba213d1b06 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
- struct radeon_object *robj = NULL;
+ struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, ttm_bo_type_kernel,
- false, &gobj);
+ &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
- robj = gobj->driver_private;
+ rbo = gobj->driver_private;
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
}
#endif
- if (tiling_flags)
- radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+ if (tiling_flags) {
+ ret = radeon_bo_set_tiling_flags(rbo,
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd.pitch);
+ if (ret)
+ dev_err(rdev->dev, "FB failed to set tiling flags\n");
+ }
mutex_lock(&rdev->ddev->struct_mutex);
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
ret = -ENOMEM;
goto out_unref;
}
- ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_reserve(rbo, false);
+ if (unlikely(ret != 0))
+ goto out_unref;
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ radeon_bo_unreserve(rbo);
+ goto out_unref;
+ }
+ if (fb_tiled)
+ radeon_bo_check_tiling(rbo, 0, 0);
+ ret = radeon_bo_kmap(rbo, &fbptr);
+ radeon_bo_unreserve(rbo);
if (ret) {
- printk(KERN_ERR "failed to pin framebuffer\n");
- ret = -ENOMEM;
goto out_unref;
}
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
*fb_p = fb;
rfb = to_radeon_framebuffer(fb);
rdev->fbdev_rfb = rfb;
- rdev->fbdev_robj = robj;
+ rdev->fbdev_rbo = rbo;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- if (fb_tiled)
- radeon_object_check_tiling(robj, 0, 0);
-
- ret = radeon_object_kmap(robj, &fbptr);
- if (ret) {
- goto out_unref;
- }
-
- memset_io(fbptr, 0, aligned_size);
+ memset_io(fbptr, 0xff, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
return 0;
out_unref:
- if (robj) {
- radeon_object_kunmap(robj);
+ if (rbo) {
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
}
if (fb && ret) {
list_del(&fb->filp_head);
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
int radeonfb_probe(struct drm_device *dev)
{
- return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
+ struct radeon_device *rdev = dev->dev_private;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or 16MB cards */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
}
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
{
struct fb_info *info;
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
+ int r;
if (!fb) {
return -EINVAL;
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
info = fb->fbdev;
if (info) {
struct radeon_fb_device *rfbdev = info->par;
- robj = rfb->obj->driver_private;
+ rbo = rfb->obj->driver_private;
unregister_framebuffer(info);
- radeon_object_kunmap(robj);
- radeon_object_unpin(robj);
+ r = radeon_bo_reserve(rbo, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
drm_fb_helper_free(&rfbdev->helper);
framebuffer_release(info);
}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3beb26d7471..cb4cd97ae39 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return signaled;
}
-int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
-{
- struct radeon_device *rdev;
- int ret = 0;
-
- rdev = fence->rdev;
-
- __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
- while (1) {
- if (radeon_fence_signaled(fence))
- break;
-
- if (time_after_eq(jiffies, fence->timeout)) {
- ret = -EBUSY;
- break;
- }
-
- if (lazy)
- schedule_timeout(1);
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- return ret;
-}
-
-
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return 0;
}
- if (rdev->family >= CHIP_R600) {
- r = r600_fence_wait(fence, intr, 0);
- if (r == -ERESTARTSYS)
- return -EBUSY;
- return r;
- }
-
retry:
cur_jiffies = jiffies;
timeout = HZ / 100;
@@ -231,14 +193,17 @@ retry:
}
if (intr) {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
- if (unlikely(r == -ERESTARTSYS)) {
- return -EBUSY;
- }
+ radeon_irq_kms_sw_irq_put(rdev);
+ if (unlikely(r < 0))
+ return r;
} else {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
+ radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
if (unlikely(r == 0)) {
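The wait path now brackets every sleep with radeon_irq_kms_sw_irq_get()/put(), so the software interrupt is effectively reference-counted and enabled only while someone is waiting. A minimal model of that discipline (the driver uses atomics and a spinlock rather than a bare counter):

	#include <stdio.h>

	static int sw_irq_refcount;

	static void sw_irq_get(void)
	{
		if (sw_irq_refcount++ == 0)
			puts("enable sw irq");	/* first waiter switches it on */
	}

	static void sw_irq_put(void)
	{
		if (--sw_irq_refcount == 0)
			puts("disable sw irq");	/* last waiter switches it off */
	}

	static void wait_for_fence(void)
	{
		sw_irq_get();
		/* wait_event[_interruptible]_timeout(...) would sleep here */
		sw_irq_put();
	}

	int main(void)
	{
		wait_for_fence();
		return 0;
	}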
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
index 90187d17384..3d4d84e078a 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -38,6 +38,23 @@ typedef union rfixed {
#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12)
+static inline u32 rfixed_floor(fixed20_12 A)
+{
+ u32 non_frac = rfixed_trunc(A);
+
+ return rfixed_const(non_frac);
+}
+
+static inline u32 rfixed_ceil(fixed20_12 A)
+{
+ u32 non_frac = rfixed_trunc(A);
+
+ if (A.full > rfixed_const(non_frac))
+ return rfixed_const(non_frac + 1);
+ else
+ return rfixed_const(non_frac);
+}
+
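rfixed_floor()/rfixed_ceil() round a 20.12 fixed-point value to a whole number while keeping it in fixed-point form: truncate to the integer part, and for ceil bump by one whenever any fraction bits were set. The same helpers standalone, with spot checks:

	#include <assert.h>
	#include <stdint.h>

	#define FCONST(a)  ((uint32_t)(a) << 12)	/* rfixed_const */
	#define FTRUNC(a)  ((a) >> 12)			/* rfixed_trunc */

	static uint32_t ffloor(uint32_t a) { return FCONST(FTRUNC(a)); }

	static uint32_t fceil(uint32_t a)
	{
		uint32_t n = FTRUNC(a);

		return a > FCONST(n) ? FCONST(n + 1) : FCONST(n);
	}

	int main(void)
	{
		uint32_t x = FCONST(5) + 1;		/* 5 + 1/4096 */

		assert(ffloor(x) == FCONST(5));
		assert(fceil(x)  == FCONST(6));
		assert(fceil(FCONST(5)) == FCONST(5));	/* exact values stay put */
		return 0;
	}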
static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
{
u64 tmp = ((u64)A.full << 13);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d7566178..e73d56e83fa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_object_create(rdev, NULL,
- rdev->gart.table_size,
- true,
- RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->gart.table.vram.robj);
+ r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->gart.table.vram.robj);
if (r) {
return r;
}
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
uint64_t gpu_addr;
int r;
- r = radeon_object_pin(rdev->gart.table.vram.robj,
- RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
- if (r) {
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (unlikely(r != 0))
return r;
- }
- r = radeon_object_kmap(rdev->gart.table.vram.robj,
- (void **)&rdev->gart.table.vram.ptr);
+ r = radeon_bo_pin(rdev->gart.table.vram.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
- DRM_ERROR("radeon: failed to map gart vram table.\n");
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
return r;
}
+ r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+ (void **)&rdev->gart.table.vram.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
- return 0;
+ return r;
}
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
+ int r;
+
if (rdev->gart.table.vram.robj == NULL) {
return;
}
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ radeon_bo_unref(&rdev->gart.table.vram.robj);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d880edf254d..2944486871b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_object *robj = gobj->driver_private;
+ struct radeon_bo *robj = gobj->driver_private;
gobj->driver_private = NULL;
if (robj) {
- radeon_object_unref(&robj);
+ radeon_bo_unref(&robj);
}
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj)
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj)
{
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
*obj = NULL;
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
- interruptible, &robj);
+ r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
size, initial_domain, alignment);
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_object *robj = obj->driver_private;
- uint32_t flags;
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
- switch (pin_domain) {
- case RADEON_GEM_DOMAIN_VRAM:
- flags = TTM_PL_FLAG_VRAM;
- break;
- case RADEON_GEM_DOMAIN_GTT:
- flags = TTM_PL_FLAG_TT;
- break;
- default:
- flags = TTM_PL_FLAG_SYSTEM;
- break;
- }
- return radeon_object_pin(robj, flags, gpu_addr);
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+ radeon_bo_unreserve(robj);
+ return r;
}
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_object *robj = obj->driver_private;
- radeon_object_unpin(robj);
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
+
+ r = radeon_bo_reserve(robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
uint32_t domain;
int r;
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
if (r) {
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
+ radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
void radeon_gem_fini(struct radeon_device *rdev)
{
- radeon_object_force_delete(rdev);
+ radeon_bo_force_delete(rdev);
}
@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct drm_radeon_gem_info *args = data;
args->vram_size = rdev->mc.real_vram_size;
- /* FIXME: report somethings that makes sense */
- args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
- args->gart_size = rdev->mc.gtt_size;
+ args->vram_visible = rdev->mc.real_vram_size;
+ if (rdev->stollen_vga_memory)
+ args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+ if (rdev->fbdev_rbo)
+ args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+ RADEON_IB_POOL_SIZE*64*1024;
return 0;
}
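The new ioctl numbers are straight subtraction: visible VRAM is total VRAM minus the driver's own pinned buffers (stolen VGA memory, fbdev framebuffer), and usable GTT loses the ring, one page, and the IB pool. With illustrative sizes (not real hardware values, and a 16-entry IB pool assumed):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t MiB = 1024 * 1024;
		uint64_t vram = 256 * MiB, stolen = 8 * MiB, fbdev = 8 * MiB;
		uint64_t gtt = 512 * MiB, ring = 1 * MiB, ib_pool = 16 * 64 * 1024;

		printf("vram_visible = %llu MiB\n",
		       (unsigned long long)((vram - stolen - fbdev) / MiB));
		printf("gart_size    = %llu bytes\n",
		       (unsigned long long)(gtt - ring - 4096 - ib_pool));
		return 0;
	}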
@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, true, &gobj);
+ args->initial_domain, false,
+ false, &gobj);
if (r) {
return r;
}
@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* just validate the BO into a certain domain */
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- int r;
+ struct radeon_bo *robj;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_mmap(robj, &args->addr_ptr);
+ args->addr_ptr = radeon_bo_mmap_offset(robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- return r;
+ return 0;
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,16 +266,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
- uint32_t cur_placement;
+ uint32_t cur_placement = 0;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_busy_domain(robj, &cur_placement);
+ r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
+ radeon_hdp_flush(robj->rdev);
return r;
}
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_set_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r = 0;
DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL)
return -EINVAL;
robj = gobj->driver_private;
- radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+ r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
@@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_get_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
int r = 0;
DRM_DEBUG("\n");
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -EINVAL;
- robj = gobj->driver_private;
- radeon_object_get_tiling_flags(robj, &args->tiling_flags,
- &args->pitch);
+ rbo = gobj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+ radeon_bo_unreserve(rbo);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index dd438d32e5c..da3da1e89d0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
- struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
- struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety sake
*/
- if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
- if (rec->a_clk_reg == RADEON_GPIO_MONID) {
- WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
- R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
- } else {
- WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
- R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ if (rec->hw_capable) {
+ if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+ if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+ } else {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ }
}
}
- if (lock_state) {
- temp = RREG32(rec->a_clk_reg);
- temp &= ~(rec->a_clk_mask);
- WREG32(rec->a_clk_reg, temp);
-
- temp = RREG32(rec->a_data_reg);
- temp &= ~(rec->a_data_mask);
- WREG32(rec->a_data_reg, temp);
- }
+ /* clear the output pin values */
+ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, temp);
+
+ temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, temp);
+
+ /* set the pins to input */
+ temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, temp);
+
+ temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, temp);
+
+ /* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg);
if (lock_state)
temp |= rec->mask_clk_mask;
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_clk_reg);
- val &= rec->get_clk_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_clk_reg);
+ val &= rec->y_clk_mask;
return (val != 0);
}
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_data_reg);
- val &= rec->get_data_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_data_reg);
+ val &= rec->y_data_mask;
+
return (val != 0);
}
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
- val |= clock ? 0 : rec->put_clk_mask;
- WREG32(rec->put_clk_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ val |= clock ? 0 : rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, val);
}
static void set_data(void *i2c_priv, int data)
@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
- val |= data ? 0 : rec->put_data_mask;
- WREG32(rec->put_data_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ val |= data ? 0 : rec->en_data_mask;
+ WREG32(rec->en_data_reg, val);
}
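Note that set_clock() and set_data() never drive a line high: with the output latches (the "a" registers) cleared by radeon_i2c_do_lock(), a pin is pulled low by switching it to output and released by switching it back to input, letting the bus pull-up raise it. This is the usual open-drain emulation; a sketch for the clock line:

	/* the "a" latch already holds 0 (cleared in radeon_i2c_do_lock) */
	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
	WREG32(rec->en_clk_reg, val | rec->en_clk_mask);	/* output: SCL driven low */
	WREG32(rec->en_clk_reg, val);				/* input: SCL floats high */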
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
- struct radeon_i2c_bus_rec *rec,
- const char *name)
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
{
struct radeon_i2c_chan *i2c;
int ret;
@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
return NULL;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.algo_data = &i2c->algo;
i2c->dev = dev;
- i2c->algo.setsda = set_data;
- i2c->algo.setscl = set_clock;
- i2c->algo.getsda = get_data;
- i2c->algo.getscl = get_clock;
- i2c->algo.udelay = 20;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.setsda = set_data;
+ i2c->algo.bit.setscl = set_clock;
+ i2c->algo.bit.getsda = get_data;
+ i2c->algo.bit.getscl = get_clock;
+ i2c->algo.bit.udelay = 20;
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
* make this, 2 jiffies is a lot more reliable */
- i2c->algo.timeout = 2;
- i2c->algo.data = i2c;
+ i2c->algo.bit.timeout = 2;
+ i2c->algo.bit.data = i2c;
i2c->rec = *rec;
- i2c_set_adapdata(&i2c->adapter, i2c);
-
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,6 +206,38 @@ out_free:
}
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct radeon_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+ if (i2c == NULL)
+ return NULL;
+
+ i2c->rec = *rec;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ i2c->adapter.algo_data = &i2c->algo.dp;
+ i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
+ i2c->algo.dp.address = 0;
+ ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_INFO("Failed to register i2c %s\n", name);
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+
+}
+
+
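A caller would pick the aux-channel variant for a DisplayPort path and the bit-banging variant otherwise; a hedged sketch (the condition is illustrative):

	struct radeon_i2c_chan *chan;

	if (connector_is_dp)	/* illustrative test */
		chan = radeon_i2c_create_dp(dev, &bus_rec, "DP-aux");
	else
		chan = radeon_i2c_create(dev, &bus_rec, "DDC");
	if (chan == NULL)
		return -ENOMEM;	/* both constructors return NULL on failure */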
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
return NULL;
}
+
+void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
+{
+ u8 out_buf[2];
+ u8 in_buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+ } else {
+ DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+ addr, *val);
+ }
+}
+
+void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
+{
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = val;
+
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+ DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+ addr, val);
+}
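These helpers wrap the usual register-style transaction: a one-byte write of the register offset followed by a one-byte read, or a combined offset+value write. An illustrative use against an external DVO encoder (slave address and register offsets are made up):

	u8 id;

	radeon_i2c_sw_get_byte(tmds->i2c_bus, 0x38, 0x00, &id);		/* read an id register */
	radeon_i2c_sw_put_byte(tmds->i2c_bus, 0x38, 0x08, 0x01);	/* enable the output */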
+
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a0fe6232dcb..9223296fe37 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
return radeon_irq_process(rdev);
}
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ hotplug_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ }
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_sysfs_hotplug_event(dev);
+}
+
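The interrupt path itself then only has to queue this work; presumably something like the following runs from the IRQ handler when a hotplug interrupt fires:

	if (hpd_interrupt_pending)	/* illustrative condition */
		schedule_work(&rdev->hotplug_work);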
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
unsigned i;
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
@@ -87,17 +108,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_SINGLE_CRTC)
num_crtc = 1;
-
+ spin_lock_init(&rdev->irq.sw_lock);
r = drm_vblank_init(rdev->ddev, num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
- if (rdev->family >= CHIP_RV380) {
+ /* MSIs don't seem to work on my rs780;
+ * not sure about rs880 or other rs780s.
+ * Needs more investigation.
+ */
+ if ((rdev->family >= CHIP_RV380) &&
+ (rdev->family != CHIP_RS780) &&
+ (rdev->family != CHIP_RS880)) {
int ret = pci_enable_msi(rdev->pdev);
- if (!ret)
+ if (!ret) {
rdev->msi_enabled = 1;
+ DRM_INFO("radeon: using MSI.\n");
+ }
}
drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
@@ -114,3 +143,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
pci_disable_msi(rdev->pdev);
}
}
+
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+ rdev->irq.sw_int = true;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+ rdev->irq.sw_int = false;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
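The get/put pair refcounts the software interrupt so that several in-flight fences can share it; presumably the fence emit path takes a reference and drops it once the fence has signalled:

	radeon_irq_kms_sw_irq_get(rdev);	/* first caller turns sw_int on */
	/* ... emit the fence and wait for it ... */
	radeon_irq_kms_sw_irq_put(rdev);	/* last caller turns it off again */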
+
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ba128621057..f23b05606eb 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,10 +30,19 @@
#include "radeon.h"
#include "radeon_drm.h"
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev == NULL)
+ return 0;
+ radeon_modeset_fini(rdev);
+ radeon_device_fini(rdev);
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+}
-/*
- * Driver load/unload
- */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
*/
r = radeon_device_init(rdev, dev, dev->pdev, flags);
if (r) {
- DRM_ERROR("Fatal error while trying to initialize radeon.\n");
- return r;
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ goto out;
}
/* Again modeset_init should fail only on fatal error
* otherwise it should provide enough functionalities
* for shadowfb to run
*/
r = radeon_modeset_init(rdev);
- if (r) {
- return r;
- }
- return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev == NULL)
- return 0;
- radeon_modeset_fini(rdev);
- radeon_device_fini(rdev);
- kfree(rdev);
- dev->dev_private = NULL;
- return 0;
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+ if (r)
+ radeon_driver_unload_kms(dev);
+ return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8d0b7aa87fa..b82ede98e15 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -30,6 +30,18 @@
#include "radeon.h"
#include "atom.h"
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
uint32_t mask;
if (radeon_crtc->crtc_id)
- mask = (RADEON_CRTC2_EN |
- RADEON_CRTC2_DISP_DIS |
+ mask = (RADEON_CRTC2_DISP_DIS |
RADEON_CRTC2_VSYNC_DIS |
RADEON_CRTC2_HSYNC_DIS |
RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
uint64_t base;
uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
uint32_t crtc_pitch, pitch_pixels;
uint32_t tiling_flags;
int format;
uint32_t gen_cntl_reg, gen_cntl_val;
+ int r;
DRM_DEBUG("\n");
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
@@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return false;
}
+	/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+ if (tiling_flags & RADEON_TILING_MICRO)
+ DRM_ERROR("trying to scanout microtiled buffer\n");
+
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
(crtc->fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
- if (tiling_flags & RADEON_TILING_MICRO)
- DRM_ERROR("trying to scanout microtiled buffer\n");
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
@@ -530,7 +556,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
/* Bytes per pixel may have changed */
@@ -642,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
- /* check to see if TV DAC is enabled for another crtc and keep it enabled */
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
- crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
- else
- crtc2_gen_cntl = 0;
-
+	/* if the TV DAC is enabled for another crtc, keep it enabled */
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
crtc2_gen_cntl |= ((format << 8)
| RADEON_CRTC2_VSYNC_DIS
| RADEON_CRTC2_HSYNC_DIS
@@ -676,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc_ext_cntl;
uint32_t disp_merge_cntl;
- crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+ crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
| (format << 8)
| RADEON_CRTC_DISP_REQ_EN_B
| ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -779,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
- if (lvds) {
- if (lvds->use_bios_dividers) {
- pll_ref_div = lvds->panel_ref_divider;
- pll_fb_post_div = (lvds->panel_fb_divider |
- (lvds->panel_post_divider << 16));
- htotal_cntl = 0;
- use_bios_divs = true;
+ if (!rdev->is_atom_bios) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ if (lvds->use_bios_dividers) {
+ pll_ref_div = lvds->panel_ref_divider;
+ pll_fb_post_div = (lvds->panel_fb_divider |
+ (lvds->panel_post_divider << 16));
+ htotal_cntl = 0;
+ use_bios_divs = true;
+ }
}
}
pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1027,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_set_crtc_timing(crtc, adjusted_mode);
radeon_set_pll(crtc, adjusted_mode);
+ radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) {
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
} else {
@@ -1042,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * The hardware wedges sometimes if you reconfigure one CRTC
+ * whilst another is running (see fdo bug #24611).
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
}
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * Reenable the CRTCs that should be running.
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+ if (crtci->enabled)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+ }
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 00382122869..df00515e81f 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
- if ((!rdev->is_atom_bios)) {
+ if (rdev->is_atom_bios) {
+		/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
+		 * we need to call that on resume to set up the reg properly.
+ */
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ } else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
(lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
} else
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
- } else
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ }
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
lvds_gen_cntl &= ~(RADEON_LVDS_ON |
RADEON_LVDS_BLON |
@@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
-static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->base.id = mode_id;
+ }
return true;
}
static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.dpms = radeon_legacy_lvds_dpms,
- .mode_fixup = radeon_legacy_lvds_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_lvds_prepare,
.mode_set = radeon_legacy_lvds_mode_set,
.commit = radeon_legacy_lvds_commit,
@@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
.dpms = radeon_legacy_primary_dac_dpms,
- .mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_primary_dac_prepare,
.mode_set = radeon_legacy_primary_dac_mode_set,
.commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
.dpms = radeon_legacy_tmds_int_dpms,
- .mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_int_prepare,
.mode_set = radeon_legacy_tmds_int_mode_set,
.commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/*if (mode->clock > 165000)
fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
}
+ if (!radeon_combios_external_tmds_setup(encoder))
+ radeon_external_tmds_setup(encoder);
}
if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+ if (tmds) {
+ if (tmds->i2c_bus)
+ radeon_i2c_destroy(tmds->i2c_bus);
+ }
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
.dpms = radeon_legacy_tmds_ext_dpms,
- .mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_ext_prepare,
.mode_set = radeon_legacy_tmds_ext_mode_set,
.commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
- .destroy = radeon_enc_destroy,
+ .destroy = radeon_ext_tmds_enc_destroy,
};
-static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
.dpms = radeon_legacy_tv_dac_dpms,
- .mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tv_dac_prepare,
.mode_set = radeon_legacy_tv_dac_mode_set,
.commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
return tmds;
}
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_ext_tmds *tmds = NULL;
+ bool ret;
+
+ if (rdev->is_atom_bios)
+ return NULL;
+
+ tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+ if (ret == false)
+ radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+ return tmds;
+}
+
void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
@@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
@@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
if (!rdev->is_atom_bios)
- radeon_combios_get_ext_tmds_info(radeon_encoder);
+ radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
break;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ace726aa0d7..44d4b652ea1 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
#include <drm_crtc.h>
#include <drm_mode.h>
#include <drm_edid.h>
+#include <drm_dp_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
@@ -89,24 +90,45 @@ enum radeon_tv_std {
TV_STD_PAL_CN,
};
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ * grabs the gpio pins for software use
+ * 0=not held 1=held
+ * 2. "a" reg and bits
+ * output pin value
+ * 0=low 1=high
+ * 3. "en" reg and bits
+ * sets the pin direction
+ * 0=input 1=output
+ * 4. "y" reg and bits
+ * input pin value
+ * 0=low 1=high
+ */
struct radeon_i2c_bus_rec {
bool valid;
+ /* id used by atom */
+ uint8_t i2c_id;
+ /* can be used with hw i2c engine */
+ bool hw_capable;
+ /* uses multi-media i2c engine */
+ bool mm_i2c;
+ /* regs and bits */
uint32_t mask_clk_reg;
uint32_t mask_data_reg;
uint32_t a_clk_reg;
uint32_t a_data_reg;
- uint32_t put_clk_reg;
- uint32_t put_data_reg;
- uint32_t get_clk_reg;
- uint32_t get_data_reg;
+ uint32_t en_clk_reg;
+ uint32_t en_data_reg;
+ uint32_t y_clk_reg;
+ uint32_t y_data_reg;
uint32_t mask_clk_mask;
uint32_t mask_data_mask;
- uint32_t put_clk_mask;
- uint32_t put_data_mask;
- uint32_t get_clk_mask;
- uint32_t get_data_mask;
uint32_t a_clk_mask;
uint32_t a_data_mask;
+ uint32_t en_clk_mask;
+ uint32_t en_data_mask;
+ uint32_t y_clk_mask;
+ uint32_t y_data_mask;
};
struct radeon_tmds_pll {
@@ -150,9 +172,12 @@ struct radeon_pll {
};
struct radeon_i2c_chan {
- struct drm_device *dev;
struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
+ struct drm_device *dev;
+ union {
+ struct i2c_algo_dp_aux_data dp;
+ struct i2c_algo_bit_data bit;
+ } algo;
struct radeon_i2c_bus_rec rec;
};
@@ -170,6 +195,11 @@ enum radeon_connector_table {
CT_EMAC,
};
+enum radeon_dvo_chip {
+ DVO_SIL164,
+ DVO_SIL1178,
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -261,6 +291,13 @@ struct radeon_encoder_int_tmds {
struct radeon_tmds_pll tmds_pll[4];
};
+struct radeon_encoder_ext_tmds {
+ /* tmds over dvo */
+ struct radeon_i2c_chan *i2c_bus;
+ uint8_t slave_addr;
+ enum radeon_dvo_chip dvo_chip;
+};
+
/* spread spectrum */
struct radeon_atom_ss {
uint16_t percentage;
@@ -302,6 +339,35 @@ struct radeon_encoder {
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
bool linkb;
+ /* displayport */
+ struct radeon_i2c_chan *dp_i2c_bus;
+ u8 dpcd[8];
+ u8 dp_sink_type;
+ int dp_clock;
+ int dp_lane_count;
+};
+
+struct radeon_gpio_rec {
+ bool valid;
+ u8 id;
+ u32 reg;
+ u32 mask;
+};
+
+enum radeon_hpd_id {
+ RADEON_HPD_NONE = 0,
+ RADEON_HPD_1,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+};
+
+struct radeon_hpd {
+ enum radeon_hpd_id hpd;
+ u8 plugged_state;
+ struct radeon_gpio_rec gpio;
};
struct radeon_connector {
@@ -318,6 +384,7 @@ struct radeon_connector {
void *con_priv;
bool dac_load_detect;
uint16_t connector_object_id;
+ struct radeon_hpd hpd;
};
struct radeon_framebuffer {
@@ -325,10 +392,37 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+ struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+extern void dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+ int action, uint8_t lane_num,
+ uint8_t lane_set);
+extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte);
+
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -343,12 +437,24 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *post_div_p,
int flags);
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
@@ -378,12 +484,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
-bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
extern struct radeon_encoder_primary_dac *
radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
@@ -395,6 +505,8 @@ extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +538,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056dadc5c..2040937682f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,74 +34,32 @@
#include "radeon_drm.h"
#include "radeon.h"
-struct radeon_object {
- struct ttm_buffer_object tobj;
- struct list_head list;
- struct radeon_device *rdev;
- struct drm_gem_object *gobj;
- struct ttm_bo_kmap_obj kmap;
- unsigned pin_count;
- uint64_t gpu_addr;
- void *kptr;
- bool is_iomem;
- uint32_t tiling_flags;
- uint32_t pitch;
- int surface_reg;
-};
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
/*
* To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
*/
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
- return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
-}
-
-static void radeon_object_unreserve(struct radeon_object *robj)
-{
- ttm_bo_unreserve(&robj->tobj);
-}
-
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
-{
- struct radeon_object *robj;
+ struct radeon_bo *bo;
- robj = container_of(tobj, struct radeon_object, tobj);
- list_del_init(&robj->list);
- radeon_object_clear_surface_reg(robj);
- kfree(robj);
+ bo = container_of(tbo, struct radeon_bo, tbo);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_clear_surface_reg(bo);
+ kfree(bo);
}
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
+static inline u32 radeon_ttm_flags_from_domain(u32 domain)
{
- /* Default gpu address */
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- if (robj->tobj.mem.mm_node == NULL) {
- return;
- }
- robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
- switch (robj->tobj.mem.mem_type) {
- case TTM_PL_VRAM:
- robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
- break;
- case TTM_PL_TT:
- robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- return;
- }
-}
+ u32 flags = 0;
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
- uint32_t flags = 0;
if (domain & RADEON_GEM_DOMAIN_VRAM) {
flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
}
@@ -117,17 +75,32 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
return flags;
}
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr)
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+ u32 c = 0;
+
+ rbo->placement.fpfn = 0;
+ rbo->placement.lpfn = 0;
+ rbo->placement.placement = rbo->placements;
+ rbo->placement.busy_placement = rbo->placements;
+ if (domain & RADEON_GEM_DOMAIN_VRAM)
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ if (domain & RADEON_GEM_DOMAIN_GTT)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ if (domain & RADEON_GEM_DOMAIN_CPU)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ rbo->placement.num_placement = c;
+ rbo->placement.num_busy_placement = c;
+}
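Each requested domain contributes one placement entry, VRAM first, so TTM tries them in order. For example:

	radeon_ttm_placement_from_domain(bo,
			RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
	/* placements[0] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM
	 * placements[1] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT
	 * num_placement = num_busy_placement = 2
	 */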
+
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+ unsigned long size, bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr)
{
- struct radeon_object *robj;
+ struct radeon_bo *bo;
enum ttm_bo_type type;
- uint32_t flags;
+ u32 flags;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +111,125 @@ int radeon_object_create(struct radeon_device *rdev,
} else {
type = ttm_bo_type_device;
}
- *robj_ptr = NULL;
- robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
- if (robj == NULL) {
+ *bo_ptr = NULL;
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
return -ENOMEM;
- }
- robj->rdev = rdev;
- robj->gobj = gobj;
- robj->surface_reg = -1;
- INIT_LIST_HEAD(&robj->list);
-
- flags = radeon_object_flags_from_domain(domain);
- r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
- 0, 0, false, NULL, size,
- &radeon_ttm_object_object_destroy);
+ bo->rdev = rdev;
+ bo->gobj = gobj;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+
+ flags = radeon_ttm_flags_from_domain(domain);
+	/* Kernel allocations are uninterruptible */
+ r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
+ flags, 0, 0, !kernel, NULL, size,
+ &radeon_ttm_bo_destroy);
if (unlikely(r != 0)) {
- /* ttm call radeon_ttm_object_object_destroy if error happen */
- DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
- size, flags, 0);
+ if (r != -ERESTARTSYS)
+ dev_err(rdev->dev,
+ "object_init failed for (%ld, 0x%08X)\n",
+ size, flags);
return r;
}
- *robj_ptr = robj;
+ *bo_ptr = bo;
if (gobj) {
- list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&bo->rdev->gem.mutex);
}
return 0;
}
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
+ bool is_iomem;
int r;
- spin_lock(&robj->tobj.lock);
- if (robj->kptr) {
+ if (bo->kptr) {
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- spin_unlock(&robj->tobj.lock);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
if (r) {
return r;
}
- spin_lock(&robj->tobj.lock);
- robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
- spin_unlock(&robj->tobj.lock);
+ bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- radeon_object_check_tiling(robj, 0, 0);
+ radeon_bo_check_tiling(bo, 0, 0);
return 0;
}
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
{
- spin_lock(&robj->tobj.lock);
- if (robj->kptr == NULL) {
- spin_unlock(&robj->tobj.lock);
+ if (bo->kptr == NULL)
return;
- }
- robj->kptr = NULL;
- spin_unlock(&robj->tobj.lock);
- radeon_object_check_tiling(robj, 0, 0);
- ttm_bo_kunmap(&robj->kmap);
+ bo->kptr = NULL;
+ radeon_bo_check_tiling(bo, 0, 0);
+ ttm_bo_kunmap(&bo->kmap);
}
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
{
- struct ttm_buffer_object *tobj;
+ struct ttm_buffer_object *tbo;
- if ((*robj) == NULL) {
+ if ((*bo) == NULL)
return;
- }
- tobj = &((*robj)->tobj);
- ttm_bo_unref(&tobj);
- if (tobj == NULL) {
- *robj = NULL;
- }
-}
-
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
-{
- *offset = robj->tobj.addr_space_offset;
- return 0;
+ tbo = &((*bo)->tbo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
}
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
- uint32_t flags;
- uint32_t tmp;
- int r;
+ int r, i;
- flags = radeon_object_flags_from_domain(domain);
- spin_lock(&robj->tobj.lock);
- if (robj->pin_count) {
- robj->pin_count++;
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
- }
- spin_unlock(&robj->tobj.lock);
+ radeon_ttm_placement_from_domain(bo, domain);
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
- return r;
- }
- tmp = robj->tobj.mem.placement;
- ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
- robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- radeon_object_gpu_addr(robj);
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
- }
- robj->pin_count = 1;
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to pin object.\n");
- }
- radeon_object_unreserve(robj);
+ radeon_ttm_placement_from_domain(bo, domain);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+ if (likely(r == 0)) {
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
+ }
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p pin failed\n", bo);
return r;
}
-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
{
- uint32_t flags;
- int r;
+ int r, i;
- spin_lock(&robj->tobj.lock);
- if (!robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
- return;
- }
- robj->pin_count--;
- if (robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- return;
- }
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
- return;
- }
- flags = robj->tobj.mem.placement;
- robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to unpin buffer.\n");
- }
- radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
- int r = 0;
-
- /* FIXME: should use block reservation instead */
- r = radeon_object_reserve(robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
- return r;
- }
- spin_lock(&robj->tobj.lock);
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, false);
- }
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
- return r;
-}
-
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
-{
- int r = 0;
-
- r = radeon_object_reserve(robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
- return r;
- }
- spin_lock(&robj->tobj.lock);
- *cur_placement = robj->tobj.mem.mem_type;
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, true);
+ if (!bo->pin_count) {
+ dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+ return 0;
}
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
}
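Pinning is refcounted: only the first pin validates with TTM_PL_FLAG_NO_EVICT set and only the last unpin clears it, so nested pins are cheap. A sketch of the pairing (under reservation, as above):

	radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);	/* pin_count 0 -> 1, validates    */
	radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, NULL);	/* pin_count 1 -> 2, no revalidate */
	radeon_bo_unpin(bo);					/* pin_count 2 -> 1, still resident */
	radeon_bo_unpin(bo);					/* pin_count 1 -> 0, NO_EVICT cleared */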
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP) {
/* Useless to evict on IGP chips */
@@ -346,30 +238,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
{
- struct radeon_object *robj, *n;
+ struct radeon_bo *bo, *n;
struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
}
- DRM_ERROR("Userspace still has active objects !\n");
- list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+ dev_err(rdev->dev, "Userspace still has active objects !\n");
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = robj->gobj;
- DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
- gobj, robj, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
- list_del_init(&robj->list);
- radeon_object_unref(&robj);
+ gobj = bo->gobj;
+ dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+ gobj, bo, (unsigned long)gobj->size,
+ *((unsigned long *)&gobj->refcount));
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_unref(&bo);
gobj->driver_private = NULL;
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
{
/* Add an MTRR for the VRAM */
rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +276,13 @@ int radeon_object_init(struct radeon_device *rdev)
return radeon_ttm_init(rdev);
}
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
}
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head)
{
if (lobj->wdomain) {
list_add(&lobj->list, head);
@@ -397,72 +291,63 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
}
}
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
int r;
list_for_each_entry(lobj, head, list){
- if (!lobj->robj->pin_count) {
- r = radeon_object_reserve(lobj->robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object.\n");
- return r;
- }
- } else {
- }
+ r = radeon_bo_reserve(lobj->bo, false);
+ if (unlikely(r != 0))
+ return r;
}
return 0;
}
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
list_for_each_entry(lobj, head, list) {
- if (!lobj->robj->pin_count) {
- radeon_object_unreserve(lobj->robj);
- }
+		/* only unreserve objects we successfully reserved */
+ if (radeon_bo_is_reserved(lobj->bo))
+ radeon_bo_unreserve(lobj->bo);
}
}
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_object *robj;
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
struct radeon_fence *old_fence = NULL;
int r;
- r = radeon_object_list_reserve(head);
+ r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
- radeon_object_list_unreserve(head);
return r;
}
list_for_each_entry(lobj, head, list) {
- robj = lobj->robj;
- if (!robj->pin_count) {
+ bo = lobj->bo;
+ if (!bo->pin_count) {
if (lobj->wdomain) {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->wdomain);
+ radeon_ttm_placement_from_domain(bo,
+ lobj->wdomain);
} else {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->rdomain);
+ radeon_ttm_placement_from_domain(bo,
+ lobj->rdomain);
}
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- true, false);
- if (unlikely(r)) {
- DRM_ERROR("radeon: failed to validate.\n");
+ r = ttm_buffer_object_validate(&bo->tbo,
+ &bo->placement,
+ true, false);
+ if (unlikely(r))
return r;
- }
- radeon_object_gpu_addr(robj);
}
- lobj->gpu_offset = robj->gpu_addr;
- lobj->tiling_flags = robj->tiling_flags;
+ lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+ lobj->tiling_flags = bo->tiling_flags;
if (fence) {
- old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
- robj->tobj.sync_obj = radeon_fence_ref(fence);
- robj->tobj.sync_obj_arg = NULL;
+ old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+ bo->tbo.sync_obj = radeon_fence_ref(fence);
+ bo->tbo.sync_obj_arg = NULL;
}
if (old_fence) {
radeon_fence_unref(&old_fence);
@@ -471,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
return 0;
}
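A command-submission caller would pair these roughly as below; on a validation failure, unvalidate drops only the fences this pass attached and unreserves the list (a hedged sketch of the intended flow):

	r = radeon_bo_list_validate(&cs_list, fence);
	if (r) {
		radeon_bo_list_unvalidate(&cs_list, fence);
		return r;
	}
	/* ... submit the command stream ... */
	radeon_bo_list_unreserve(&cs_list);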
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_fence *old_fence = NULL;
+ struct radeon_bo_list *lobj;
+ struct radeon_fence *old_fence;
- list_for_each_entry(lobj, head, list) {
- old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
- lobj->robj->tobj.sync_obj = NULL;
- if (old_fence) {
- radeon_fence_unref(&old_fence);
+ if (fence)
+ list_for_each_entry(lobj, head, list) {
+ old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+ if (old_fence == fence) {
+ lobj->bo->tbo.sync_obj = NULL;
+ radeon_fence_unref(&old_fence);
+ }
}
- }
- radeon_object_list_unreserve(head);
-}
-
-void radeon_object_list_clean(struct list_head *head)
-{
- radeon_object_list_unreserve(head);
+ radeon_bo_list_unreserve(head);
}
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &robj->tobj);
+ return ttm_fbdev_mmap(vma, &bo->tbo);
}
-unsigned long radeon_object_size(struct radeon_object *robj)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
- return robj->tobj.num_pages << PAGE_SHIFT;
-}
-
-int radeon_object_get_surface_reg(struct radeon_object *robj)
-{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- struct radeon_object *old_object;
+ struct radeon_bo *old_object;
int steal;
int i;
- if (!robj->tiling_flags)
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!bo->tiling_flags)
return 0;
- if (robj->surface_reg >= 0) {
- reg = &rdev->surface_regs[robj->surface_reg];
- i = robj->surface_reg;
+ if (bo->surface_reg >= 0) {
+ reg = &rdev->surface_regs[bo->surface_reg];
+ i = bo->surface_reg;
goto out;
}
@@ -523,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
reg = &rdev->surface_regs[i];
- if (!reg->robj)
+ if (!reg->bo)
break;
- old_object = reg->robj;
+ old_object = reg->bo;
if (old_object->pin_count == 0)
steal = i;
}
@@ -537,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
return -ENOMEM;
/* find someone with a surface reg and nuke their BO */
reg = &rdev->surface_regs[steal];
- old_object = reg->robj;
+ old_object = reg->bo;
/* blow away the mapping */
DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
- ttm_bo_unmap_virtual(&old_object->tobj);
+ ttm_bo_unmap_virtual(&old_object->tbo);
old_object->surface_reg = -1;
i = steal;
}
- robj->surface_reg = i;
- reg->robj = robj;
+ bo->surface_reg = i;
+ reg->bo = bo;
out:
- radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
- robj->tobj.mem.mm_node->start << PAGE_SHIFT,
- robj->tobj.num_pages << PAGE_SHIFT);
+ radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+ bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+ bo->tbo.num_pages << PAGE_SHIFT);
return 0;
}
-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- if (robj->surface_reg == -1)
+ if (bo->surface_reg == -1)
return;
- reg = &rdev->surface_regs[robj->surface_reg];
- radeon_clear_surface_reg(rdev, robj->surface_reg);
+ reg = &rdev->surface_regs[bo->surface_reg];
+ radeon_clear_surface_reg(rdev, bo->surface_reg);
- reg->robj = NULL;
- robj->surface_reg = -1;
+ reg->bo = NULL;
+ bo->surface_reg = -1;
}
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch)
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
{
- robj->tiling_flags = tiling_flags;
- robj->pitch = pitch;
+ int r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+ bo->tiling_flags = tiling_flags;
+ bo->pitch = pitch;
+ radeon_bo_unreserve(bo);
+ return 0;
}
-void radeon_object_get_tiling_flags(struct radeon_object *robj,
- uint32_t *tiling_flags,
- uint32_t *pitch)
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ uint32_t *tiling_flags,
+ uint32_t *pitch)
{
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
if (tiling_flags)
- *tiling_flags = robj->tiling_flags;
+ *tiling_flags = bo->tiling_flags;
if (pitch)
- *pitch = robj->pitch;
+ *pitch = bo->pitch;
}
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop)
{
- if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
if (force_drop) {
- radeon_object_clear_surface_reg(robj);
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+ if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
if (!has_moved)
return 0;
- if (robj->surface_reg >= 0)
- radeon_object_clear_surface_reg(robj);
+ if (bo->surface_reg >= 0)
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if ((robj->surface_reg >= 0) && !has_moved)
+ if ((bo->surface_reg >= 0) && !has_moved)
return 0;
- return radeon_object_get_surface_reg(robj);
+ return radeon_bo_get_surface_reg(bo);
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 1);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 1);
}
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 0);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb45..f6b69c2c0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
-/*
- * TTM.
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type: ttm memory type
+ *
+ * Returns the GEM domain corresponding to the given ttm mem_type.
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ return RADEON_GEM_DOMAIN_VRAM;
+ case TTM_PL_TT:
+ return RADEON_GEM_DOMAIN_GTT;
+ case TTM_PL_SYSTEM:
+ return RADEON_GEM_DOMAIN_CPU;
+ default:
+ break;
+ }
+ return 0;
+}
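
A hedged usage sketch for the helper above (illustrative only; example_report_domain is not part of this patch):

/* Illustrative: report where a bo currently resides. */
static void example_report_domain(struct radeon_bo *bo)
{
	unsigned domain = radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

	DRM_DEBUG("bo %p domain 0x%08x (%s)\n", bo, domain,
		  domain == RADEON_GEM_DOMAIN_VRAM ? "VRAM" :
		  domain == RADEON_GEM_DOMAIN_GTT ? "GTT" : "CPU");
}
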
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * Note: -ERESTART from ttm_bo_reserve() (wait interrupted by a signal) is
+ * retried internally below, so callers of this wrapper never see it.
*/
-struct radeon_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
- bool mem_global_referenced;
- struct ttm_bo_device bdev;
-};
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+ int r;
+
+retry:
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
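
This reserve/unreserve pair is the locking idiom the rest of the series retrofits around every pin, kmap, and tiling operation. A minimal sketch of the canonical sequence, assuming an already-created bo (hypothetical helper):

/* Canonical reserve / operate / unreserve sequence (sketch). */
static int example_pin_and_map(struct radeon_bo *bo, void **ptr)
{
	int r;

	r = radeon_bo_reserve(bo, false);	/* may sleep; retries on -ERESTART */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (r == 0)
		r = radeon_bo_kmap(bo, ptr);
	radeon_bo_unreserve(bo);
	/* teardown later mirrors this: reserve, kunmap, unpin, unreserve */
	return r;
}
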
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+ return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+ return !!atomic_read(&bo->tbo.reserved);
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init and thus isn't protected
+ * by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.addr_space_offset;
+}
+
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+ bool no_wait)
+{
+ int r;
+
+retry:
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ return r;
+ }
+ spin_lock(&bo->tbo.lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.lock);
+ ttm_bo_unreserve(&bo->tbo);
+ if (unlikely(r == -ERESTART))
+ goto retry;
+ return r;
+}
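
A short usage sketch (hypothetical helper, modelled on a wait-for-idle path):

/* Sketch: block until the bo is idle and report where it ended up. */
static int example_wait_idle(struct radeon_bo *bo)
{
	u32 mem_type = 0;
	int r;

	r = radeon_bo_wait(bo, &mem_type, false);
	if (r == 0)
		DRM_DEBUG("bo %p idle, mem_type %u\n", bo, mem_type);
	return r;
}
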
+extern int radeon_bo_create(struct radeon_device *rdev,
+ struct drm_gem_object *gobj, unsigned long size,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head);
+extern int radeon_bo_list_reserve(struct list_head *head);
+extern void radeon_bo_list_unreserve(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, void *fence);
+extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 46146c6a2a0..34b08d307c8 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
int radeon_pm_init(struct radeon_device *rdev)
{
if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for CP !\n");
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
}
return 0;
@@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
+ seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 29ab75903ec..6d0a009dd4a 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -887,6 +887,7 @@
# define RADEON_FP_PANEL_FORMAT (1 << 3)
# define RADEON_FP_EN_TMDS (1 << 7)
# define RADEON_FP_DETECT_SENSE (1 << 8)
+# define RADEON_FP_DETECT_INT_POL (1 << 9)
# define R200_FP_SOURCE_SEL_MASK (3 << 10)
# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
@@ -894,6 +895,7 @@
# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
# define RADEON_FP_SEL_CRTC1 (0 << 13)
# define RADEON_FP_SEL_CRTC2 (1 << 13)
+# define R300_HPD_SEL(x) ((x) << 13)
# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
# define RADEON_FP2_ON (1 << 2)
# define RADEON_FP2_PANEL_FORMAT (1 << 3)
# define RADEON_FP2_DETECT_SENSE (1 << 8)
+# define RADEON_FP2_DETECT_INT_POL (1 << 9)
# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
@@ -988,14 +991,20 @@
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
+# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
+# define RADEON_FP2_DETECT_MASK (1 << 10)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_FP_DETECT_STAT (1 << 4)
+# define RADEON_FP_DETECT_STAT_ACK (1 << 4)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+# define RADEON_FP2_DETECT_STAT (1 << 10)
+# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -1051,20 +1060,25 @@
/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
-#define RADEON_I2C_DONE (1<<0)
-#define RADEON_I2C_NACK (1<<1)
-#define RADEON_I2C_HALT (1<<2)
-#define RADEON_I2C_SOFT_RST (1<<5)
-#define RADEON_I2C_DRIVE_EN (1<<6)
-#define RADEON_I2C_DRIVE_SEL (1<<7)
-#define RADEON_I2C_START (1<<8)
-#define RADEON_I2C_STOP (1<<9)
-#define RADEON_I2C_RECEIVE (1<<10)
-#define RADEON_I2C_ABORT (1<<11)
-#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_DONE (1 << 0)
+#define RADEON_I2C_NACK (1 << 1)
+#define RADEON_I2C_HALT (1 << 2)
+#define RADEON_I2C_SOFT_RST (1 << 5)
+#define RADEON_I2C_DRIVE_EN (1 << 6)
+#define RADEON_I2C_DRIVE_SEL (1 << 7)
+#define RADEON_I2C_START (1 << 8)
+#define RADEON_I2C_STOP (1 << 9)
+#define RADEON_I2C_RECEIVE (1 << 10)
+#define RADEON_I2C_ABORT (1 << 11)
+#define RADEON_I2C_GO (1 << 12)
+#define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
-#define RADEON_I2C_SEL (1<<16)
-#define RADEON_I2C_EN (1<<17)
+#define RADEON_I2C_DATA_COUNT_SHIFT 0
+#define RADEON_I2C_ADDR_COUNT_SHIFT 4
+#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+#define RADEON_I2C_SEL (1 << 16)
+#define RADEON_I2C_EN (1 << 17)
+#define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098
#define RADEON_DVI_I2C_CNTL_0 0x02e0
@@ -1072,7 +1086,7 @@
# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
-#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
+#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8
#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
-#define RADEON_LCD_GPIO_MASK 0x01a0
-#define RADEON_GPIOPAD_EN 0x01a0
-#define RADEON_LCD_GPIO_Y_REG 0x01a4
-#define RADEON_MDGPIO_A_REG 0x01ac
-#define RADEON_MDGPIO_EN_REG 0x01b0
-#define RADEON_MDGPIO_MASK 0x0198
+
#define RADEON_GPIOPAD_MASK 0x0198
#define RADEON_GPIOPAD_A 0x019c
-#define RADEON_MDGPIO_Y_REG 0x01b4
+#define RADEON_GPIOPAD_EN 0x01a0
+#define RADEON_GPIOPAD_Y 0x01a4
+#define RADEON_MDGPIO_MASK 0x01a8
+#define RADEON_MDGPIO_A 0x01ac
+#define RADEON_MDGPIO_EN 0x01b0
+#define RADEON_MDGPIO_Y 0x01b4
+
#define RADEON_MEM_ADDR_CONFIG 0x0148
#define RADEON_MEM_BASE 0x0f10 /* PCI */
#define RADEON_MEM_CNTL 0x0140
@@ -1360,6 +1375,9 @@
#define RADEON_OVR_CLR 0x0230
#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
+#define RADEON_OVR2_CLR 0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
/* first capture unit */
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84..4d12b2d17b4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
/* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
- r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
- true, RADEON_GEM_DOMAIN_GTT,
- false, &rdev->ib_pool.robj);
+ r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ true, RADEON_GEM_DOMAIN_GTT,
+ &rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
}
- r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
if (r) {
+ radeon_bo_unreserve(rdev->ib_pool.robj);
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
- r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+ r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
+ int r;
+
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
- radeon_object_kunmap(rdev->ib_pool.robj);
- radeon_object_unref(&rdev->ib_pool.robj);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ib_pool.robj);
+ radeon_bo_unpin(rdev->ib_pool.robj);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
+ }
+ radeon_bo_unref(&rdev->ib_pool.robj);
rdev->ib_pool.robj = NULL;
}
mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false,
- &rdev->cp.ring_obj);
+ r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->cp.ring_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.gpu_addr);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_object_kmap(rdev->cp.ring_obj,
+ r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
+ int r;
+
mutex_lock(&rdev->cp.mutex);
if (rdev->cp.ring_obj) {
- radeon_object_kunmap(rdev->cp.ring_obj);
- radeon_object_unpin(rdev->cp.ring_obj);
- radeon_object_unref(&rdev->cp.ring_obj);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->cp.ring_obj);
+ radeon_bo_unpin(rdev->cp.ring_obj);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ }
+ radeon_bo_unref(&rdev->cp.ring_obj);
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f8a465d9a1c..391c973ec4d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
- struct radeon_object *vram_obj = NULL;
- struct radeon_object **gtt_obj = NULL;
+ struct radeon_bo *vram_obj = NULL;
+ struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
- false, &vram_obj);
+ r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+ &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
}
-
- r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+ r = radeon_bo_reserve(vram_obj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
goto out_cleanup;
}
-
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_object_create(rdev, NULL, size, true,
- RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+ r = radeon_bo_create(rdev, NULL, size, true,
+ RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+ r = radeon_bo_reserve(gtt_obj[i], false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object %d\n", i);
goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++)
*gtt_start = gtt_start;
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(vram_obj, &vram_map);
+ r = radeon_bo_kmap(vram_obj, &vram_map);
if (r) {
DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (GTT map 0x%p-0x%p)\n",
i, *vram_start, gtt_start, gtt_map,
gtt_end);
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
goto out_cleanup;
}
*vram_start = vram_start;
}
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object after copy %d\n", i);
goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (VRAM map 0x%p-0x%p)\n",
i, *gtt_start, vram_start, vram_map,
vram_end);
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
goto out_cleanup;
}
}
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
out_cleanup:
if (vram_obj) {
- radeon_object_unpin(vram_obj);
- radeon_object_unref(&vram_obj);
+ if (radeon_bo_is_reserved(vram_obj)) {
+ radeon_bo_unpin(vram_obj);
+ radeon_bo_unreserve(vram_obj);
+ }
+ radeon_bo_unref(&vram_obj);
}
if (gtt_obj) {
for (i = 0; i < n; i++) {
if (gtt_obj[i]) {
- radeon_object_unpin(gtt_obj[i]);
- radeon_object_unref(&gtt_obj[i]);
+ if (radeon_bo_is_reserved(gtt_obj[i])) {
+ radeon_bo_unpin(gtt_obj[i]);
+ radeon_bo_unreserve(gtt_obj[i]);
+ }
+ radeon_bo_unref(&gtt_obj[i]);
}
}
kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
-
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index eda4ade24c3..5a19d529d1c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.gtt_location;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.vram_location;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
return 0;
}
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
{
- uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
-
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ break;
+ case TTM_PL_TT:
default:
- return (cur_placement & ~TTM_PL_MASK_CACHING) |
- TTM_PL_FLAG_SYSTEM |
- TTM_PL_FLAG_CACHED;
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
}
+ *placement = rbo->placement;
}
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- uint32_t proposed_placement;
+ u32 placements;
+ struct ttm_placement placement;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait);
if (unlikely(r)) {
return r;
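
The on-stack placement built above replaces the old single-flags argument: ttm_bo_mem_space() now consumes a struct ttm_placement naming both the preferred and the busy-fallback lists. The same construction as a standalone sketch (fpfn/lpfn of 0 mean no page-range restriction; the caller must keep *flags alive while the placement is in use):

/* Build a one-entry placement asking for GTT with any caching. */
static void example_gtt_placement(struct ttm_placement *placement, u32 *flags)
{
	*flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	placement->fpfn = 0;			/* first allowed page frame */
	placement->lpfn = 0;			/* 0 = no upper limit */
	placement->num_placement = 1;
	placement->placement = flags;
	placement->num_busy_placement = 1;	/* fallback when space is tight */
	placement->busy_placement = flags;
}
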
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- uint32_t proposed_flags;
+ struct ttm_placement placement;
+ u32 placements;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
- interruptible, no_wait);
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
if (unlikely(r)) {
return r;
}
@@ -407,18 +423,6 @@ memcpy:
return r;
}
-const uint32_t radeon_mem_prios[] = {
- TTM_PL_VRAM,
- TTM_PL_TT,
- TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
- TTM_PL_TT,
- TTM_PL_VRAM,
- TTM_PL_SYSTEM,
-};
-
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
}
static struct ttm_bo_driver radeon_bo_driver = {
- .mem_type_prio = radeon_mem_prios,
- .mem_busy_prio = radeon_busy_prios,
- .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
- .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
@@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
- ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+ rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_object_create(rdev, NULL, 256 * 1024, true,
- RADEON_GEM_DOMAIN_VRAM, false,
- &rdev->stollen_vga_memory);
+ r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ &rdev->stollen_vga_memory);
if (r) {
return r;
}
- r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r)
+ return r;
+ r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
if (r) {
- radeon_object_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stollen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
- ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+ rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
@@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev)
void radeon_ttm_fini(struct radeon_device *rdev)
{
+ int r;
+
if (rdev->stollen_vga_memory) {
- radeon_object_unpin(rdev->stollen_vga_memory);
- radeon_object_unref(&rdev->stollen_vga_memory);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r == 0) {
+ radeon_bo_unpin(rdev->stollen_vga_memory);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
+ }
+ radeon_bo_unref(&rdev->stollen_vga_memory);
}
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ca037160a58..c1fcdddb6be 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
u32 tmp;
/* Setup GPU memory space */
- tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+ tmp = RREG32(R_00015C_NB_TOM);
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
rs400_gpu_init(rdev);
+ r100_enable_bm(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r = rs400_gart_enable(rdev);
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs400_startup(rdev);
}
@@ -452,7 +454,7 @@ void rs400_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -490,10 +492,9 @@ int rs400_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if the card is posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Get vram information */
@@ -510,7 +511,7 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f117cd8736..4f8ea426057 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,6 +45,122 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+int rs600_mc_init(struct radeon_device *rdev)
+{
+ /* read back the MC value from the hw */
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+ rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xffffffffUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
+/* hpd (hot plug detect) for digital panel connect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+ if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+ if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = rs600_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ else
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ else
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
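
A note on the logic above: the interrupt polarity is always programmed opposite to the current sense, so whichever way the line flips next raises an interrupt. Schematically:

/*
 * connected  -> polarity bit cleared -> interrupt fires on disconnect
 * !connected -> polarity bit set     -> interrupt fires on connect
 */
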
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+ rdev->irq.hpd[1] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ rs600_irq_set(rdev);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+ rdev->irq.hpd[1] = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
/*
* GART.
*/
@@ -100,40 +216,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
WREG32(R_00004C_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
- (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
- S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+ (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+ S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
for (i = 0; i < 19; i++) {
WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
- S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
- S_00016C_SYSTEM_ACCESS_MODE_MASK(
- V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
- S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
- V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
- S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
- S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
- S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
+ S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+ S_00016C_SYSTEM_ACCESS_MODE_MASK(
+ V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+ S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+ V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+ S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+ S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+ S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
}
-
- /* System context map to GART space */
- WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
-
/* enable first context */
- WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
- S_000102_ENABLE_PAGE_TABLE(1) |
- S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+ S_000102_ENABLE_PAGE_TABLE(1) |
+ S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
/* disable all other contexts */
- for (i = 1; i < 8; i++) {
+ for (i = 1; i < 8; i++)
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
- }
/* setup the page table */
WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
- rdev->gart.table_addr);
+ rdev->gart.table_addr);
+ WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+ WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+ /* System context maps to VRAM space */
+ WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+ WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
/* enable page tables */
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +262,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (r == 0) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -189,6 +310,10 @@ int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
uint32_t mode_int = 0;
+ u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+ ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+ ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
@@ -199,8 +324,16 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.crtc_vblank_int[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
+ if (rdev->irq.hpd[0]) {
+ hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ }
+ if (rdev->irq.hpd[1]) {
+ hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ }
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
return 0;
}
@@ -208,6 +341,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = ~C_000044_SW_INT;
+ u32 tmp;
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -219,6 +353,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
} else {
*r500_disp_int = 0;
}
@@ -244,6 +388,7 @@ int rs600_irq_process(struct radeon_device *rdev)
{
uint32_t status, msi_rearm;
uint32_t r500_disp_int;
+ bool queue_hotplug = false;
status = rs600_irq_ack(rdev, &r500_disp_int);
if (!status && !r500_disp_int) {
@@ -258,8 +403,18 @@ int rs600_irq_process(struct radeon_device *rdev)
drm_handle_vblank(rdev->ddev, 0);
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
drm_handle_vblank(rdev->ddev, 1);
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -301,9 +456,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs600 ? */
r100_hdp_reset(rdev);
- /* FIXME: is this correct ? */
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -312,9 +465,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
void rs600_vram_info(struct radeon_device *rdev)
{
- /* FIXME: to do or is these values sane ? */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
+
+ rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -388,7 +552,6 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -423,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs600_startup(rdev);
}
@@ -445,7 +610,7 @@ void rs600_fini(struct radeon_device *rdev)
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -482,10 +647,9 @@ int rs600_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if the card is posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -493,7 +657,7 @@ int rs600_init(struct radeon_device *rdev)
/* Get vram information */
rs600_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs600_mc_init(rdev);
if (r)
return r;
rs600_debugfs(rdev);
@@ -505,7 +669,7 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs600_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index 81308924859..c1c8f5885cb 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -30,27 +30,12 @@
/* Registers */
#define R_000040_GEN_INT_CNTL 0x000040
-#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
-#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
-#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
-#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
-#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
-#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
-#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
-#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
-#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
-#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
-#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
-#define C_000040_SNAPSHOT2 0xFFFFFF7F
-#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
-#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
-#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
-#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
-#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
-#define C_000040_FP2_DETECT 0xFFFFFBFF
-#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
-#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
-#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
+#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
+#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
+#define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF
+#define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE_MASK 0xFFF7FFFF
#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
@@ -370,7 +355,90 @@
#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
-
+#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF
+#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17)
+#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1)
+#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18)
+#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19)
+#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL 0x007828
+#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838
+#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007838_DACA_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28
+#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38
+#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007A38_DACB_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00
+#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0)
+#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1)
+#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04
+#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE
+#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10
+#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0)
+#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1)
+#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14
+#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE
+#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
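
These register headers follow the driver's S_/G_/C_ convention: S_FIELD(x) shifts a value into the field, G_FIELD(x) extracts it from a register value, and C_FIELD is the AND-mask that clears the field. A read-modify-write sketch using the HPD1 enable bit defined above (hypothetical helper):

/* Sketch: enable the HPD1 interrupt with the S_/C_ macro pair. */
static void example_enable_hpd1_irq(struct radeon_device *rdev)
{
	u32 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);

	tmp &= C_007D08_DC_HOT_PLUG_DETECT1_INT_EN;	/* clear the field */
	tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);	/* enable = 1 */
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
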
/* MC registers */
#define R_000000_MC_STATUS 0x000000
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 27547175cf9..1e22f52d603 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
void rs690_vram_info(struct radeon_device *rdev)
{
- uint32_t tmp;
fixed20_12 a;
rs400_gart_adjust_size(rdev);
- /* DDR for all card after R300 & IGP */
+
rdev->mc.vram_is_ddr = true;
- /* FIXME: is this correct for RS690/RS740 ? */
- tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
- }
+ rdev->mc.vram_width = 128;
+
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
+
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev)
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}
+static int rs690_mc_init(struct radeon_device *rdev)
+{
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2)
@@ -244,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
- request_fifo_depth.full = rfixed_div(a, c);
+ a.full = rfixed_div(b, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -374,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
+ wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -605,7 +624,6 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -640,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs690_startup(rdev);
}
@@ -662,7 +682,7 @@ void rs690_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -700,10 +720,9 @@ int rs690_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if the card is posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -711,7 +730,7 @@ int rs690_init(struct radeon_device *rdev)
/* Get vram information */
rs690_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs690_mc_init(rdev);
if (r)
return r;
rv515_debugfs(rdev);
@@ -723,7 +742,7 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ba68c9fe90a..59632a506b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -478,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -514,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rv515_startup(rdev);
}
@@ -540,11 +541,11 @@ void rv515_fini(struct radeon_device *rdev)
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
- rv370_pcie_gart_fini(rdev);
+ rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -580,10 +581,8 @@ int rv515_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if the card is posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -603,7 +602,7 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
@@ -892,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
- request_fifo_depth.full = rfixed_div(a, c);
+ a.full = rfixed_div(b, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -995,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
estimated_width.full = rfixed_div(estimated_width, consumption_time);
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ wm->priority_mark.full = wm->priority_mark_max.full;
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
+ wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5e06ee7076f..fbb0357f1ec 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
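Note the shape of the teardown above: with the radeon_object to radeon_bo rework, kmap and pin state may only be changed while the bo is reserved, and a failed reserve skips the teardown entirely instead of touching an unreserved object. The same pattern recurs throughout this series:

	r = radeon_bo_reserve(robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(robj);		/* only legal while reserved */
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
	}

If the reserve fails, the object is left alone; on these shutdown paths that is preferable to racing with another holder of the reservation.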
@@ -870,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
{
int r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
@@ -880,13 +892,26 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
@@ -934,13 +959,19 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
return 0;
}
@@ -975,7 +1006,11 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -998,31 +1033,31 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_irq_kms_init(rdev);
if (r)
return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
r = r600_pcie_gart_init(rdev);
if (r)
return r;
- rdev->accel_working = true;
r = r600_blit_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled blitter (%d).\n", r);
- rdev->accel_working = false;
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
}
+ rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
rv770_suspend(rdev);
@@ -1034,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
@@ -1051,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev)
rv770_suspend(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
@@ -1059,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c..1e138f5bae0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d46..a835b6fe42a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+/* Notes:
+ *
+ * We store the bo pointer in the drm_mm_node struct so we know which
+ * bo owns a specific node. There is no protection on the pointer, so to make
+ * sure things don't go berserk you have to access this pointer while
+ * holding the global lru lock and make sure anytime you free a node you
+ * reset the pointer to NULL.
+ */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
.mode = S_IRUGO
};
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
+ struct ttm_mem_type_manager *man)
+{
+ printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
+ printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
+ printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
+ printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
+ printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
+ printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
+ printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
+ printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
+ man->available_caching);
+ printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
+ man->default_caching);
+ spin_lock(&glob->lru_lock);
+ drm_mm_debug_table(&man->manager, TTM_PFX);
+ spin_unlock(&glob->lru_lock);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ struct ttm_mem_type_manager *man;
+ int i, ret, mem_type;
+
+ printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
+ for (i = 0; i < placement->num_placement; i++) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return;
+ man = &bdev->man[mem_type];
+ printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
+ ttm_mem_type_manager_debug(glob, man);
+ }
+}
+
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
@@ -117,7 +179,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
ret = wait_event_interruptible(bo->event_queue,
atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
}
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
/*
* Call bo->mutex locked.
*/
-
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags | TTM_PAGE_FLAG_USER,
glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
+ if (unlikely(bo->ttm == NULL)) {
ret = -ENOMEM;
- break;
+ break;
+ }
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
- struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
-
- *old_mem = *mem;
+ bo->mem = *mem;
mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, mem->placement,
- TTM_PL_MASK_MEMTYPE);
goto moved;
}
@@ -418,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
if (bo->mem.mm_node) {
+ bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
@@ -554,24 +611,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait)
{
- int ret = 0;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
- uint32_t proposed_placement;
-
- if (bo->mem.mem_type != mem_type)
- goto out;
+ struct ttm_placement placement;
+ int ret = 0;
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- if (ret != -ERESTART) {
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to expire sync object before "
"buffer eviction.\n");
@@ -584,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- proposed_placement = bdev->driver->evict_flags(bo);
-
- ret = ttm_bo_mem_space(bo, proposed_placement,
- &evict_mem, interruptible, no_wait);
- if (unlikely(ret != 0 && ret != -ERESTART))
- ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
- &evict_mem, interruptible, no_wait);
-
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+ no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to find memory space for "
"buffer 0x%p eviction.\n", bo);
+ ttm_bo_mem_space_debug(bo, &placement);
+ }
goto out;
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ spin_lock(&glob->lru_lock);
+ if (evict_mem.mm_node) {
+ evict_mem.mm_node->private = NULL;
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ spin_unlock(&glob->lru_lock);
goto out;
}
+ bo->evicted = true;
+out:
+ return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_buffer_object *bo;
+ int ret, put_count = 0;
spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
+ bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+ kref_get(&bo->list_kref);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- bo->evicted = true;
-out:
+ if (unlikely(ret != 0))
+ return ret;
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ttm_bo_unreserve(bo);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node **node)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ *node = NULL;
+ do {
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&glob->lru_lock);
+ *node = drm_mm_search_free_in_range(&man->manager,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(*node == NULL)) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ spin_unlock(&glob->lru_lock);
+ } while (*node == NULL);
+ return 0;
+}
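Why ttm_bo_man_get_node() loops: drm_mm_pre_get() preallocates free-list nodes while no spinlock is held (it may sleep), so that drm_mm_get_block_atomic_range() cannot fail on memory allocation under glob->lru_lock. If a concurrent allocator consumes the preallocated nodes between the search and the atomic get, the get returns NULL and the sequence is simply retried. The resulting contract for callers:

	ret == 0 && *node != NULL  ->  space was allocated in this manager
	ret == 0 && *node == NULL  ->  no free hole; caller falls back to eviction
	ret != 0                   ->  hard error (e.g. -ENOMEM from pre_get)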
+
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- uint32_t mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
- struct drm_mm_node *node;
- struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct list_head *lru;
- unsigned long num_pages = mem->num_pages;
- int put_count = 0;
+ struct drm_mm_node *node;
int ret;
-retry_pre_get:
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret != 0))
- return ret;
-
- spin_lock(&glob->lru_lock);
do {
- node = drm_mm_search_free(&man->manager, num_pages,
- mem->page_alignment, 1);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+ if (unlikely(ret != 0))
+ return ret;
if (node)
break;
-
- lru = &man->lru;
- if (list_empty(lru))
+ spin_lock(&glob->lru_lock);
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
break;
-
- entry = list_first_entry(lru, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
-
- ret =
- ttm_bo_reserve_locked(entry, interruptible, no_wait,
- false, 0);
-
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(entry);
-
+ }
spin_unlock(&glob->lru_lock);
-
+ ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+ no_wait);
if (unlikely(ret != 0))
return ret;
-
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
- ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
- ttm_bo_unreserve(entry);
-
- kref_put(&entry->list_kref, ttm_bo_release_list);
- if (ret)
- return ret;
-
- spin_lock(&glob->lru_lock);
} while (1);
-
- if (!node) {
- spin_unlock(&glob->lru_lock);
+ if (node == NULL)
return -ENOMEM;
- }
-
- node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- goto retry_pre_get;
- }
-
- spin_unlock(&glob->lru_lock);
mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
@@ -724,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result;
}
-
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
bool disallow_fixed,
uint32_t mem_type,
@@ -757,66 +833,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
* space.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man;
-
- uint32_t num_prios = bdev->driver->num_mem_type_prio;
- const uint32_t *prios = bdev->driver->mem_type_prio;
- uint32_t i;
uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0;
bool type_found = false;
bool type_ok = false;
- bool has_eagain = false;
+ bool has_erestartsys = false;
struct drm_mm_node *node = NULL;
- int ret;
+ int i, ret;
mem->mm_node = NULL;
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type, proposed_placement,
- &cur_flags);
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->placement[i],
+ &cur_flags);
if (!type_ok)
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, placement->placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
break;
if (man->has_type && man->use_type) {
type_found = true;
- do {
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret))
- return ret;
-
- spin_lock(&glob->lru_lock);
- node = drm_mm_search_free(&man->manager,
- mem->num_pages,
- mem->page_alignment,
- 1);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- break;
- }
- node = drm_mm_get_block_atomic(node,
- mem->num_pages,
- mem->
- page_alignment);
- spin_unlock(&glob->lru_lock);
- } while (!node);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem,
+ &node);
+ if (unlikely(ret))
+ return ret;
}
if (node)
break;
@@ -826,67 +891,65 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node;
mem->mem_type = mem_type;
mem->placement = cur_flags;
+ if (node)
+ node->private = bo;
return 0;
}
if (!type_found)
return -EINVAL;
- num_prios = bdev->driver->num_mem_busy_prio;
- prios = bdev->driver->mem_busy_prio;
-
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
-
if (!man->has_type)
continue;
-
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type,
- proposed_placement, &cur_flags))
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
- ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
- interruptible, no_wait);
-
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
+ mem->mm_node->private = bo;
return 0;
}
-
- if (ret == -ERESTART)
- has_eagain = true;
+ if (ret == -ERESTARTSYS)
+ has_erestartsys = true;
}
-
- ret = (has_eagain) ? -ERESTART : -ENOMEM;
+ ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
- int ret = 0;
-
if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
return -EBUSY;
- ret = wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-
- if (ret == -ERESTARTSYS)
- ret = -ERESTART;
-
- return ret;
+ return wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->cpu_writers) == 0);
}
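The rewrite above is one instance of an errno convention change running through this series. Schematically (cond stands for the wait condition):

	/* old TTM convention, now removed */
	ret = wait_event_interruptible(bo->event_queue, cond);
	if (ret == -ERESTARTSYS)
		ret = -ERESTART;	/* remapped again to -EINTR at the ioctl layer */

	/* new convention: pass -ERESTARTSYS through unchanged */
	return wait_event_interruptible(bo->event_queue, cond);

Returning -ERESTARTSYS lets the kernel's signal handling decide whether the syscall is transparently restarted or surfaces as EINTR, instead of TTM hardcoding the answer.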
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -899,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
-
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
-
if (ret)
return ret;
-
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
-
/*
* Determine where to move the buffer.
*/
-
- ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
- interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
if (ret)
goto out_unlock;
-
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
+ mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock);
}
return ret;
}
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
- return 0;
- if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
- return 0;
-
- return 1;
+ int i;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ if ((placement->placement[i] & mem->placement &
+ TTM_PL_MASK_CACHING) &&
+ (placement->placement[i] & mem->placement &
+ TTM_PL_MASK_MEM))
+ return i;
+ }
+ return -1;
}
int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
int ret;
BUG_ON(!atomic_read(&bo->reserved));
- bo->proposed_placement = proposed_placement;
-
- TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
- (unsigned long)proposed_placement,
- (unsigned long)bo->mem.placement);
-
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
/*
* Check whether we need to move buffer.
*/
-
- if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
- ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
- interruptible, no_wait);
- if (ret) {
- if (ret != -ERESTART)
- printk(KERN_ERR TTM_PFX
- "Failed moving buffer. "
- "Proposed placement 0x%08x\n",
- bo->proposed_placement);
- if (ret == -ENOMEM)
- printk(KERN_ERR TTM_PFX
- "Out of aperture space or "
- "DRM memory quota.\n");
+ ret = ttm_bo_mem_compat(placement, &bo->mem);
+ if (ret < 0) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ if (ret)
return ret;
- }
+ } else {
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags
+ */
+ ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ~TTM_PL_MASK_MEMTYPE);
}
-
/*
* We might need to add a TTM.
*/
-
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true);
if (ret)
return ret;
}
- /*
- * Validation has succeeded, move the access and other
- * non-mapping-related flag bits from the proposed flags to
- * the active flags
- */
-
- ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
- ~TTM_PL_MASK_MEMTYPE);
-
return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
@@ -1041,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
size_t acc_size,
void (*destroy) (struct ttm_buffer_object *))
{
- int ret = 0;
+ int i, c, ret = 0;
unsigned long num_pages;
+ uint32_t placements[8];
+ struct ttm_placement placement;
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1099,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i))
+ placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
+ placement.placement = placements;
+ placement.num_placement = c;
+ placement.busy_placement = placements;
+ placement.num_busy_placement = c;
+ ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
if (ret)
goto out_err;
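Spelled out, the conversion above maps the legacy flags mask onto the new placement list: every set memory-type bit becomes one placement entry (in ascending memory-type order) that keeps the caching and access bits but names exactly one memory type. Schematically:

	flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT | TTM_PL_FLAG_WC;

expands to

	placements[0] = TTM_PL_FLAG_TT | TTM_PL_FLAG_WC;
	placements[1] = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;

with busy_placement aliasing the same array, so callers of the legacy interface see identical behaviour in both the first-choice and the eviction pass.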
@@ -1134,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- int ret;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ int ret;
size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1160,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
return ret;
}
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
- uint32_t mem_type, bool allow_errors)
-{
- int ret;
-
- spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
-
- if (ret && allow_errors)
- goto out;
-
- if (bo->mem.mem_type == mem_type)
- ret = ttm_bo_evict(bo, mem_type, false, false);
-
- if (ret) {
- if (allow_errors) {
- goto out;
- } else {
- ret = 0;
- printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
- }
- }
-
-out:
- return ret;
-}
-
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- struct list_head *head,
- unsigned mem_type, bool allow_errors)
+ unsigned mem_type, bool allow_errors)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry;
int ret;
- int put_count;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
-
- while (!list_empty(head)) {
- entry = list_first_entry(head, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
- ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
- put_count = ttm_bo_del_from_lru(entry);
+ while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
- BUG_ON(ret);
- ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
- ttm_bo_unreserve(entry);
- kref_put(&entry->list_kref, ttm_bo_release_list);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ printk(KERN_ERR TTM_PFX
+ "Cleanup eviction failed\n");
+ }
+ }
spin_lock(&glob->lru_lock);
}
-
spin_unlock(&glob->lru_lock);
-
return 0;
}
@@ -1246,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0;
if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+ ttm_bo_force_list_clean(bdev, mem_type, false);
spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager))
@@ -1279,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0;
}
- return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+ return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_offset, unsigned long p_size)
+ unsigned long p_size)
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
@@ -1314,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type);
return ret;
}
- ret = drm_mm_init(&man->manager, p_offset, p_size);
+ ret = drm_mm_init(&man->manager, 0, p_size);
if (ret)
return ret;
}
@@ -1463,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0))
goto out_no_sys;
@@ -1693,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
ret = wait_event_interruptible
(bo->event_queue, atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue,
atomic_read(&bo->reserved) == 0);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 61c5572d2b9..2ecf7d0c64f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#endif
return tmp;
}
+EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long bus_base,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d04033..609a85a4d85 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_wait(bo, false, true, false);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- retval = (ret != -ERESTART) ?
+ retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
switch (ret) {
case 0:
break;
- case -ERESTART:
- ret = -EINTR;
- goto out_unref;
case -EBUSY:
ret = -EAGAIN;
goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
switch (ret) {
case 0:
break;
- case -ERESTART:
- return -EINTR;
case -EBUSY:
return -EAGAIN;
default:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 00000000000..c285c2902d1
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ entry->reserved = false;
+ ttm_bo_unreserve(bo);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+retry:
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ entry->reserved = false;
+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(list);
+ if (ret == -EAGAIN) {
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ } else
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation(list);
+ ret = ttm_bo_wait_cpu(bo, false);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ void *old_sync_obj;
+
+ spin_lock(&bo->lock);
+ old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ bo->sync_obj_arg = entry->new_sync_obj_arg;
+ spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
+ entry->reserved = false;
+ if (old_sync_obj)
+ driver->sync_obj_unref(&old_sync_obj);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
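Taken together, the three helpers in this new file implement the reserve, submit, fence lifecycle for a validation list. A driver-side sketch (the submission step and the fence object are placeholders, not part of this file):

	struct list_head validate_list;	/* entries: struct ttm_validate_buffer */

	ret = ttm_eu_reserve_buffers(&validate_list, val_seq);
	if (ret)
		return ret;		/* nothing is left reserved on error */

	ret = my_driver_submit(&validate_list);	/* hypothetical submission */
	if (ret) {
		ttm_eu_backoff_reservation(&validate_list);
		return ret;
	}

	/* attaches the fence to every bo and drops all reservations */
	ttm_eu_fence_buffer_objects(&validate_list, fence_sync_obj);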
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 00000000000..f619ebcaa4e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_module.h"
+#include <asm/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#define TTM_WRITE_LOCK_PENDING (1 << 0)
+#define TTM_VT_LOCK_PENDING (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
+#define TTM_VT_LOCK (1 << 3)
+#define TTM_SUSPEND_LOCK (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+ spin_lock_init(&lock->lock);
+ init_waitqueue_head(&lock->queue);
+ lock->rw = 0;
+ lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
+}
+EXPORT_SYMBOL(ttm_lock_init);
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ if (--lock->rw == 0)
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_read_unlock);
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ locked = true;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible)
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_read_lock(lock));
+ else
+ wait_event(lock->queue, __ttm_read_lock(lock));
+ return ret;
+}
+EXPORT_SYMBOL(ttm_read_lock);
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+ bool block = true;
+
+ *locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ block = false;
+ *locked = true;
+ } else if (lock->flags == 0) {
+ block = false;
+ }
+ spin_unlock(&lock->lock);
+
+ return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+ bool locked;
+
+ if (interruptible)
+ ret = wait_event_interruptible
+ (lock->queue, __ttm_read_trylock(lock, &locked));
+ else
+ wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+ if (unlikely(ret != 0)) {
+ BUG_ON(locked);
+ return ret;
+ }
+
+ return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 0;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_write_unlock);
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+ lock->rw = -1;
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ locked = true;
+ } else {
+ lock->flags |= TTM_WRITE_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_write_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ }
+ } else
+ wait_event(lock->queue, __ttm_write_lock(lock));
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_write_lock);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 1;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ spin_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ printk(KERN_INFO TTM_PFX "vt unlock.\n");
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_vt_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ return ret;
+ }
+ } else
+ wait_event(lock->queue, __ttm_vt_lock(lock));
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else {
+ lock->vt_holder = tfile;
+ printk(KERN_INFO TTM_PFX "vt lock.\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_vt_lock);
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.hash.key, TTM_REF_USAGE);
+}
+EXPORT_SYMBOL(ttm_vt_unlock);
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_SUSPEND_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+ lock->flags |= TTM_SUSPEND_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+ wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
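For reference, the lock-state encoding the functions above agree on:

	/*
	 * lock->rw  > 0 : number of readers currently holding the lock
	 * lock->rw == 0 : unlocked
	 * lock->rw == -1: held by a single writer
	 * lock->flags   : pending and held bits for the write, vt and
	 *                 suspend lockers; any such bit blocks new readers
	 *                 so the heavier lock modes are not starved.
	 */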
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a6bb..f5245c02b8f 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
- if (unlikely(!zone))
- return -ENOMEM;
-
if (si->totalhigh == 0)
return 0;
+ zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ if (unlikely(!zone))
+ return -ENOMEM;
+
mem = si->totalram;
mem *= si->mem_unit;
@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
* No special dma32 zone needed.
*/
- if (mem <= ((uint64_t) 1ULL << 32))
+ if (mem <= ((uint64_t) 1ULL << 32)) {
+ kfree(zone);
return 0;
+ }
/*
* Limit max dma32 memory to 4GB for now
@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
+EXPORT_SYMBOL(ttm_mem_global_free);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
+EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size)
}
return 0;
}
+EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 00000000000..1099abac824
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_ref_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+struct ttm_object_file {
+ struct ttm_object_device *tdev;
+ rwlock_t lock;
+ struct list_head ref_list;
+ struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+ rwlock_t object_lock;
+ struct drm_open_hash object_hash;
+ atomic_t object_count;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+ struct drm_hash_item hash;
+ struct list_head head;
+ struct kref kref;
+ struct ttm_base_object *obj;
+ enum ttm_ref_type ref_type;
+ struct ttm_object_file *tfile;
+};
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+ kref_get(&tfile->refcount);
+ return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+ struct ttm_object_file *tfile =
+ container_of(kref, struct ttm_object_file, refcount);
+
+ kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type object_type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ int ret;
+
+ base->shareable = shareable;
+ base->tfile = ttm_object_file_ref(tfile);
+ base->refcount_release = refcount_release;
+ base->ref_obj_release = ref_obj_release;
+ base->object_type = object_type;
+ write_lock(&tdev->object_lock);
+ kref_init(&base->refcount);
+ ret = drm_ht_just_insert_please(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ write_unlock(&tdev->object_lock);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+out_err1:
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+out_err0:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_base_object_init);
+
+static void ttm_release_base(struct kref *kref)
+{
+ struct ttm_base_object *base =
+ container_of(kref, struct ttm_base_object, refcount);
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ write_unlock(&tdev->object_lock);
+ if (base->refcount_release) {
+ ttm_object_file_unref(&base->tfile);
+ base->refcount_release(&base);
+ }
+ write_lock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ *p_base = NULL;
+
+ /*
+ * Need to take the lock here to avoid racing with
+ * users trying to look up the object.
+ */
+
+ write_lock(&tdev->object_lock);
+ (void)kref_put(&base->refcount, &ttm_release_base);
+ write_unlock(&tdev->object_lock);
+}
+EXPORT_SYMBOL(ttm_base_object_unref);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ uint32_t key)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct drm_hash_item *hash;
+ int ret;
+
+ read_lock(&tdev->object_lock);
+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+ if (likely(ret == 0)) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ kref_get(&base->refcount);
+ }
+ read_unlock(&tdev->object_lock);
+
+ if (unlikely(ret != 0))
+ return NULL;
+
+ if (tfile != base->tfile && !base->shareable) {
+ printk(KERN_ERR TTM_PFX
+ "Attempted access of non-shareable object.\n");
+ ttm_base_object_unref(&base);
+ return NULL;
+ }
+
+ return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ int ret = -EINVAL;
+
+ if (existed != NULL)
+ *existed = true;
+
+ while (ret == -EINVAL) {
+ read_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+ if (ret == 0) {
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_get(&ref->kref);
+ read_unlock(&tfile->lock);
+ break;
+ }
+
+ read_unlock(&tfile->lock);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+ if (unlikely(ref == NULL)) {
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ return -ENOMEM;
+ }
+
+ ref->hash.key = base->hash.key;
+ ref->obj = base;
+ ref->tfile = tfile;
+ ref->ref_type = ref_type;
+ kref_init(&ref->kref);
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_insert_item(ht, &ref->hash);
+
+ if (likely(ret == 0)) {
+ list_add_tail(&ref->head, &tfile->ref_list);
+ kref_get(&base->refcount);
+ write_unlock(&tfile->lock);
+ if (existed != NULL)
+ *existed = false;
+ break;
+ }
+
+ write_unlock(&tfile->lock);
+ BUG_ON(ret != -EINVAL);
+
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_ref_object_add);
+
+static void ttm_ref_object_release(struct kref *kref)
+{
+ struct ttm_ref_object *ref =
+ container_of(kref, struct ttm_ref_object, kref);
+ struct ttm_base_object *base = ref->obj;
+ struct ttm_object_file *tfile = ref->tfile;
+ struct drm_open_hash *ht;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+ ht = &tfile->ref_hash[ref->ref_type];
+ (void)drm_ht_remove_item(ht, &ref->hash);
+ list_del(&ref->head);
+ write_unlock(&tfile->lock);
+
+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+ base->ref_obj_release(base, ref->ref_type);
+
+ ttm_base_object_unref(&ref->obj);
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ write_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key, enum ttm_ref_type ref_type)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ int ret;
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, key, &hash);
+ if (unlikely(ret != 0)) {
+ write_unlock(&tfile->lock);
+ return -EINVAL;
+ }
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_put(&ref->kref, ttm_ref_object_release);
+ write_unlock(&tfile->lock);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_ref_object_base_unref);
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+ struct ttm_ref_object *ref;
+ struct list_head *list;
+ unsigned int i;
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ write_lock(&tfile->lock);
+
+ /*
+ * Since we release the lock within the loop, we have to
+ * restart it from the beginning each time.
+ */
+
+ while (!list_empty(&tfile->ref_list)) {
+ list = tfile->ref_list.next;
+ ref = list_entry(list, struct ttm_ref_object, head);
+ ttm_ref_object_release(&ref->kref);
+ }
+
+ for (i = 0; i < TTM_REF_NUM; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ write_unlock(&tfile->lock);
+ ttm_object_file_unref(&tfile);
+}
+EXPORT_SYMBOL(ttm_object_file_release);
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ unsigned int hash_order)
+{
+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ unsigned int i;
+ unsigned int j = 0;
+ int ret;
+
+ if (unlikely(tfile == NULL))
+ return NULL;
+
+ rwlock_init(&tfile->lock);
+ tfile->tdev = tdev;
+ kref_init(&tfile->refcount);
+ INIT_LIST_HEAD(&tfile->ref_list);
+
+ for (i = 0; i < TTM_REF_NUM; ++i) {
+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+ if (ret) {
+ j = i;
+ goto out_err;
+ }
+ }
+
+ return tfile;
+out_err:
+ for (i = 0; i < j; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ kfree(tfile);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_file_init);
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+ *mem_glob,
+ unsigned int hash_order)
+{
+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(tdev == NULL))
+ return NULL;
+
+ tdev->mem_glob = mem_glob;
+ rwlock_init(&tdev->object_lock);
+ atomic_set(&tdev->object_count, 0);
+ ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+ if (likely(ret == 0))
+ return tdev;
+
+ kfree(tdev);
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_device_init);
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+ struct ttm_object_device *tdev = *p_tdev;
+
+ *p_tdev = NULL;
+
+ write_lock(&tdev->object_lock);
+ drm_ht_remove(&tdev->object_hash);
+ write_unlock(&tdev->object_lock);
+
+ kfree(tdev);
+}
+EXPORT_SYMBOL(ttm_object_device_release);
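A lifecycle sketch for the base and ref object API above (driver-side pseudocode; my_obj, my_obj_type and my_obj_release are hypothetical names, with my_obj embedding a struct ttm_base_object):

	ret = ttm_base_object_init(tfile, &my_obj->base, true /* shareable */,
				   my_obj_type, &my_obj_release, NULL);
	if (ret)
		return ret;
	/* the object is now kept alive by tfile's TTM_REF_USAGE reference;
	 * my_obj->base.hash.key is the handle to report to userspace */

	base = ttm_base_object_lookup(tfile2, my_obj->base.hash.key);
	if (base) {			/* NULL if the key is unknown or not shareable */
		/* ... use the object ... */
		ttm_base_object_unref(&base);
	}

	/* handle close / explicit destroy: drop the usage reference */
	ttm_ref_object_base_unref(tfile, my_obj->base.hash.key, TTM_REF_USAGE);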
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f39ce..9c2b1cc5dba 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
ttm->state = tt_unbound;
return 0;
}
+EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 7cb50bdde46..e3f46e0cb7d 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,17 +36,27 @@
#ifndef _DRM_H_
#define _DRM_H_
+#if defined(__linux__)
+
#include <linux/types.h>
-#include <asm/ioctl.h> /* For _IO* macros */
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_IOC_VOID _IOC_NONE
-#define DRM_IOC_READ _IOC_READ
-#define DRM_IOC_WRITE _IOC_WRITE
-#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
-#define DRM_MAJOR 226
-#define DRM_MAX_MINOR 15
+#else /* One of the BSDs */
+
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+typedef unsigned long drm_handle_t;
+
+#endif
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
@@ -59,7 +69,6 @@
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
-typedef unsigned int drm_handle_t;
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
@@ -454,6 +463,7 @@ struct drm_irq_busid {
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
@@ -461,8 +471,8 @@ enum drm_vblank_seq_type {
};
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
- _DRM_VBLANK_NEXTONMISS)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
enum drm_vblank_seq_type type;
@@ -686,6 +696,8 @@ struct drm_gem_open {
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
/**
* Device specific ioctls should only be in their respective headers
@@ -698,6 +710,35 @@ struct drm_gem_open {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
+/**
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will only ever return complete
+ * events; for example, if the read buffer is 100 bytes and two
+ * 64 byte events are pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+ __u32 type;
+ __u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 sequence;
+ __u32 reserved;
+};
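
As a hedged illustration of the read contract above (not part of this patch): a minimal userspace loop that drains complete events from an already-opened drm fd. The uapi structs are re-declared locally so the sketch stands alone.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Local copies of the structs above so the sketch stands alone. */
struct drm_event { uint32_t type; uint32_t length; };
struct drm_event_vblank {
	struct drm_event base;
	uint64_t user_data;
	uint32_t tv_sec, tv_usec, sequence, reserved;
};

#define DRM_EVENT_VBLANK	0x01
#define DRM_EVENT_FLIP_COMPLETE	0x02

static void drain_drm_events(int drm_fd)
{
	char buf[1024];
	ssize_t len = read(drm_fd, buf, sizeof(buf));
	ssize_t i = 0;

	/* read() only returns whole events, so the buffer can be walked
	 * header by header without handling partial records. */
	while (len > 0 && i + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event ev;

		memcpy(&ev, buf + i, sizeof(ev));
		if (ev.length < sizeof(ev))
			break;	/* malformed; stop walking */
		if ((ev.type == DRM_EVENT_VBLANK ||
		     ev.type == DRM_EVENT_FLIP_COMPLETE) &&
		    i + (ssize_t)sizeof(struct drm_event_vblank) <= len) {
			struct drm_event_vblank vbl;

			memcpy(&vbl, buf + i, sizeof(vbl));
			printf("event %u: seq %u at %u.%06u, data %llu\n",
			       ev.type, vbl.sequence, vbl.tv_sec, vbl.tv_usec,
			       (unsigned long long)vbl.user_data);
		}
		i += ev.length;	/* length includes the header */
	}
}
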
+
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c8e64bbadbc..19ef8ebdc66 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -245,16 +245,6 @@ extern void drm_ut_debug_printk(unsigned int request_level,
#endif
-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
-
-#define DRM_PROC_PRINT(fmt, arg...) \
- len += sprintf(&buf[len], fmt , ##arg); \
- if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
-
-#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
- len += sprintf(&buf[len], fmt , ##arg); \
- if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
-
/*@}*/
/***********************************************************************/
@@ -265,19 +255,8 @@ extern void drm_ut_debug_printk(unsigned int request_level,
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
-#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * Get the private SAREA mapping.
- *
- * \param _dev DRM device.
- * \param _ctx context number.
- * \param _map output mapping.
- */
-#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
- (_map) = (_dev)->context_sareas[_ctx]; \
-} while(0)
/**
* Test that the hardware lock is held by the caller, returning otherwise.
@@ -297,18 +276,6 @@ do { \
} while (0)
/**
- * Copy and IOCTL return string to user space
- */
-#define DRM_COPY( name, value ) \
- len = strlen( value ); \
- if ( len > name##_len ) len = name##_len; \
- name##_len = strlen( value ); \
- if ( len && name ) { \
- if ( copy_to_user( name, value, len ) ) \
- return -EFAULT; \
- }
-
-/**
* Ioctl function type.
*
* \param inode device inode.
@@ -322,6 +289,9 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_MAJOR 226
+
#define DRM_AUTH 0x1
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
@@ -426,6 +396,14 @@ struct drm_buf_entry {
struct drm_freelist freelist;
};
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+ struct drm_event *event;
+ struct list_head link;
+ struct drm_file *file_priv;
+ void (*destroy)(struct drm_pending_event *event);
+};
+
/** File private data */
struct drm_file {
int authenticated;
@@ -449,6 +427,10 @@ struct drm_file {
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
struct list_head fbs;
+
+ wait_queue_head_t event_wait;
+ struct list_head event_list;
+ int event_space;
};
/** Wait queue */
@@ -795,6 +777,15 @@ struct drm_driver {
/* Master routines */
int (*master_create)(struct drm_device *dev, struct drm_master *master);
void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ /**
+ * master_set is called whenever the minor master is set.
+ * master_drop is called whenever the minor master is dropped.
+ */
+
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_release);
int (*proc_init)(struct drm_minor *minor);
void (*proc_cleanup)(struct drm_minor *minor);
@@ -900,6 +891,12 @@ struct drm_minor {
struct drm_mode_group mode_group;
};
+struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+ int pipe;
+ struct drm_event_vblank event;
+};
+
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
@@ -999,6 +996,12 @@ struct drm_device {
u32 max_vblank_count; /**< size of vblank counter register */
+ /**
+ * List of events
+ */
+ struct list_head vblank_event_list;
+ spinlock_t event_lock;
+
/*@} */
cycles_t ctx_start;
cycles_t lck_start;
@@ -1135,6 +1138,8 @@ extern int drm_lastclose(struct drm_device *dev);
extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_stub_open(struct inode *inode, struct file *filp);
extern int drm_fasync(int fd, struct file *filp, int on);
+extern ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
@@ -1295,6 +1300,7 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
@@ -1519,14 +1525,27 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
+ if (size != 0 && nmemb > ULONG_MAX / size)
+ return NULL;
+
if (size * nmemb <= PAGE_SIZE)
return kcalloc(nmemb, size, GFP_KERNEL);
+ return __vmalloc(size * nmemb,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+}
+
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
if (size != 0 && nmemb > ULONG_MAX / size)
return NULL;
+ if (size * nmemb <= PAGE_SIZE)
+ return kmalloc(nmemb * size, GFP_KERNEL);
+
return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
static __inline void drm_free_large(void *ptr)
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b69347b8904..fdf43abc36d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -123,7 +123,7 @@ struct drm_display_mode {
int type;
/* Proposed mode values */
- int clock;
+ int clock; /* in kHz */
int hdisplay;
int hsync_start;
int hsync_end;
@@ -164,8 +164,8 @@ struct drm_display_mode {
int *private;
int private_flags;
- int vrefresh;
- float hsync;
+ int vrefresh; /* in Hz */
+ int hsync; /* in kHz */
};
enum drm_connector_status {
@@ -242,6 +242,21 @@ struct drm_framebuffer_funcs {
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
+ /**
+ * Optional callback for the dirty fb ioctl.
+ *
+ * Userspace can notify the driver via this callback
+ * that an area of the framebuffer has changed and should
+ * be flushed to the display hardware.
+ *
+ * See the documentation of struct drm_mode_fb_dirty_cmd in
+ * drm_mode.h for more information; the semantics and arguments
+ * map one to one onto this function.
+ */
+ int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
};
struct drm_framebuffer {
@@ -256,7 +271,7 @@ struct drm_framebuffer {
unsigned int depth;
int bits_per_pixel;
int flags;
- void *fbdev;
+ struct fb_info *fbdev;
u32 pseudo_palette[17];
struct list_head filp_head;
/* if you are using the helper */
@@ -290,6 +305,7 @@ struct drm_property {
struct drm_crtc;
struct drm_connector;
struct drm_encoder;
+struct drm_pending_vblank_event;
/**
* drm_crtc_funcs - control CRTCs for a given device
@@ -333,6 +349,19 @@ struct drm_crtc_funcs {
void (*destroy)(struct drm_crtc *crtc);
int (*set_config)(struct drm_mode_set *set);
+
+ /*
+ * Flip to the given framebuffer. This implements the page
+ * flip ioctl described in drm_mode.h; specifically, the
+ * implementation must return immediately and block all
+ * rendering to the current fb until the flip has completed.
+ * If userspace set the event flag in the ioctl, the event
+ * argument will point to an event to send back when the flip
+ * completes, otherwise it will be NULL.
+ */
+ int (*page_flip)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
};
/**
@@ -596,6 +625,7 @@ struct drm_mode_config {
/* Optional properties */
struct drm_property *scaling_mode_property;
struct drm_property *dithering_mode_property;
+ struct drm_property *dirty_info_property;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -667,6 +697,7 @@ extern void drm_mode_validate_size(struct drm_device *dev,
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(struct drm_display_mode *mode);
extern int drm_mode_vrefresh(struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
@@ -703,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern char *drm_get_encoder_name(struct drm_encoder *encoder);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -711,7 +743,8 @@ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
-extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
/* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -730,6 +763,8 @@ extern int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern int drm_mode_addmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
@@ -756,6 +791,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins);
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/include/drm/drm_dp_helper.h
index 2b38054d3b6..a49e791db0b 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/include/drm/drm_dp_helper.h
@@ -20,8 +20,8 @@
* OF THIS SOFTWARE.
*/
-#ifndef _INTEL_DP_H_
-#define _INTEL_DP_H_
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
/* From the VESA DisplayPort spec */
@@ -43,16 +43,41 @@
#define AUX_I2C_REPLY_MASK (0x3 << 6)
/* AUX CH addresses */
-#define DP_LINK_BW_SET 0x100
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION (1 << 3)
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
-#define DP_LANE_COUNT_SET 0x101
+#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
-#define DP_TRAINING_PATTERN_SET 0x102
-
+#define DP_TRAINING_PATTERN_SET 0x102
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
@@ -102,11 +127,14 @@
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
-
# define DP_LANE_CR_DONE (1 << 0)
# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
#define DP_INTERLANE_ALIGN_DONE (1 << 0)
@@ -120,25 +148,33 @@
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
-
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
struct i2c_algo_dp_aux_data {
bool running;
u16 address;
int (*aux_ch) (struct i2c_adapter *adapter,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_bytes);
+ int mode, uint8_t write_byte,
+ uint8_t *read_byte);
};
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
-#endif /* _INTEL_DP_H_ */
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7d6c9a2dfcb..d33c3e03860 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -106,6 +106,10 @@ struct detailed_data_color_point {
u8 wpindex2[3];
} __attribute__((packed));
+struct cvt_timing {
+ u8 code[3];
+} __attribute__((packed));
+
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
@@ -117,9 +121,13 @@ struct detailed_non_pixel {
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[5];
+ struct cvt_timing cvt[4];
} data;
} __attribute__((packed));
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 62329f9a42c..4c10be39a43 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic);
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
{
return drm_mm_get_block_generic(parent, size, alignment, 1);
}
+static inline struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 1);
+}
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm;
}
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif
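
The new range-restricted helpers are meant to be used as a search-then-get pair, mirroring drm_mm_search_free()/drm_mm_get_block(). A hedged sketch of that pairing, using only the declarations above (the first-fit meaning of best_match == 0 is assumed from the non-range variant):

static struct drm_mm_node *example_alloc_in_range(struct drm_mm *mm,
						  unsigned long size,
						  unsigned alignment,
						  unsigned long start,
						  unsigned long end)
{
	struct drm_mm_node *free_node;

	/* Find a free hole that can satisfy the request inside [start, end). */
	free_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0 /* first fit */);
	if (!free_node)
		return NULL;

	/* Carve the actual block out of the hole, still range-bound. */
	return drm_mm_get_block_range(free_node, size, alignment, start, end);
}
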
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 1f908416aed..43009bc2e75 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -27,9 +27,6 @@
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
-#include <linux/kernel.h>
-#include <linux/types.h>
-
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
@@ -78,6 +75,11 @@
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
@@ -225,6 +227,45 @@ struct drm_mode_fb_cmd {
__u32 handle;
};
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * as a hardware or software draw to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; an annotation is a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions is half of the num_clips given,
+ * with the clip rects paired as src and dst. The width and
+ * height of each pair must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
+ * promises that the region specified by the clip rects is filled
+ * completely with the single color given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ __u32 fb_id;
+ __u32 flags;
+ __u32 color;
+ __u32 num_clips;
+ __u64 clips_ptr;
+};
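
A hedged userspace sketch of filling this struct for a plain, unannotated update; the ioctl request value and the fb_id are assumed to come from elsewhere, and the uapi types are re-declared locally so the sketch stands alone.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Local copies of the uapi types so the sketch stands alone. */
struct drm_clip_rect { unsigned short x1, y1, x2, y2; };
struct drm_mode_fb_dirty_cmd {
	uint32_t fb_id;
	uint32_t flags;
	uint32_t color;
	uint32_t num_clips;
	uint64_t clips_ptr;
};

/* dirtyfb_request stands in for DRM_IOCTL_MODE_DIRTYFB. */
static int mark_fb_dirty(int drm_fd, unsigned long dirtyfb_request,
			 uint32_t fb_id, const struct drm_clip_rect *clips,
			 uint32_t num_clips)
{
	struct drm_mode_fb_dirty_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.fb_id = fb_id;
	cmd.flags = 0;				/* no copy/fill annotation */
	cmd.num_clips = num_clips;
	cmd.clips_ptr = (uintptr_t)clips;	/* user pointer as __u64 */

	return ioctl(drm_fd, dirtyfb_request, &cmd);
}
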
+
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
@@ -268,4 +309,37 @@ struct drm_mode_crtc_lut {
__u64 blue;
};
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
+ * request that drm sends back a vblank event (see drm.h: struct
+ * drm_event_vblank) when the page flip is done. The user_data field
+ * passed in with this ioctl will be returned as the user_data field
+ * in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 reserved;
+ __u64 user_data;
+};
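
A hedged userspace sketch of queueing a flip with the event flag set; the ioctl request value and valid crtc/fb ids are assumed to come from elsewhere. The returned event can then be picked up with the read loop shown earlier for struct drm_event_vblank.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

struct drm_mode_crtc_page_flip {
	uint32_t crtc_id;
	uint32_t fb_id;
	uint32_t flags;
	uint32_t reserved;
	uint64_t user_data;
};

#define DRM_MODE_PAGE_FLIP_EVENT 0x01

/* flip_request stands in for DRM_IOCTL_MODE_PAGE_FLIP. */
static int queue_page_flip(int drm_fd, unsigned long flip_request,
			   uint32_t crtc_id, uint32_t fb_id,
			   uint64_t user_data)
{
	struct drm_mode_crtc_page_flip flip;

	memset(&flip, 0, sizeof(flip));		/* reserved must be zero */
	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT;	/* ask for a vblank event */
	flip.user_data = user_data;		/* echoed back in the event */

	if (ioctl(drm_fd, flip_request, &flip))
		return -errno;	/* e.g. EBUSY if a flip is already pending */
	return 0;
}
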
+
#endif
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 26641e95e0a..393369147a2 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -123,5 +123,5 @@ do { \
remove_wait_queue(&(queue), &entry); \
} while (0)
-#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
+#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 7e0cb1da92e..ec3f5e80a5d 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -27,11 +27,11 @@
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
+#include "drm.h"
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
-#include <linux/types.h>
-#include "drm.h"
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
@@ -186,6 +186,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -221,8 +223,10 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
-#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -266,6 +270,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
typedef struct drm_i915_getparam {
int param;
@@ -686,4 +692,70 @@ struct drm_i915_gem_madvise {
__u32 retained;
};
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ __u32 flags;
+ /* source picture description */
+ __u32 bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ __u16 stride_Y; /* stride for packed formats */
+ __u16 stride_UV;
+	__u32 offset_Y; /* offset for packed formats */
+ __u32 offset_U;
+ __u32 offset_V;
+ /* in pixels */
+ __u16 src_width;
+ __u16 src_height;
+ /* to compensate the scaling factors for partially covered surfaces */
+ __u16 src_scan_width;
+ __u16 src_scan_height;
+ /* output crtc description */
+ __u32 crtc_id;
+ __u16 dst_x;
+ __u16 dst_y;
+ __u16 dst_width;
+ __u16 dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+struct drm_intel_overlay_attrs {
+ __u32 flags;
+ __u32 color_key;
+ __s32 brightness;
+ __u32 contrast;
+ __u32 saturation;
+ __u32 gamma0;
+ __u32 gamma1;
+ __u32 gamma2;
+ __u32 gamma3;
+ __u32 gamma4;
+ __u32 gamma5;
+};
+
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 325fd6fb4a4..3ffbc4798af 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -35,7 +35,7 @@
#ifndef __MGA_DRM_H__
#define __MGA_DRM_H__
-#include <linux/types.h>
+#include "drm.h"
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 3b9932ab175..39537f3cf98 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -33,7 +33,7 @@
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
-#include <linux/types.h>
+#include "drm.h"
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 49114617052..4fd498523ce 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,6 +44,29 @@ struct ttm_bo_device;
struct drm_mm_node;
+
+/**
+ * struct ttm_placement
+ *
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
+ * @num_placement: number of preferred placements
+ * @placement: preferred placements
+ * @num_busy_placement: number of preferred placements when the buffer must be evicted
+ * @busy_placement: preferred placements when the buffer must be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+ unsigned fpfn;
+ unsigned lpfn;
+ unsigned num_placement;
+ const uint32_t *placement;
+ unsigned num_busy_placement;
+ const uint32_t *busy_placement;
+};
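
A hedged sketch of how a driver might fill this struct: prefer a range-restricted placement and fall back to another memory domain under pressure. The flag values are placeholders, since the real TTM_PL_FLAG_* definitions live outside this hunk.

/* Placeholder flag values; real drivers use the TTM_PL_FLAG_*
 * definitions, which are not part of this hunk. */
static const uint32_t example_vram_flags[]   = { 0x1 };
static const uint32_t example_system_flags[] = { 0x2 };

/* Prefer VRAM in the first 1 GiB of the aperture (4 KiB pages) and
 * fall back to system memory when eviction would otherwise be needed. */
static const struct ttm_placement example_placement = {
	.fpfn = 0,
	.lpfn = (1024 * 1024 * 1024) / 4096,
	.num_placement = 1,
	.placement = example_vram_flags,
	.num_busy_placement = 1,
	.busy_placement = example_system_flags,
};
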
+
+
/**
* struct ttm_mem_reg
*
@@ -109,10 +132,6 @@ struct ttm_tt;
* the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change.
* @lock: spinlock protecting mostly synchronization members.
- * @proposed_placement: Proposed placement for the buffer. Changed only by the
- * creator prior to validation as opposed to bo->mem.proposed_flags which is
- * changed by the implementation prior to a buffer move if it wants to outsmart
- * the buffer creator / user. This latter happens, for example, at eviction.
* @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
@@ -177,7 +196,6 @@ struct ttm_buffer_object {
* Members protected by the bo::reserved lock.
*/
- uint32_t proposed_placement;
struct ttm_mem_reg mem;
struct file *persistant_swap_storage;
struct ttm_tt *ttm;
@@ -285,7 +303,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* Note: It might be necessary to block validations before the
* wait by reserving the buffer.
* Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTART if interrupted by a signal.
+ * Returns -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* ttm_buffer_object_validate
*
* @bo: The buffer object.
- * @proposed_placement: Proposed_placement for the buffer object.
+ * @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
* @no_wait: Return immediately if the buffer is busy.
*
* Changes placement and caching policy of the buffer object
- * according to bo::proposed_flags.
+ * according to the proposed placement.
* Returns
- * -EINVAL on invalid proposed_flags.
+ * -EINVAL on invalid proposed placement.
* -ENOMEM on out-of-memory condition.
* -EBUSY if no_wait is true and buffer busy.
- * -ERESTART if interrupted by a signal.
+ * -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait);
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait);
+
/**
* ttm_bo_unref
*
@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
* waiting for buffer idle. This lock is recursive.
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTART if interrupted by a signal.
+ * -ERESTARTSYS if interrupted by a signal.
*/
extern int
@@ -371,7 +390,7 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
- * -ERESTART: Interrupted by signal while sleeping waiting for resources.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
@@ -411,7 +430,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
- * -ERESTART: Interrupted by signal while waiting for resources.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
*/
extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
- * @p_offset: offset for managed area in pages.
* @p_size: size managed area in pages.
*
* Initialize a manager for a given memory type.
@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_offset, unsigned long p_size);
+ unsigned long p_size);
/**
* ttm_bo_clean_mm
*
@@ -503,7 +521,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
*
* Returns:
* -EINVAL: Invalid or uninitialized memory type.
- * -ERESTART: The call was interrupted by a signal while waiting to
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer.
*/
@@ -606,7 +624,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* be called from the fops::read and fops::write method.
* Returns:
* See man (2) write, man(2) read. In particular,
- * the function may return -EINTR if
+ * the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e8cd6d20aed..ff7664e0c3c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
/**
* struct ttm_bo_driver
*
- * @mem_type_prio: Priority array of memory types to place a buffer object in
- * if it fits without evicting buffers from any of these memory types.
- * @mem_busy_prio: Priority array of memory types to place a buffer object in
- * if it needs to evict buffers to make room.
- * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
- * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
* @create_ttm_backend_entry: Callback to create a struct ttm_backend.
* @invalidate_caches: Callback to invalidate read caches when a buffer object
* has been evicted.
@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
*/
struct ttm_bo_driver {
- const uint32_t *mem_type_prio;
- const uint32_t *mem_busy_prio;
- uint32_t num_mem_type_prio;
- uint32_t num_mem_busy_prio;
-
/**
* struct ttm_bo_driver member create_ttm_backend_entry
*
@@ -306,7 +295,8 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags
*/
- uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
+ void(*evict_flags) (struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
/**
* struct ttm_bo_driver member move:
*
@@ -545,6 +535,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
+ * ttm_tt_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_tt_populate(struct ttm_tt *ttm);
+
+/**
* ttm_ttm_destroy:
*
* @ttm: The struct ttm_tt.
@@ -639,12 +638,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* -EBUSY: No space available (only if no_wait == 1).
* -ENOMEM: Could not allocate memory for the buffer object, either due to
* fragmentation or concurrent allocators.
- * -ERESTART: An interruptible sleep was interrupted by a signal.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait);
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait);
/**
* ttm_bo_wait_for_cpu
*
@@ -654,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Wait until a buffer object is no longer sync'ed for CPU access.
* Returns:
* -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTART: An interruptible sleep was interrupted by a signal.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
@@ -758,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
* -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1).
- * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
@@ -799,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
*
* Returns:
* -EBUSY: If no_wait == 1 and the buffer is already reserved.
- * -ERESTART: If interruptible == 1 and the process received a signal
+ * -ERESTARTSYS: If interruptible == 1 and the process received a signal
* while sleeping.
*/
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
new file mode 100644
index 00000000000..cd2c475da9e
--- /dev/null
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include "ttm/ttm_bo_api.h"
+#include <linux/list.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head: list head for thread-private list.
+ * @bo: refcounted buffer object pointer.
+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used when
+ * adding a new sync object.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ */
+
+struct ttm_validate_buffer {
+ struct list_head head;
+ struct ttm_buffer_object *bo;
+ void *new_sync_obj_arg;
+ bool reserved;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @val_seq: A unique sequence number.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * This function may return -ERESTARTSYS or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @sync_obj: The new sync object for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+
+#endif
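
A hedged driver-side sketch of the call order the comments above imply; emit_commands() stands in for the driver's real command-stream hook and is an assumption, as is the sync_obj passed in.

/* emit_commands() is an assumed driver hook, not part of this API. */
static int example_submit(struct list_head *val_list, uint32_t val_seq,
			  void *sync_obj)
{
	int ret;

	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (ret != 0)
		return ret;	/* nothing on the list is left reserved */

	ret = emit_commands();
	if (ret != 0) {
		/* Failure: undo every reservation taken above. */
		ttm_eu_backoff_reservation(val_list);
		return ret;
	}

	/* Success: fence the buffers and put them back on the LRU lists. */
	ttm_eu_fence_buffer_objects(val_list, sync_obj);
	return 0;
}
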
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
new file mode 100644
index 00000000000..81ba0b0b891
--- /dev/null
+++ b/include/drm/ttm/ttm_lock.h
@@ -0,0 +1,247 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while holding the vt lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include "ttm/ttm_object.h"
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+ struct ttm_base_object base;
+ wait_queue_head_t queue;
+ spinlock_t lock;
+ int32_t rw;
+ uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+ struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
+
+#endif
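
A hedged sketch of the master-teardown sequence described in the ttm_lock_set_kill() comment, using only the declarations above; the caller, the lock instance, and the choice of SIGTERM are illustrative assumptions.

/* SIGTERM here is illustrative; any signal could be configured. */
static void example_master_shutdown(struct ttm_lock *lock,
				    struct ttm_object_file *master_tfile)
{
	/* Block all user-space validation while tearing things down. */
	if (ttm_vt_lock(lock, true, master_tfile) != 0)
		return;

	/* From now on, any client that takes the lock is killed. */
	ttm_lock_set_kill(lock, true, SIGTERM);

	/* ... tear down TTM resources here ... */

	/* If the master dies instead, the vt lock is released on file
	 * release; otherwise drop it explicitly. */
	ttm_vt_unlock(lock);
}
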
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 6983a7cf4da..b199170b3c2 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -33,6 +33,7 @@
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kobject.h>
+#include <linux/mm.h>
/**
* struct ttm_mem_shrink - callback to shrink TTM memory usage.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
new file mode 100644
index 00000000000..703ca4db0a2
--- /dev/null
+++ b/include/drm/ttm/ttm_object.h
@@ -0,0 +1,267 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <linux/list.h>
+#include "drm_hashtab.h"
+#include <linux/kref.h>
+#include <ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+ TTM_REF_USAGE,
+ TTM_REF_SYNCCPU_READ,
+ TTM_REF_SYNCCPU_WRITE,
+ TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typex types.
+ */
+
+enum ttm_object_type {
+ ttm_fence_type,
+ ttm_buffer_type,
+ ttm_lock_type,
+ ttm_driver_type0 = 256,
+ ttm_driver_type1
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * this function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+ struct drm_hash_item hash;
+ enum ttm_object_type object_type;
+ bool shareable;
+ struct ttm_object_file *tfile;
+ struct kref refcount;
+ void (*refcount_release) (struct ttm_base_object **base);
+ void (*ref_obj_release) (struct ttm_base_object *base,
+ enum ttm_ref_type ref_type);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object
+ **),
+ void (*ref_obj_release) (struct ttm_base_object
+ *,
+ enum ttm_ref_type
+ ref_type));
+
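
A hedged sketch of embedding a ttm_base_object in a driver-private object and wiring up refcount_release as described above; the example_object type and its create path are illustrative assumptions.

struct example_object {
	struct ttm_base_object base;
	/* driver-private payload ... */
};

static void example_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct example_object *obj =
		container_of(base, struct example_object, base);

	*p_base = NULL;		/* required by the contract above */
	kfree(obj);
}

static int example_object_create(struct ttm_object_file *tfile,
				 struct example_object *obj)
{
	return ttm_base_object_init(tfile, &obj->base, true /* shareable */,
				    ttm_driver_type0,
				    example_refcount_release, NULL);
}
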
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * Also verifies that the object is visible to the application, by
+ * comparing the @tfile argument and checking the object shareable flag.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+ *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
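+
+/*
+ * Illustrative sketch (not part of this patch): the lookup / unref
+ * pattern, assuming @tfile and a user-supplied handle @key from the
+ * surrounding code.
+ *
+ * struct ttm_base_object *base;
+ *
+ * base = ttm_base_object_lookup(tfile, key);
+ * if (unlikely(base == NULL))
+ *         return -EINVAL;
+ *
+ * use the object here, then drop the reference:
+ *
+ * ttm_base_object_unref(&base);   base is now NULL
+ */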
+
+/**
+ * ttm_ref_object_add
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates whether an identical reference object
+ * already existed, in which case its refcount was incremented instead.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be used for refcounting to prevent object
+ * destruction, or, when user-space takes a lock, a ref object can be added
+ * to that lock to make sure the lock is released if the application dies.
+ * A ref object holds a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed);
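+
+/*
+ * Illustrative sketch (not part of this patch): taking a usage
+ * reference on behalf of the calling application, assuming @tfile and
+ * @base from the surrounding code. On success the base object stays
+ * alive at least until the ref object is removed or the file is closed.
+ *
+ * bool existed;
+ * int ret;
+ *
+ * ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
+ * if (unlikely(ret != 0))
+ *         return ret;
+ */
+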
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @tfile: Pointer to the struct ttm_object_file holding the ref object.
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key,
+ enum ttm_ref_type ref_type);
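+
+/*
+ * Illustrative sketch (not part of this patch): dropping the usage
+ * reference taken above, where @key is the hash key of the base object.
+ *
+ * ret = ttm_ref_object_base_unref(tfile, key, TTM_REF_USAGE);
+ */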
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object_file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *
+ttm_object_file_init(struct ttm_object_device *tdev,
+                     unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
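+
+/*
+ * Illustrative sketch (not part of this patch): per-file setup and
+ * teardown from file operations. The priv pointer, tdev and the hash
+ * order of 10 are hypothetical.
+ *
+ * static int my_open(struct inode *inode, struct file *filp)
+ * {
+ *         priv->tfile = ttm_object_file_init(tdev, 10);
+ *         if (unlikely(priv->tfile == NULL))
+ *                 return -ENOMEM;
+ *         return 0;
+ * }
+ *
+ * static int my_release(struct inode *inode, struct file *filp)
+ * {
+ *         ttm_object_file_release(&priv->tfile);
+ *         return 0;
+ * }
+ */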
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @mem_glob: Pointer to the struct ttm_mem_global used for memory accounting.
+ * @hash_order: Order of hash table used to hash the base objects.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+                       unsigned int hash_order);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
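+
+/*
+ * Illustrative sketch (not part of this patch): device-wide setup and
+ * teardown, typically at driver load / unload. mem_glob and the hash
+ * order of 12 are assumptions.
+ *
+ * tdev = ttm_object_device_init(mem_glob, 12);
+ * if (unlikely(tdev == NULL))
+ *         return -ENOMEM;
+ *
+ * and at unload time:
+ *
+ * ttm_object_device_release(&tdev);   tdev is now NULL
+ */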
+
+#endif
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index 170786e5c2f..fd11a5bd892 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -24,7 +24,7 @@
#ifndef _VIA_DRM_H_
#define _VIA_DRM_H_
-#include <linux/types.h>
+#include "drm.h"
/* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver.