author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2008-12-20 04:54:54 -0500
---|---|---
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2008-12-20 04:54:54 -0500
commit | 93b8eef1c098efbea2f1fc0be7e3c681f259a7e7 (patch) |
tree | 462cc8c2bc07bbc825dab2a200891a28d8643329 /drivers/gpu |
parent | a2d781fc8d9b16113dd9440107d73c0f21d7cbef (diff) |
parent | 929096fe9ff1f4b3645cf3919527ab47e8d5e17c (diff) |
Merge commit 'v2.6.28-rc9' into next
Diffstat (limited to 'drivers/gpu')
25 files changed, 872 insertions, 1045 deletions
```diff
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 96f416afc3f..996097acb5e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -266,11 +266,19 @@ int drm_init(struct drm_driver *driver)
 	for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
 		pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
 
+		/* Loop around setting up a DRM device for each PCI device
+		 * matching our ID and device class.  If we had the internal
+		 * function that pci_get_subsys and pci_get_class used, we'd
+		 * be able to just pass pid in instead of doing a two-stage
+		 * thing.
+		 */
 		pdev = NULL;
-		/* pass back in pdev to account for multiple identical cards */
 		while ((pdev = pci_get_subsys(pid->vendor, pid->device,
 				pid->subvendor, pid->subdevice, pdev)) != NULL) {
+			if ((pdev->class & pid->class_mask) != pid->class)
+				continue;
+
 			/* stealth mode requires a manual probe */
 			pci_dev_get(pdev);
 			drm_get_dev(pdev, pid, driver);
@@ -297,6 +305,8 @@ static void drm_cleanup(struct drm_device * dev)
 		return;
 	}
 
+	drm_vblank_cleanup(dev);
+
 	drm_lastclose(dev);
 
 	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 0d46627663b..78eeed5caaf 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -406,8 +406,6 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
-	drm_fasync(-1, filp, 0);
-
 	mutex_lock(&dev->ctxlist_mutex);
 	if (!list_empty(&dev->ctxlist)) {
 		struct drm_ctx_list *pos, *n;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 212a94f715b..1e787f894b3 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -94,7 +94,7 @@ static void vblank_disable_fn(unsigned long arg)
 	}
 }
 
-static void drm_vblank_cleanup(struct drm_device *dev)
+void drm_vblank_cleanup(struct drm_device *dev)
 {
 	/* Bail if the driver didn't call drm_vblank_init() */
 	if (dev->num_crtcs == 0)
@@ -278,10 +278,6 @@ int drm_irq_uninstall(struct drm_device * dev)
 
 	free_irq(dev->pdev->irq, dev);
 
-	drm_vblank_cleanup(dev);
-
-	dev->locked_tasklet_func = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_irq_uninstall);
@@ -699,81 +695,3 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	drm_vbl_send_signals(dev, crtc);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
-
-/**
- * Tasklet wrapper function.
- *
- * \param data DRM device in disguise.
- *
- * Attempts to grab the HW lock and calls the driver callback on success. On
- * failure, leave the lock marked as contended so the callback can be called
- * from drm_unlock().
- */
-static void drm_locked_tasklet_func(unsigned long data)
-{
-	struct drm_device *dev = (struct drm_device *)data;
-	unsigned long irqflags;
-	void (*tasklet_func)(struct drm_device *);
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	tasklet_func = dev->locked_tasklet_func;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	if (!tasklet_func ||
-	    !drm_lock_take(&dev->lock,
-			   DRM_KERNEL_CONTEXT)) {
-		return;
-	}
-
-	dev->lock.lock_time = jiffies;
-	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	tasklet_func = dev->locked_tasklet_func;
-	dev->locked_tasklet_func = NULL;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	if (tasklet_func != NULL)
-		tasklet_func(dev);
-
-	drm_lock_free(&dev->lock,
-		      DRM_KERNEL_CONTEXT);
-}
-
-/**
- * Schedule a tasklet to call back a driver hook with the HW lock held.
- *
- * \param dev DRM device.
- * \param func Driver callback.
- *
- * This is intended for triggering actions that require the HW lock from an
- * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
- * completes. Note that the callback may be called from interrupt or process
- * context, it must not make any assumptions about this. Also, the HW lock will
- * be held with the kernel context or any client context.
- */
-void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
-{
-	unsigned long irqflags;
-	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
-	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
-		return;
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-
-	if (dev->locked_tasklet_func) {
-		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-		return;
-	}
-
-	dev->locked_tasklet_func = func;
-
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	drm_tasklet.data = (unsigned long)dev;
-
-	tasklet_hi_schedule(&drm_tasklet);
-}
-EXPORT_SYMBOL(drm_locked_tasklet);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 888159e03d2..1cfa72031f8 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -154,8 +154,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
-	unsigned long irqflags;
-	void (*tasklet_func)(struct drm_device *);
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
@@ -163,13 +161,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	tasklet_func = dev->locked_tasklet_func;
-	dev->locked_tasklet_func = NULL;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-	if (tasklet_func != NULL)
-		tasklet_func(dev);
-
 	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
 	/* kernel_context_switch isn't used by any of the x86 drm
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 141e33004a7..66c96ec6667 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -92,7 +92,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
-	spin_lock_init(&dev->tasklet_lock);
 	spin_lock_init(&dev->lock.spinlock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5ba78e4fd2b..d8fb5d8ee7e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,13 +3,14 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
 	  i915_gem_proc.o \
 	  i915_gem_tiling.o
 
+i915-$(CONFIG_ACPI) += i915_opregion.o
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
 obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 01de536e021..553dd4bc307 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -154,6 +154,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	if (I915_NEED_GFX_HWS(dev))
 		i915_free_hws(dev);
 
+	dev_priv->sarea = NULL;
+	dev_priv->sarea_priv = NULL;
+
 	return 0;
 }
 
@@ -442,7 +445,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
@@ -573,7 +576,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
@@ -608,7 +611,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    dev_priv->sarea_priv;
 	drm_i915_batchbuffer_t *batch = data;
@@ -634,7 +636,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	mutex_unlock(&dev->struct_mutex);
 
 	if (sarea_priv)
-		sarea_priv->last_dispatch = (int)hw_status[5];
+		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 	return ret;
 }
@@ -642,7 +644,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    dev_priv->sarea_priv;
 	drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -670,7 +671,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	}
 
 	if (sarea_priv)
-		sarea_priv->last_dispatch = (int)hw_status[5];
+		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 	return 0;
 }
@@ -846,16 +847,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * and the registers being closely associated.
 	 *
 	 * According to chipset errata, on the 965GM, MSI interrupts may
-	 * be lost or delayed
+	 * be lost or delayed, but we use them anyways to avoid
+	 * stuck interrupts on some machines.
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
-		if (pci_enable_msi(dev->pdev))
-			DRM_ERROR("failed to enable MSI\n");
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		pci_enable_msi(dev->pdev);
 
 	intel_opregion_init(dev);
 
 	spin_lock_init(&dev_priv->user_irq_lock);
 
+	ret = drm_vblank_init(dev, I915_NUM_PIPE);
+
+	if (ret) {
+		(void) i915_driver_unload(dev);
+		return ret;
+	}
+
 	return ret;
 }
 
@@ -960,6 +968,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f20ffe17df7..adc972cc6bf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
 #define _I915_DRV_H_
 
 #include "i915_reg.h"
+#include <linux/io-mapping.h>
 
 /* General customization:
 */
@@ -46,6 +47,8 @@ enum pipe {
 	PIPE_B,
 };
 
+#define I915_NUM_PIPE	2
+
 /* Interface history:
 *
 * 1.1: Original.
@@ -87,13 +90,6 @@ struct mem_block {
 	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
 };
 
-typedef struct _drm_i915_vbl_swap {
-	struct list_head head;
-	drm_drawable_t drw_id;
-	unsigned int pipe;
-	unsigned int sequence;
-} drm_i915_vbl_swap_t;
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -138,6 +134,7 @@ typedef struct drm_i915_private {
 	int user_irq_refcount;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
+	u32 pipestat[2];
 
 	int tex_lru_log_granularity;
 	int allow_batchbuffer;
@@ -145,10 +142,6 @@ typedef struct drm_i915_private {
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 
-	spinlock_t swaps_lock;
-	drm_i915_vbl_swap_t vbl_swaps;
-	unsigned int swaps_pending;
-
 	struct intel_opregion opregion;
 
 	/* Register state */
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
+	u32 saveRENDERSTANDBY;
+	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
 	u32 savePIPEASRC;
@@ -240,16 +235,19 @@ typedef struct drm_i915_private {
 	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
 	u8 saveCR[37];
 
-	/** Work task for vblank-related ring access */
-	struct work_struct vblank_work;
-
 	struct {
 		struct drm_mm gtt_space;
 
+		struct io_mapping *gtt_mapping;
+
 		/**
 		 * List of objects currently involved in rendering from the
 		 * ringbuffer.
 		 *
+		 * Includes buffers having the contents of their GPU caches
+		 * flushed, not necessarily primitives.  last_rendering_seqno
+		 * represents when the rendering involved will be completed.
+		 *
 		 * A reference is held on the buffer while on this list.
 		 */
 		struct list_head active_list;
@@ -259,6 +257,8 @@ typedef struct drm_i915_private {
 		 * still have a write_domain which needs to be flushed before
 		 * unbinding.
 		 *
+		 * last_rendering_seqno is 0 while an object is in this list.
+		 *
 		 * A reference is held on the buffer while on this list.
 		 */
 		struct list_head flushing_list;
@@ -267,6 +267,8 @@ typedef struct drm_i915_private {
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 *
+		 * last_rendering_seqno is 0 while an object is in this list.
+		 *
 		 * A reference is not held on the buffer while on this list,
 		 * as merely being GTT-bound shouldn't prevent its being
 		 * freed, and we'll pull it off the list in the free path.
@@ -377,8 +379,8 @@ struct drm_i915_gem_object {
 	uint32_t agp_type;
 
 	/**
-	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
+	 * flags which individual pages are valid.
 	 */
 	uint8_t *page_cpu_valid;
 };
@@ -400,9 +402,6 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
-
 	struct list_head list;
 };
 
@@ -441,7 +440,6 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
 void i915_user_irq_get(struct drm_device *dev);
 void i915_user_irq_put(struct drm_device *dev);
 
-extern void i915_vblank_work_handler(struct work_struct *work);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
 extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -457,6 +455,13 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
 
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
@@ -502,6 +507,8 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_proc_init(struct drm_minor *minor);
 void i915_gem_proc_cleanup(struct drm_minor *minor);
@@ -539,11 +546,18 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
+#ifdef CONFIG_ACPI
 /* i915_opregion.c */
 extern int intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_free(struct drm_device *dev);
 extern void opregion_asle_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
+#else
+static inline int intel_opregion_init(struct drm_device *dev) { return 0; }
+static inline void intel_opregion_free(struct drm_device *dev) { return; }
+static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+#endif
 
 /**
 * Lock test for when it's just for synchronization of ring access.
@@ -610,8 +624,9 @@ extern void opregion_enable_asle(struct drm_device *dev);
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
 #define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
+#define I915_BREADCRUMB_INDEX		0x21
 
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 17ae330ff26..ad672d85482 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,21 +31,23 @@
 #include "i915_drv.h"
 #include <linux/swap.h>
 
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			    uint32_t read_domains,
-			    uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain);
+#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+						     uint64_t offset,
+						     uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -79,6 +81,22 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_get_aperture *args = data;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	args->aper_size = dev->gtt_total;
+	args->aper_available_size = (args->aper_size -
+				     atomic_read(&dev->pin_memory));
+
+	return 0;
+}
+
 /**
 * Creates a new mm object and returns a handle to it.
@@ -144,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
-					       I915_GEM_DOMAIN_CPU, 0);
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -171,35 +189,50 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/*
- * Try to write quickly with an atomic kmap. Return true on success.
- *
- * If this fails (which includes a partial write), we'll redo the whole
- * thing with the slow version.
- *
- * This is a workaround for the low performance of iounmap (approximate
- * 10% cpu cost on normal 3D workloads).  kmap_atomic on HIGHMEM kernels
- * happens to let us map card memory without taking IPIs.  When the vmap
- * rework lands we should be able to dump this hack.
+/* This is the fast write path which cannot handle
+ * page faults in the source data
  */
-static inline int fast_user_write(unsigned long pfn, char __user *user_data,
-				  int l, int o)
+
+static inline int
+fast_user_write(struct io_mapping *mapping,
+		loff_t page_base, int page_offset,
+		char __user *user_data,
+		int length)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long unwritten;
 	char *vaddr_atomic;
+	unsigned long unwritten;
 
-	vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
-#if WATCH_PWRITE
-	DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
-		 i, o, l, pfn, vaddr_atomic);
-#endif
-	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, user_data, l);
-	kunmap_atomic(vaddr_atomic, KM_USER0);
-	return !unwritten;
-#else
+	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
+	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+						      user_data, length);
+	io_mapping_unmap_atomic(vaddr_atomic);
+	if (unwritten)
+		return -EFAULT;
+	return 0;
+}
+
+/* Here's the write path which can sleep for
+ * page faults
+ */
+
+static inline int
+slow_user_write(struct io_mapping *mapping,
+		loff_t page_base, int page_offset,
+		char __user *user_data,
+		int length)
+{
+	char __iomem *vaddr;
+	unsigned long unwritten;
+
+	vaddr = io_mapping_map_wc(mapping, page_base);
+	if (vaddr == NULL)
+		return -EFAULT;
+	unwritten = __copy_from_user(vaddr + page_offset,
+				     user_data, length);
+	io_mapping_unmap(vaddr);
+	if (unwritten)
+		return -EFAULT;
 	return 0;
-#endif
 }
 
 static int
@@ -208,10 +241,12 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		    struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
-	loff_t offset;
+	loff_t offset, page_base;
 	char __user *user_data;
-	int ret = 0;
+	int page_offset, page_length;
+	int ret;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -225,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret)
 		goto fail;
 
@@ -235,57 +269,37 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	obj_priv->dirty = 1;
 
 	while (remain > 0) {
-		unsigned long pfn;
-		int i, o, l;
-
 		/* Operation in this page
 		 *
-		 * i = page number
-		 * o = offset within page
-		 * l = bytes to copy
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
 		 */
-		i = offset >> PAGE_SHIFT;
-		o = offset & (PAGE_SIZE-1);
-		l = remain;
-		if ((o + l) > PAGE_SIZE)
-			l = PAGE_SIZE - o;
-
-		pfn = (dev->agp->base >> PAGE_SHIFT) + i;
-
-		if (!fast_user_write(pfn, user_data, l, o)) {
-			unsigned long unwritten;
-			char __iomem *vaddr;
-
-			vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
-#if WATCH_PWRITE
-			DRM_INFO("pwrite slow i %d o %d l %d "
-				 "pfn %ld vaddr %p\n",
-				 i, o, l, pfn, vaddr);
-#endif
-			if (vaddr == NULL) {
-				ret = -EFAULT;
-				goto fail;
-			}
-			unwritten = __copy_from_user(vaddr + o, user_data, l);
-#if WATCH_PWRITE
-			DRM_INFO("unwritten %ld\n", unwritten);
-#endif
-			iounmap(vaddr);
-			if (unwritten) {
-				ret = -EFAULT;
+		page_base = (offset & ~(PAGE_SIZE-1));
+		page_offset = offset & (PAGE_SIZE-1);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
+				       page_offset, user_data, page_length);
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  In this case, use the
+		 * non-atomic function
+		 */
+		if (ret) {
+			ret = slow_user_write (dev_priv->mm.gtt_mapping,
+					       page_base, page_offset,
+					       user_data, page_length);
+			if (ret)
 				goto fail;
-			}
 		}
 
-		remain -= l;
-		user_data += l;
-		offset += l;
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
 	}
 
-#if WATCH_PWRITE && 1
-	i915_gem_clflush_object(obj);
-	i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
-	i915_gem_clflush_object(obj);
-#endif
-
 fail:
 	i915_gem_object_unpin(obj);
@@ -305,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -382,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
 */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_set_domain *args = data;
 	struct drm_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
@@ -402,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
 	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-		 obj, obj->size, args->read_domains, args->write_domain);
+		 obj, obj->size, read_domains, write_domain);
 #endif
-	ret = i915_gem_set_domain(obj, file_priv,
-				  args->read_domains, args->write_domain);
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+		/* Silently promote "you're not bound, there was nothing to do"
+		 * to success, since the client was just asking us to
+		 * make sure everything was done.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+	} else {
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+	}
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -440,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	obj_priv = obj->driver_private;
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-	}
+	if (obj_priv->pin_count)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -517,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -531,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list, &dev_priv->mm.active_list);
+	obj_priv->last_rendering_seqno = seqno;
 }
 
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	BUG_ON(!obj_priv->active);
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	obj_priv->last_rendering_seqno = 0;
+}
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -547,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -595,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
-	request->flush_domains = flush_domains;
 	was_empty = list_empty(&dev_priv->mm.request_list);
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with our flush.
+	 */
+	if (flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if ((obj->write_domain & flush_domains) ==
+			    obj->write_domain) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_active(obj, seqno);
+			}
+		}
+
+	}
+
 	if (was_empty && !dev_priv->mm.suspended)
 		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 	return seqno;
@@ -661,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
 			 __func__, request->seqno, obj);
 #endif
 
-		if (obj->write_domain != 0) {
-			list_move_tail(&obj_priv->list,
-				       &dev_priv->mm.flushing_list);
-		} else {
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
 			i915_gem_object_move_to_inactive(obj);
-		}
-	}
-
-	if (request->flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		/* Clear the write domain and activity from any buffers
-		 * that are just waiting for a flush matching the one retired.
-		 */
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if (obj->write_domain & request->flush_domains) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_inactive(obj);
-			}
-		}
-	}
 }
 
@@ -877,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int ret;
 
-	/* If there are writes queued to the buffer, flush and
-	 * create a new seqno to wait for.
+	/* This function only exists to support waiting for existing rendering,
+	 * not for emitting required flushes.
 	 */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
-		DRM_INFO("%s: flushing object %p from write domain %08x\n",
-			  __func__, obj, write_domain);
-#endif
-		i915_gem_flush(dev, 0, write_domain);
-
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev,
-								  write_domain);
-		BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
-		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
-	}
+	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
@@ -935,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* Wait for any rendering to complete
-	 */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret) {
-		DRM_ERROR("wait_rendering failed: %d\n", ret);
-		return ret;
-	}
-
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
 	 * are flushed when we go to remap it. This will
 	 * also ensure that all pending GPU writes are finished
 	 * before we unbind.
 	 */
-	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
-					 I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
-		DRM_ERROR("set_domain failed: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("set_domain failed: %d\n", ret);
 		return ret;
 	}
 
@@ -1068,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
 }
 
 static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	int ret;
+
+	for (;;) {
+		ret = i915_gem_evict_something(dev);
+		if (ret != 0)
+			break;
+	}
+	if (ret == -ENOMEM)
+		return 0;
+	return ret;
+}
+
+static int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1153,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
 		ret = i915_gem_evict_something(dev);
 		if (ret != 0) {
-			DRM_ERROR("Failed to evict a buffer %d\n", ret);
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to evict a buffer %d\n", ret);
 			return ret;
 		}
 		goto search_free;
@@ -1213,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
 }
 
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	uint32_t seqno;
+
+	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+		return;
+
+	/* Queue the GPU write cache flushing we need. */
+	i915_gem_flush(dev, 0, obj->write_domain);
+	seqno = i915_add_request(dev, obj->write_domain);
+	obj->write_domain = 0;
+	i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain.  Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush.  It also doesn't land in render cache.
+	 */
+	obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+		return;
+
+	i915_gem_clflush_object(obj);
+	drm_agp_chipset_flush(dev);
+	obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj_priv->gtt_space == NULL)
+		return -EINVAL;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	/* If we're writing through the GTT domain, then CPU and GPU caches
+	 * will need to be invalidated at next use.
+	 */
+	if (write)
+		obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write) {
+		obj->write_domain = I915_GEM_DOMAIN_GTT;
+		obj_priv->dirty = 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we have a partially-valid cache of the object in the CPU,
+	 * finish invalidating it and free the per-page flags.
+	 */
+	i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+
+		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	return 0;
+}
+
 /*
  * Set the next domain for the specified object. This
This * may not actually perform the necessary flushing/invaliding though, @@ -1324,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj) * MI_FLUSH * drm_agp_chipset_flush */ -static int -i915_gem_object_set_domain(struct drm_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain) +static void +i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, + uint32_t read_domains, + uint32_t write_domain) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; - int ret; + + BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); + BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); #if WATCH_BUF DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", @@ -1370,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", __func__, flush_domains, invalidate_domains); #endif - /* - * If we're invaliding the CPU cache and flushing a GPU cache, - * then pause for rendering so that the GPU caches will be - * flushed before the cpu cache is invalidated - */ - if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && - (flush_domains & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT))) { - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; - } i915_gem_clflush_object(obj); } if ((write_domain | flush_domains) != 0) obj->write_domain = write_domain; - - /* If we're invalidating the CPU domain, clear the per-page CPU - * domain list as well. - */ - if (obj_priv->page_cpu_valid != NULL && - (write_domain != 0 || - read_domains & I915_GEM_DOMAIN_CPU)) { - drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, - DRM_MEM_DRIVER); - obj_priv->page_cpu_valid = NULL; - } obj->read_domains = read_domains; dev->invalidate_domains |= invalidate_domains; @@ -1408,49 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, obj->read_domains, obj->write_domain, dev->invalidate_domains, dev->flush_domains); #endif - return 0; } /** - * Set the read/write domain on a range of the object. + * Moves the object from a partially CPU read to a full one. * - * Currently only implemented for CPU reads, otherwise drops to normal - * i915_gem_object_set_domain(). + * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), + * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). */ -static int -i915_gem_object_set_domain_range(struct drm_gem_object *obj, - uint64_t offset, - uint64_t size, - uint32_t read_domains, - uint32_t write_domain) +static void +i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) { + struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; - int ret, i; - if (obj->read_domains & I915_GEM_DOMAIN_CPU) - return 0; + if (!obj_priv->page_cpu_valid) + return; - if (read_domains != I915_GEM_DOMAIN_CPU || - write_domain != 0) - return i915_gem_object_set_domain(obj, - read_domains, write_domain); + /* If we're partially in the CPU read domain, finish moving it in. + */ + if (obj->read_domains & I915_GEM_DOMAIN_CPU) { + int i; - /* Wait on any GPU rendering to the object to be flushed. 
-	 */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
+		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+			if (obj_priv->page_cpu_valid[i])
+				continue;
+			drm_clflush_pages(obj_priv->page_list + i, 1);
+		}
+		drm_agp_chipset_flush(dev);
 	}
 
+	/* Free the page_cpu_valid mappings which are now stale, whether
+	 * or not we've got I915_GEM_DOMAIN_CPU.
+	 */
+	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+		 DRM_MEM_DRIVER);
+	obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid.  The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+					  uint64_t offset, uint64_t size)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int i, ret;
+
+	if (offset == 0 && size == obj->size)
+		return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we're already fully in the CPU read domain, we're done. */
+	if (obj_priv->page_cpu_valid == NULL &&
+	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+		return 0;
+
+	/* Otherwise, create/clear the per-page CPU read domain flag if we're
+	 * newly adding I915_GEM_DOMAIN_CPU
+	 */
 	if (obj_priv->page_cpu_valid == NULL) {
 		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
 						      DRM_MEM_DRIVER);
-	}
+		if (obj_priv->page_cpu_valid == NULL)
+			return -ENOMEM;
+	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
-	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+	     i++) {
 		if (obj_priv->page_cpu_valid[i])
 			continue;
 
@@ -1459,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 		obj_priv->page_cpu_valid[i] = 1;
 	}
 
-	return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-	uint32_t flush_domains = dev->flush_domains;
-
-	/*
-	 * Now that all the buffers are synced to the proper domains,
-	 * flush and invalidate the collected domains
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
 	 */
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev,
-			       dev->invalidate_domains,
-			       dev->flush_domains);
-		dev->invalidate_domains = 0;
-		dev->flush_domains = 0;
-	}
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	return flush_domains;
+	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+	return 0;
 }
 
 /**
@@ -1503,12 +1682,12 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				 struct drm_i915_gem_exec_object *entry)
 {
 	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_relocation_entry reloc;
 	struct drm_i915_gem_relocation_entry __user *relocs;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int i, ret;
-	uint32_t last_reloc_offset = -1;
-	void __iomem *reloc_page = NULL;
+	void __iomem *reloc_page;
 
 	/* Choose the GTT offset for our buffer and put it there. */
 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -1572,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			return -EINVAL;
 		}
 
+		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+			DRM_ERROR("reloc with read/write CPU domains: "
+				  "obj %p target %d offset %d "
+				  "read %08x write %08x",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset,
+				  reloc.read_domains,
+				  reloc.write_domain);
+			return -EINVAL;
+		}
+
 		if (reloc.write_domain && target_obj->pending_write_domain &&
 		    reloc.write_domain != target_obj->pending_write_domain) {
 			DRM_ERROR("Write domain conflict: "
@@ -1612,45 +1803,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			continue;
 		}
 
-		/* Now that we're going to actually write some data in,
-		 * make sure that any rendering using this buffer's contents
-		 * is completed.
-		 */
-		i915_gem_object_wait_rendering(obj);
-
-		/* As we're writing through the gtt, flush
-		 * any CPU writes before we write the relocations
-		 */
-		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-			i915_gem_clflush_object(obj);
-			drm_agp_chipset_flush(dev);
-			obj->write_domain = 0;
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
 		}
 
 		/* Map the page containing the relocation we're going to
 		 * perform.
 		 */
 		reloc_offset = obj_priv->gtt_offset + reloc.offset;
-		if (reloc_page == NULL ||
-		    (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
-		    (reloc_offset & ~(PAGE_SIZE - 1))) {
-			if (reloc_page != NULL)
-				iounmap(reloc_page);
-
-			reloc_page = ioremap_wc(dev->agp->base +
-						(reloc_offset &
-						 ~(PAGE_SIZE - 1)),
-						PAGE_SIZE);
-			last_reloc_offset = reloc_offset;
-			if (reloc_page == NULL) {
-				drm_gem_object_unreference(target_obj);
-				i915_gem_object_unpin(obj);
-				return -ENOMEM;
-			}
-		}
-
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      (reloc_offset &
+						       ~(PAGE_SIZE - 1)));
 		reloc_entry = (uint32_t __iomem *)(reloc_page +
-						   (reloc_offset & (PAGE_SIZE - 1)));
+						(reloc_offset & (PAGE_SIZE - 1)));
 		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
 
 #if WATCH_BUF
@@ -1659,6 +1827,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			  readl(reloc_entry), reloc_val);
 #endif
 		writel(reloc_val, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
 
 		/* Write the updated presumed offset for this entry back out
 		 * to the user.
@@ -1674,9 +1843,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		drm_gem_object_unreference(target_obj);
 	}
 
-	if (reloc_page != NULL)
-		iounmap(reloc_page);
-
 #if WATCH_BUF
 	if (0)
 		i915_gem_dump_object(obj, 128, __func__, ~0);
@@ -1783,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains;
+	int pin_tries;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1831,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EBUSY;
 	}
 
-	/* Zero the gloabl flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * gtt
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-
-	/* Look up object handles and perform the relocations */
+	/* Look up object handles */
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -1848,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			ret = -EBADF;
 			goto err;
 		}
+	}
 
-		object_list[i]->pending_read_domains = 0;
-		object_list[i]->pending_write_domain = 0;
-		ret = i915_gem_object_pin_and_relocate(object_list[i],
-						       file_priv,
-						       &exec_list[i]);
-		if (ret) {
-			DRM_ERROR("object bind and relocate failed %d\n", ret);
+	/* Pin and relocate */
+	for (pin_tries = 0; ; pin_tries++) {
+		ret = 0;
+		for (i = 0; i < args->buffer_count; i++) {
+			object_list[i]->pending_read_domains = 0;
+			object_list[i]->pending_write_domain = 0;
+			ret = i915_gem_object_pin_and_relocate(object_list[i],
+							       file_priv,
+							       &exec_list[i]);
+			if (ret)
+				break;
+			pinned = i + 1;
+		}
+		/* success */
+		if (ret == 0)
+			break;
+
+		/* error other than GTT full, or we've already tried again */
+		if (ret != -ENOMEM || pin_tries >= 1) {
+			DRM_ERROR("Failed to pin buffers %d\n", ret);
 			goto err;
 		}
-		pinned = i + 1;
+
+		/* unpin all of our buffers */
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
+		/* evict everyone we can from the aperture */
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			goto err;
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
@@ -1868,32 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-		if (obj_priv->gtt_space == NULL) {
-			/* We evicted the buffer in the process of validating
-			 * our set of buffers in.  We could try to recover by
-			 * kicking them everything out and trying again from
-			 * the start.
-			 */
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		/* make sure all previous memory operations have passed */
-		ret = i915_gem_object_set_domain(obj,
-						 obj->pending_read_domains,
-						 obj->pending_write_domain);
-		if (ret)
-			goto err;
+		/* Compute new gpu domains and update invalidate/flush */
+		i915_gem_object_set_to_gpu_domain(obj,
+						  obj->pending_read_domains,
+						  obj->pending_write_domain);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Flush/invalidate caches and chipset buffer */
-	flush_domains = i915_gem_dev_set_domain(dev);
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		if (dev->flush_domains)
+			(void)i915_add_request(dev, dev->flush_domains);
+	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -1913,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			      ~0);
 #endif
 
-	(void)i915_add_request(dev, flush_domains);
-
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 	if (ret) {
@@ -1942,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_file_priv->mm.last_gem_seqno = seqno;
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = seqno;
+		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -2076,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	/* XXX - flush the CPU caches for pinned objects
 	 * as the X server doesn't manage domains yet
 	 */
-	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-		obj->write_domain = 0;
-	}
+	i915_gem_object_flush_cpu_write_domain(obj);
 	args->offset = obj_priv->gtt_offset;
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -2182,29 +2361,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain)
-{
-	struct drm_device *dev = obj->dev;
-	int ret;
-	uint32_t flush_domains;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
-	if (ret)
-		return ret;
-	flush_domains = i915_gem_dev_set_domain(obj->dev);
-
-	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
-		(void) i915_add_request(dev, flush_domains);
-
-	return 0;
-}
-
 /** Unbinds all objects that are on the given buffer list. */
 static int
 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
@@ -2299,29 +2455,52 @@ i915_gem_idle(struct drm_device *dev)
 
 	i915_gem_retire_requests(dev);
 
-	/* Active and flushing should now be empty as we've
-	 * waited for a sequence higher than any pending execbuffer
-	 */
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	if (!dev_priv->mm.wedged) {
+		/* Active and flushing should now be empty as we've
+		 * waited for a sequence higher than any pending execbuffer
+		 */
+		WARN_ON(!list_empty(&dev_priv->mm.active_list));
+		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
+		/* Request should now be empty as we've also waited
+		 * for the last request in the list
+		 */
+		WARN_ON(!list_empty(&dev_priv->mm.request_list));
+	}
 
-	/* Request should now be empty as we've also waited
-	 * for the last request in the list
+	/* Empty the active and flushing lists to inactive.  If there's
+	 * anything left at this point, it means that we're wedged and
+	 * nothing good's going to happen by leaving them there.  So strip
+	 * the GPU domains and just stuff them onto inactive.
 	 */
-	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+	while (!list_empty(&dev_priv->mm.active_list)) {
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+					    struct drm_i915_gem_object,
+					    list);
+		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj_priv->obj);
+	}
+
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		struct drm_i915_gem_object *obj_priv;
 
-	/* Move all buffers out of the GTT. */
+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+					    struct drm_i915_gem_object,
+					    list);
+		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj_priv->obj);
+	}
+
+
+	/* Move all inactive buffers out of the GTT. */
 	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->mm.request_list));
-
 	i915_gem_cleanup_ringbuffer(dev);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2518,6 +2697,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	if (ret != 0)
 		return ret;
 
+	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
+							dev->agp->agp_info.aper_size
+							* 1024 * 1024);
+
 	mutex_lock(&dev->struct_mutex);
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
@@ -2535,11 +2718,13 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
 	ret = i915_gem_idle(dev);
 	drm_irq_uninstall(dev);
 
+	io_mapping_free(dev_priv->mm.gtt_mapping);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a..e8d5abe1250 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list) {
-		DRM_PROC_PRINT("    %d @ %d %08x\n",
+		DRM_PROC_PRINT("    %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca0..a8cb69469c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		    dcc & DCC_CHANNEL_XOR_DISABLE) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		} else if (IS_I965GM(dev) || IS_GM45(dev)) {
-			/* GM965 only does bit 11-based channel
-			 * randomization
+		} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+			   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+			/* GM965/GM45 does either bit 11 or bit 17
+			 * swizzling.
 			 */
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 26f48932a51..69b9a42da95 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,11 +33,23 @@
 
 #define MAX_NOPID ((u32)~0)
 
-/** These are the interrupts used by the driver */
-#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |		\
-				    I915_ASLE_INTERRUPT |		\
-				    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
-				    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+/**
+ * Interrupts that are always left unmasked.
+ *
+ * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
+ * we leave them always unmasked in IMR and then control enabling them through
+ * PIPESTAT alone.
+ */
+#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |		 \
+				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
+/** Interrupts that we mask and unmask at runtime. */
*/ +#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) + +/** These are all of the interrupts used by the driver */ +#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \ + I915_INTERRUPT_ENABLE_VAR) void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) @@ -59,6 +71,41 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) } } +static inline u32 +i915_pipestat(int pipe) +{ + if (pipe == 0) + return PIPEASTAT; + if (pipe == 1) + return PIPEBSTAT; + BUG(); +} + +void +i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +{ + if ((dev_priv->pipestat[pipe] & mask) != mask) { + u32 reg = i915_pipestat(pipe); + + dev_priv->pipestat[pipe] |= mask; + /* Enable the interrupt, clear any pending status */ + I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); + (void) I915_READ(reg); + } +} + +void +i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +{ + if ((dev_priv->pipestat[pipe] & mask) != 0) { + u32 reg = i915_pipestat(pipe); + + dev_priv->pipestat[pipe] &= ~mask; + I915_WRITE(reg, dev_priv->pipestat[pipe]); + (void) I915_READ(reg); + } +} + /** * i915_pipe_enabled - check if a pipe is enabled * @dev: DRM device @@ -80,211 +127,6 @@ i915_pipe_enabled(struct drm_device *dev, int pipe) return 0; } -/** - * Emit blits for scheduled buffer swaps. - * - * This function will be called with the HW lock held. - * Because this function must grab the ring mutex (dev->struct_mutex), - * it can no longer run at soft irq time. We'll fix this when we do - * the DRI2 swap buffer work. - */ -static void i915_vblank_tasklet(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - struct list_head *list, *tmp, hits, *hit; - int nhits, nrects, slice[2], upper[2], lower[2], i; - unsigned counter[2]; - struct drm_drawable_info *drw; - drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; - u32 cpp = dev_priv->cpp; - u32 cmd = (cpp == 4) ? 
(XY_SRC_COPY_BLT_CMD | - XY_SRC_COPY_BLT_WRITE_ALPHA | - XY_SRC_COPY_BLT_WRITE_RGB) - : XY_SRC_COPY_BLT_CMD; - u32 src_pitch = sarea_priv->pitch * cpp; - u32 dst_pitch = sarea_priv->pitch * cpp; - u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); - RING_LOCALS; - - mutex_lock(&dev->struct_mutex); - - if (IS_I965G(dev) && sarea_priv->front_tiled) { - cmd |= XY_SRC_COPY_BLT_DST_TILED; - dst_pitch >>= 2; - } - if (IS_I965G(dev) && sarea_priv->back_tiled) { - cmd |= XY_SRC_COPY_BLT_SRC_TILED; - src_pitch >>= 2; - } - - counter[0] = drm_vblank_count(dev, 0); - counter[1] = drm_vblank_count(dev, 1); - - DRM_DEBUG("\n"); - - INIT_LIST_HEAD(&hits); - - nhits = nrects = 0; - - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); - - /* Find buffer swaps scheduled for this vertical blank */ - list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { - drm_i915_vbl_swap_t *vbl_swap = - list_entry(list, drm_i915_vbl_swap_t, head); - int pipe = vbl_swap->pipe; - - if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) - continue; - - list_del(list); - dev_priv->swaps_pending--; - drm_vblank_put(dev, pipe); - - spin_unlock(&dev_priv->swaps_lock); - spin_lock(&dev->drw_lock); - - drw = drm_get_drawable_info(dev, vbl_swap->drw_id); - - list_for_each(hit, &hits) { - drm_i915_vbl_swap_t *swap_cmp = - list_entry(hit, drm_i915_vbl_swap_t, head); - struct drm_drawable_info *drw_cmp = - drm_get_drawable_info(dev, swap_cmp->drw_id); - - /* Make sure both drawables are still - * around and have some rectangles before - * we look inside to order them for the - * blts below. - */ - if (drw_cmp && drw_cmp->num_rects > 0 && - drw && drw->num_rects > 0 && - drw_cmp->rects[0].y1 > drw->rects[0].y1) { - list_add_tail(list, hit); - break; - } - } - - spin_unlock(&dev->drw_lock); - - /* List of hits was empty, or we reached the end of it */ - if (hit == &hits) - list_add_tail(list, hits.prev); - - nhits++; - - spin_lock(&dev_priv->swaps_lock); - } - - if (nhits == 0) { - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); - mutex_unlock(&dev->struct_mutex); - return; - } - - spin_unlock(&dev_priv->swaps_lock); - - i915_kernel_lost_context(dev); - - if (IS_I965G(dev)) { - BEGIN_LP_RING(4); - - OUT_RING(GFX_OP_DRAWRECT_INFO_I965); - OUT_RING(0); - OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16)); - OUT_RING(0); - ADVANCE_LP_RING(); - } else { - BEGIN_LP_RING(6); - - OUT_RING(GFX_OP_DRAWRECT_INFO); - OUT_RING(0); - OUT_RING(0); - OUT_RING(sarea_priv->width | sarea_priv->height << 16); - OUT_RING(sarea_priv->width | sarea_priv->height << 16); - OUT_RING(0); - - ADVANCE_LP_RING(); - } - - sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; - - upper[0] = upper[1] = 0; - slice[0] = max(sarea_priv->pipeA_h / nhits, 1); - slice[1] = max(sarea_priv->pipeB_h / nhits, 1); - lower[0] = sarea_priv->pipeA_y + slice[0]; - lower[1] = sarea_priv->pipeB_y + slice[0]; - - spin_lock(&dev->drw_lock); - - /* Emit blits for buffer swaps, partitioning both outputs into as many - * slices as there are buffer swaps scheduled in order to avoid tearing - * (based on the assumption that a single buffer swap would always - * complete before scanout starts). 
- */ - for (i = 0; i++ < nhits; - upper[0] = lower[0], lower[0] += slice[0], - upper[1] = lower[1], lower[1] += slice[1]) { - if (i == nhits) - lower[0] = lower[1] = sarea_priv->height; - - list_for_each(hit, &hits) { - drm_i915_vbl_swap_t *swap_hit = - list_entry(hit, drm_i915_vbl_swap_t, head); - struct drm_clip_rect *rect; - int num_rects, pipe; - unsigned short top, bottom; - - drw = drm_get_drawable_info(dev, swap_hit->drw_id); - - /* The drawable may have been destroyed since - * the vblank swap was queued - */ - if (!drw) - continue; - - rect = drw->rects; - pipe = swap_hit->pipe; - top = upper[pipe]; - bottom = lower[pipe]; - - for (num_rects = drw->num_rects; num_rects--; rect++) { - int y1 = max(rect->y1, top); - int y2 = min(rect->y2, bottom); - - if (y1 >= y2) - continue; - - BEGIN_LP_RING(8); - - OUT_RING(cmd); - OUT_RING(ropcpp | dst_pitch); - OUT_RING((y1 << 16) | rect->x1); - OUT_RING((y2 << 16) | rect->x2); - OUT_RING(sarea_priv->front_offset); - OUT_RING((y1 << 16) | rect->x1); - OUT_RING(src_pitch); - OUT_RING(sarea_priv->back_offset); - - ADVANCE_LP_RING(); - } - } - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - mutex_unlock(&dev->struct_mutex); - - list_for_each_safe(hit, tmp, &hits) { - drm_i915_vbl_swap_t *swap_hit = - list_entry(hit, drm_i915_vbl_swap_t, head); - - list_del(hit); - - drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); - } -} - /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ @@ -322,121 +164,106 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) return count; } -void -i915_vblank_work_handler(struct work_struct *work) -{ - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - vblank_work); - struct drm_device *dev = dev_priv->dev; - unsigned long irqflags; - - if (dev->lock.hw_lock == NULL) { - i915_vblank_tasklet(dev); - return; - } - - spin_lock_irqsave(&dev->tasklet_lock, irqflags); - dev->locked_tasklet_func = i915_vblank_tasklet; - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); - - /* Try to get the lock now, if this fails, the lock - * holder will execute the tasklet during unlock - */ - if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) - return; - - dev->lock.lock_time = jiffies; - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); - - spin_lock_irqsave(&dev->tasklet_lock, irqflags); - dev->locked_tasklet_func = NULL; - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); - - i915_vblank_tasklet(dev); - drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); -} - irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 iir; + u32 iir, new_iir; u32 pipea_stats, pipeb_stats; + u32 vblank_status; + u32 vblank_enable; int vblank = 0; + unsigned long irqflags; + int irq_received; + int ret = IRQ_NONE; atomic_inc(&dev_priv->irq_received); - if (dev->pdev->msi_enabled) - I915_WRITE(IMR, ~0); iir = I915_READ(IIR); - if (iir == 0) { - if (dev->pdev->msi_enabled) { - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); - } - return IRQ_NONE; + if (IS_I965G(dev)) { + vblank_status = I915_START_VBLANK_INTERRUPT_STATUS; + vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE; + } else { + vblank_status = I915_VBLANK_INTERRUPT_STATUS; + vblank_enable = I915_VBLANK_INTERRUPT_ENABLE; } - /* - * Clear the PIPE(A|B)STAT regs before the IIR otherwise - * we may get extra interrupts. 
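[Aside: the register convention that makes the new i915_enable_pipestat() helper work is that enable bits occupy the high half of PIPESTAT, each matching status bit sits exactly 16 bits lower, and status bits are write-one-to-clear. A stand-alone sketch of the (mask >> 16) trick; the register is modeled as a plain variable and the bit position is invented.]

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t pipestat_hw;                   /* models PIPEASTAT */

    /* Mirrors i915_enable_pipestat(): set the enable bit, and OR in the
     * mask shifted down 16 so the paired status bit is acked in the same
     * register write. */
    static void enable_pipestat(uint32_t *shadow, uint32_t mask)
    {
        if ((*shadow & mask) != mask) {
            *shadow |= mask;
            pipestat_hw = *shadow | (mask >> 16);
        }
    }

    int main(void)
    {
        uint32_t shadow = 0;
        const uint32_t vblank_en = 1u << 17;       /* invented bit position */

        pipestat_hw = vblank_en >> 16;             /* stale latched status */
        enable_pipestat(&shadow, vblank_en);
        printf("PIPESTAT write: %#x\n", pipestat_hw);  /* prints 0x20002 */
        return 0;
    }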
- */ - if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { + for (;;) { + irq_received = iir != 0; + + /* Can't rely on pipestat interrupt bit in iir as it might + * have been cleared after the pipestat interrupt was received. + * It doesn't set the bit in iir again, but it still produces + * interrupts (for non-MSI). + */ + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); pipea_stats = I915_READ(PIPEASTAT); - if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)) - pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | - PIPE_VBLANK_INTERRUPT_ENABLE); - else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| - PIPE_VBLANK_INTERRUPT_STATUS)) { + pipeb_stats = I915_READ(PIPEBSTAT); + /* + * Clear the PIPE(A|B)STAT regs before the IIR + */ + if (pipea_stats & 0x8000ffff) { + I915_WRITE(PIPEASTAT, pipea_stats); + irq_received = 1; + } + + if (pipeb_stats & 0x8000ffff) { + I915_WRITE(PIPEBSTAT, pipeb_stats); + irq_received = 1; + } + spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + + if (!irq_received) + break; + + ret = IRQ_HANDLED; + + I915_WRITE(IIR, iir); + new_iir = I915_READ(IIR); /* Flush posted writes */ + + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); + + if (iir & I915_USER_INTERRUPT) { + dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); + DRM_WAKEUP(&dev_priv->irq_queue); + } + + if (pipea_stats & vblank_status) { vblank++; drm_handle_vblank(dev, 0); } - I915_WRITE(PIPEASTAT, pipea_stats); - } - if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { - pipeb_stats = I915_READ(PIPEBSTAT); - /* Ack the event */ - I915_WRITE(PIPEBSTAT, pipeb_stats); - - /* The vblank interrupt gets enabled even if we didn't ask for - it, so make sure it's shut down again */ - if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)) - pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | - PIPE_VBLANK_INTERRUPT_ENABLE); - else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| - PIPE_VBLANK_INTERRUPT_STATUS)) { + if (pipeb_stats & vblank_status) { vblank++; drm_handle_vblank(dev, 1); } - if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) + if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || + (iir & I915_ASLE_INTERRUPT)) opregion_asle_intr(dev); - I915_WRITE(PIPEBSTAT, pipeb_stats); - } - - I915_WRITE(IIR, iir); - if (dev->pdev->msi_enabled) - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IIR); /* Flush posted writes */ - - if (dev_priv->sarea_priv) - dev_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); - if (iir & I915_USER_INTERRUPT) { - dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); - DRM_WAKEUP(&dev_priv->irq_queue); + /* With MSI, interrupts are only generated when iir + * transitions from zero to nonzero. If another bit got + * set while we were handling the existing iir bits, then + * we would never get another interrupt. + * + * This is fine on non-MSI as well, as if we hit this path + * we avoid exiting the interrupt handler only to generate + * another one. + * + * Note that for MSI this could cause a stray interrupt report + * if an interrupt landed in the time between writing IIR and + * the posting read. This should be rare enough to never + * trigger the 99% of 100,000 interrupts test for disabling + * stray interrupts. 
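[Aside: the comment above is the crux of the new loop. A stand-alone simulation of the zero-to-nonzero edge behaviour it describes, with all bit values invented; the point is the final "iir = new_iir" — an event that lands between the IIR write and the posting read is serviced on the next loop pass instead of being lost for lack of a fresh MSI edge.]

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t pending = 0x1;     /* sources currently asserting */
    static int late_event = 1;         /* one event arrives mid-handler */

    static uint32_t read_iir(void)
    {
        return pending;
    }

    static void write_iir(uint32_t ack)
    {
        pending &= ~ack;               /* IIR is write-one-to-clear */
        if (late_event) {              /* lands after the ack, before the
                                        * posting read: no new MSI edge */
            pending |= 0x2;
            late_event = 0;
        }
    }

    int main(void)
    {
        uint32_t iir = read_iir();

        while (iir != 0) {                     /* the for (;;) above */
            uint32_t new_iir;

            write_iir(iir);
            new_iir = read_iir();              /* flush posted write */
            printf("serviced %#x, new iir %#x\n", iir, new_iir);
            iir = new_iir;                     /* pick up late arrivals */
        }
        return 0;
    }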
+ */ + iir = new_iir; } - if (iir & I915_ASLE_INTERRUPT) - opregion_asle_intr(dev); - - if (vblank && dev_priv->swaps_pending > 0) - schedule_work(&dev_priv->vblank_work); - - return IRQ_HANDLED; + return ret; } static int i915_emit_irq(struct drm_device * dev) @@ -454,12 +281,10 @@ static int i915_emit_irq(struct drm_device * dev) if (dev_priv->sarea_priv) dev_priv->sarea_priv->last_enqueue = dev_priv->counter; - BEGIN_LP_RING(6); + BEGIN_LP_RING(4); OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); - OUT_RING(0); - OUT_RING(0); OUT_RING(MI_USER_INTERRUPT); ADVANCE_LP_RING(); @@ -574,48 +399,16 @@ int i915_irq_wait(struct drm_device *dev, void *data, int i915_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 pipestat_reg = 0; - u32 pipestat; - u32 interrupt = 0; unsigned long irqflags; - switch (pipe) { - case 0: - pipestat_reg = PIPEASTAT; - interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; - break; - case 1: - pipestat_reg = PIPEBSTAT; - interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; - break; - default: - DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", - pipe); - return 0; - } - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - /* Enabling vblank events in IMR comes before PIPESTAT write, or - * there's a race where the PIPESTAT vblank bit gets set to 1, so - * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in - * ISR flashes to 1, but the IIR bit doesn't get set to 1 because - * IMR masks it. It doesn't ever get set after we clear the masking - * in IMR because the ISR bit is edge, not level-triggered, on the - * OR of PIPESTAT bits. 
- */ - i915_enable_irq(dev_priv, interrupt); - pipestat = I915_READ(pipestat_reg); if (IS_I965G(dev)) - pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); else - pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; - /* Clear any stale interrupt status */ - pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | - PIPE_VBLANK_INTERRUPT_STATUS); - I915_WRITE(pipestat_reg, pipestat); - (void) I915_READ(pipestat_reg); /* Posting read */ + i915_enable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); - return 0; } @@ -625,37 +418,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) void i915_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 pipestat_reg = 0; - u32 pipestat; - u32 interrupt = 0; unsigned long irqflags; - switch (pipe) { - case 0: - pipestat_reg = PIPEASTAT; - interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; - break; - case 1: - pipestat_reg = PIPEBSTAT; - interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; - break; - default: - DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", - pipe); - return; - break; - } - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_disable_irq(dev_priv, interrupt); - pipestat = I915_READ(pipestat_reg); - pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | - PIPE_VBLANK_INTERRUPT_ENABLE); - /* Clear any stale interrupt status */ - pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | - PIPE_VBLANK_INTERRUPT_STATUS); - I915_WRITE(pipestat_reg, pipestat); - (void) I915_READ(pipestat_reg); /* Posting read */ + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -696,123 +464,21 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data, int i915_vblank_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_vblank_swap_t *swap = data; - drm_i915_vbl_swap_t *vbl_swap, *vbl_old; - unsigned int pipe, seqtype, curseq; - unsigned long irqflags; - struct list_head *list; - int ret; - - if (!dev_priv || !dev_priv->sarea_priv) { - DRM_ERROR("%s called with no initialization\n", __func__); - return -EINVAL; - } - - if (dev_priv->sarea_priv->rotation) { - DRM_DEBUG("Rotation not supported\n"); - return -EINVAL; - } - - if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | - _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { - DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); - return -EINVAL; - } - - pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; - - seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); - - if (!(dev_priv->vblank_pipe & (1 << pipe))) { - DRM_ERROR("Invalid pipe %d\n", pipe); - return -EINVAL; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - if (!drm_get_drawable_info(dev, swap->drawable)) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); - return -EINVAL; - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - /* - * We take the ref here and put it when the swap actually completes - * in the tasklet. + /* The delayed swap mechanism was fundamentally racy, and has been + * removed. 
The model was that the client requested a delayed flip/swap + * from the kernel, then waited for vblank before continuing to perform + * rendering. The problem was that the kernel might wake the client + * up before it dispatched the vblank swap (since the lock has to be + * held while touching the ringbuffer), in which case the client would + * clear and start the next frame before the swap occurred, and + * flicker would occur in addition to likely missing the vblank. + * + * In the absence of this ioctl, userland falls back to a correct path + * of waiting for a vblank, then dispatching the swap on its own. + * Context switching to userland and back is plenty fast enough for + * meeting the requirements of vblank swapping. */ - ret = drm_vblank_get(dev, pipe); - if (ret) - return ret; - curseq = drm_vblank_count(dev, pipe); - - if (seqtype == _DRM_VBLANK_RELATIVE) - swap->sequence += curseq; - - if ((curseq - swap->sequence) <= (1<<23)) { - if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) { - swap->sequence = curseq + 1; - } else { - DRM_DEBUG("Missed target sequence\n"); - drm_vblank_put(dev, pipe); - return -EINVAL; - } - } - - vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); - - if (!vbl_swap) { - DRM_ERROR("Failed to allocate memory to queue swap\n"); - drm_vblank_put(dev, pipe); - return -ENOMEM; - } - - vbl_swap->drw_id = swap->drawable; - vbl_swap->pipe = pipe; - vbl_swap->sequence = swap->sequence; - - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); - - list_for_each(list, &dev_priv->vbl_swaps.head) { - vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); - - if (vbl_old->drw_id == swap->drawable && - vbl_old->pipe == pipe && - vbl_old->sequence == swap->sequence) { - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); - drm_vblank_put(dev, pipe); - drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); - DRM_DEBUG("Already scheduled\n"); - return 0; - } - } - - if (dev_priv->swaps_pending >= 10) { - DRM_DEBUG("Too many swaps queued\n"); - DRM_DEBUG(" pipe 0: %d pipe 1: %d\n", - drm_vblank_count(dev, 0), - drm_vblank_count(dev, 1)); - - list_for_each(list, &dev_priv->vbl_swaps.head) { - vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); - DRM_DEBUG("\tdrw %x pipe %d seq %x\n", - vbl_old->drw_id, vbl_old->pipe, - vbl_old->sequence); - } - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); - drm_vblank_put(dev, pipe); - drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); - return -EBUSY; - } - - list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); - dev_priv->swaps_pending++; - - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); - - return 0; + return -EINVAL; } /* drm_dma.h hooks @@ -822,37 +488,35 @@ void i915_driver_irq_preinstall(struct drm_device * dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; I915_WRITE(HWSTAM, 0xeffe); + I915_WRITE(PIPEASTAT, 0); + I915_WRITE(PIPEBSTAT, 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); + (void) I915_READ(IER); } int i915_driver_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int ret, num_pipes = 2; - - spin_lock_init(&dev_priv->swaps_lock); - INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); - INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler); - dev_priv->swaps_pending = 0; - - /* Set initial unmasked IRQs to just the selected vblank pipes. 
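[Aside: returning to the removed vblank-swap ioctl earlier in this hunk — the userland fallback its comment describes is simply "wait for the vblank, then dispatch the swap yourself". A hedged libdrm sketch of that path: the device path is an assumption and emit_swap_blit() is a hypothetical placeholder for the client's own rendering dispatch.]

    #include <fcntl.h>
    #include <unistd.h>
    #include <xf86drm.h>               /* libdrm: drmWaitVBlank() */

    static void emit_swap_blit(int fd)
    {
        (void)fd;                      /* hypothetical: client blit goes here */
    }

    int main(void)
    {
        int fd = open("/dev/dri/card0", O_RDWR);   /* assumed node */
        drmVBlank vbl = { .request = { .type = DRM_VBLANK_RELATIVE,
                                       .sequence = 1 } };

        if (fd < 0)
            return 1;
        if (drmWaitVBlank(fd, &vbl) == 0)  /* sleep until the next vblank */
            emit_swap_blit(fd);            /* then swap from userland */
        close(fd);
        return 0;
    }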
*/ - dev_priv->irq_mask_reg = ~0; - - ret = drm_vblank_init(dev, num_pipes); - if (ret) - return ret; dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK; + /* Unmask the interrupts that we always want on. */ + dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; + + dev_priv->pipestat[0] = 0; + dev_priv->pipestat[1] = 0; + + /* Disable pipe interrupt enables, clear pending pipe status */ + I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); + I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); + /* Clear pending interrupt status */ + I915_WRITE(IIR, I915_READ(IIR)); - I915_WRITE(IMR, dev_priv->irq_mask_reg); I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); + I915_WRITE(IMR, dev_priv->irq_mask_reg); (void) I915_READ(IER); opregion_enable_asle(dev); @@ -864,7 +528,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev) void i915_driver_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 temp; if (!dev_priv) return; @@ -872,13 +535,12 @@ void i915_driver_irq_uninstall(struct drm_device * dev) dev_priv->vblank_pipe = 0; I915_WRITE(HWSTAM, 0xffffffff); + I915_WRITE(PIPEASTAT, 0); + I915_WRITE(PIPEBSTAT, 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); - temp = I915_READ(PIPEASTAT); - I915_WRITE(PIPEASTAT, temp); - temp = I915_READ(PIPEBSTAT); - I915_WRITE(PIPEBSTAT, temp); - temp = I915_READ(IIR); - I915_WRITE(IIR, temp); + I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); + I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); + I915_WRITE(IIR, I915_READ(IIR)); } diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 1787a0c7e3a..13ae731a33d 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c @@ -235,17 +235,15 @@ void opregion_enable_asle(struct drm_device *dev) struct opregion_asle *asle = dev_priv->opregion.asle; if (asle) { - u32 pipeb_stats = I915_READ(PIPEBSTAT); if (IS_MOBILE(dev)) { - /* Many devices trigger events with a write to the - legacy backlight controller, so we need to ensure - that it's able to generate interrupts */ - I915_WRITE(PIPEBSTAT, pipeb_stats |= - I915_LEGACY_BLC_EVENT_ENABLE); - i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); - } else - i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT); + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + i915_enable_pipestat(dev_priv, 1, + I915_LEGACY_BLC_EVENT_ENABLE); + spin_unlock_irqrestore(&dev_priv->user_irq_lock, + irqflags); + } asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5c2d9f206d0..9d24aaeb8a4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -522,11 +522,15 @@ #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) #define DCC_ADDRESSING_MODE_MASK (3 << 0) #define DCC_CHANNEL_XOR_DISABLE (1 << 10) +#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) /** 965 MCH register controlling DRAM channel configuration */ #define C0DRB3 0x10206 #define C1DRB3 0x10606 +/** GM965 GM45 render standby register */ +#define MCHBAR_RENDER_STANDBY 0x111B8 + /* * Overlay regs */ 
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 603fe742ccd..5d84027ee8f 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -240,6 +240,13 @@ int i915_save_state(struct drm_device *dev) pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); + /* Render Standby */ + if (IS_I965G(dev) && IS_MOBILE(dev)) + dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); + + /* Hardware status page */ + dev_priv->saveHWS = I915_READ(HWS_PGA); + /* Display arbitration control */ dev_priv->saveDSPARB = I915_READ(DSPARB); @@ -365,6 +372,14 @@ int i915_restore_state(struct drm_device *dev) pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); + /* Render Standby */ + if (IS_I965G(dev) && IS_MOBILE(dev)) + I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); + + /* Hardware status page */ + I915_WRITE(HWS_PGA, dev_priv->saveHWS); + + /* Display arbitration */ I915_WRITE(DSPARB, dev_priv->saveDSPARB); /* Pipe & plane A info */ diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index c1d12dbfa8d..b49c5ff2958 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c @@ -396,6 +396,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) int mga_driver_load(struct drm_device * dev, unsigned long flags) { drm_mga_private_t *dev_priv; + int ret; dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); if (!dev_priv) @@ -415,6 +416,13 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags) dev->types[7] = _DRM_STAT_PRIMARY; dev->types[8] = _DRM_STAT_SECONDARY; + ret = drm_vblank_init(dev, 1); + + if (ret) { + (void) mga_driver_unload(dev); + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c index bab42f41188..daa6041a483 100644 --- a/drivers/gpu/drm/mga/mga_irq.c +++ b/drivers/gpu/drm/mga/mga_irq.c @@ -152,11 +152,6 @@ void mga_driver_irq_preinstall(struct drm_device * dev) int mga_driver_irq_postinstall(struct drm_device *dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - int ret; - - ret = drm_vblank_init(dev, 1); - if (ret) - return ret; DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 3265d53ba91..601f4c0e5da 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -45,6 +45,7 @@ static struct drm_driver driver = { DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_r128_buf_priv_t), + .load = r128_driver_load, .preclose = r128_driver_preclose, .lastclose = r128_driver_lastclose, .get_vblank_counter = r128_get_vblank_counter, @@ -84,6 +85,11 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +int r128_driver_load(struct drm_device * dev, unsigned long flags) +{ + return drm_vblank_init(dev, 1); +} + static int __init r128_init(void) { driver.num_ioctls = r128_max_ioctl; diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h index 5898b274279..797a26c42da 100644 --- a/drivers/gpu/drm/r128/r128_drv.h +++ b/drivers/gpu/drm/r128/r128_drv.h @@ -159,6 +159,7 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev); extern int r128_driver_irq_postinstall(struct drm_device *dev); extern void r128_driver_irq_uninstall(struct drm_device * dev); extern void r128_driver_lastclose(struct 
drm_device * dev); +extern int r128_driver_load(struct drm_device * dev, unsigned long flags); extern void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c index d7349012a68..69810fb8ac4 100644 --- a/drivers/gpu/drm/r128/r128_irq.c +++ b/drivers/gpu/drm/r128/r128_irq.c @@ -102,7 +102,7 @@ void r128_driver_irq_preinstall(struct drm_device * dev) int r128_driver_irq_postinstall(struct drm_device *dev) { - return drm_vblank_init(dev, 1); + return 0; } void r128_driver_irq_uninstall(struct drm_device * dev) diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 59a2132a8f5..dcebb4bee7a 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -653,15 +653,16 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); /* Turn on bus mastering */ - if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { - /* rs400, rs690/rs740 */ - tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS; + /* rs600/rs690/rs740 */ + tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); - } else if (!(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || - ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R423))) { - /* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */ + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { + /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); } /* PCIE cards appears to not need this */ @@ -1750,6 +1751,18 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) else dev_priv->flags |= RADEON_IS_PCI; + ret = drm_addmap(dev, drm_get_resource_start(dev, 2), + drm_get_resource_len(dev, 2), _DRM_REGISTERS, + _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); + if (ret != 0) + return ret; + + ret = drm_vblank_init(dev, 2); + if (ret) { + radeon_driver_unload(dev); + return ret; + } + DRM_DEBUG("%s card detected\n", ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? 
"PCIE" : "PCI")))); return ret; @@ -1766,12 +1779,6 @@ int radeon_driver_firstopen(struct drm_device *dev) dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; - ret = drm_addmap(dev, drm_get_resource_start(dev, 2), - drm_get_resource_len(dev, 2), _DRM_REGISTERS, - _DRM_READ_ONLY, &dev_priv->mmio); - if (ret != 0) - return ret; - dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); ret = drm_addmap(dev, dev_priv->fb_aper_offset, drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, @@ -1787,6 +1794,9 @@ int radeon_driver_unload(struct drm_device *dev) drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); + + drm_rmmap(dev, dev_priv->mmio); + drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); dev->dev_private = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 4dbb813910c..3bbb871b25d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -287,7 +287,6 @@ typedef struct drm_radeon_private { unsigned long gart_textures_offset; drm_local_map_t *sarea; - drm_local_map_t *mmio; drm_local_map_t *cp_ring; drm_local_map_t *ring_rptr; drm_local_map_t *gart_textures; @@ -300,7 +299,6 @@ typedef struct drm_radeon_private { atomic_t swi_emitted; int vblank_crtc; uint32_t irq_enable_reg; - int irq_enabled; uint32_t r500_disp_irq_reg; struct radeon_surface surfaces[RADEON_MAX_SURFACES]; @@ -318,6 +316,7 @@ typedef struct drm_radeon_private { int num_gb_pipes; int track_flush; + drm_local_map_t *mmio; } drm_radeon_private_t; typedef struct drm_radeon_buf_priv { @@ -447,12 +446,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, * handling, not bus mastering itself. */ #define RADEON_BUS_CNTL 0x0030 -/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */ +/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ # define RADEON_BUS_MASTER_DIS (1 << 6) -/* rs400, rs690/rs740 */ -# define RS400_BUS_MASTER_DIS (1 << 14) -# define RS400_MSI_REARM (1 << 20) -/* see RS480_MSI_REARM in AIC_CNTL for rs480 */ +/* rs600/rs690/rs740 */ +# define RS600_BUS_MASTER_DIS (1 << 14) +# define RS600_MSI_REARM (1 << 20) +/* see RS400_MSI_REARM in AIC_CNTL for rs480 */ #define RADEON_BUS_CNTL1 0x0034 # define RADEON_PMI_BM_DIS (1 << 2) @@ -937,7 +936,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, #define RADEON_AIC_CNTL 0x01d0 # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) -# define RS480_MSI_REARM (1 << 3) +# define RS400_MSI_REARM (1 << 3) #define RADEON_AIC_STAT 0x01d4 #define RADEON_AIC_PT_BASE 0x01d8 #define RADEON_AIC_LO_ADDR 0x01dc diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 5079f7054a2..99be11418ac 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c @@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) else dev_priv->irq_enable_reg &= ~mask; - RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); + if (!dev->irq_enabled) + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); } static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) @@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) else dev_priv->r500_disp_irq_reg &= ~mask; - RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); + if (!dev->irq_enabled) + RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); } int radeon_enable_vblank(struct drm_device *dev, int crtc) @@ -337,15 +339,10 @@ int 
radeon_driver_irq_postinstall(struct drm_device *dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; - int ret; atomic_set(&dev_priv->swi_emitted, 0); DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); - ret = drm_vblank_init(dev, 2); - if (ret) - return ret; - dev->max_vblank_count = 0x001fffff; radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); @@ -360,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; - dev_priv->irq_enabled = 0; - if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) RADEON_WRITE(R500_DxMODE_INT_MASK, 0); /* Disable *all* interrupts */ diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index 665d319b927..c248c1d3726 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c @@ -314,7 +314,6 @@ int via_driver_irq_postinstall(struct drm_device *dev) if (!dev_priv) return -EINVAL; - drm_vblank_init(dev, 1); status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL | dev_priv->irq_enable_mask); diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c index a967556be01..2c4f0b48579 100644 --- a/drivers/gpu/drm/via/via_map.c +++ b/drivers/gpu/drm/via/via_map.c @@ -107,8 +107,17 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); if (ret) { drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); + return ret; } - return ret; + + ret = drm_vblank_init(dev, 1); + if (ret) { + drm_sman_takedown(&dev_priv->sman); + drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); + return ret; + } + + return 0; } int via_driver_unload(struct drm_device *dev)
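[Aside: the mga, r128, radeon and via hunks in this section all converge on one pattern — drm_vblank_init() moves out of irq_postinstall into the driver .load hook, and a failure there unwinds whatever load had already set up. A generic illustration of that shape; the "foo" names and the CRTC count are placeholders, not any real driver.]

    static int foo_driver_load(struct drm_device *dev, unsigned long flags)
    {
        struct foo_private *dev_priv;          /* placeholder type */
        int ret;

        dev_priv = drm_alloc(sizeof(*dev_priv), DRM_MEM_DRIVER);
        if (!dev_priv)
            return -ENOMEM;
        dev->dev_private = dev_priv;

        ret = drm_vblank_init(dev, 1);         /* one CRTC, for example */
        if (ret) {
            /* unwind in reverse order, as via_driver_load() above does */
            drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
            dev->dev_private = NULL;
            return ret;
        }

        return 0;
    }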