Commit 7db2ccdd authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
  drm/i915: use PIPE_CONTROL instruction on Ironlake and Sandy Bridge
  drm/i915: cleanup FBC buffers at unload time
  drm/i915: fix tiling limits for i915 class hw v2
  drm/i915: set DIDL using the ACPI video output device _ADR method return.
  drm/i915: Fix 82854 PCI ID, and treat it like other 85X
  drm/i915: Attempt to fix watermark setup on 85x (v2)
parents a2b297ff e552eb70
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
dev_priv->compressed_fb = compressed_fb;
if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_mm_put_block(dev_priv->compressed_fb);
if (!IS_GM45(dev))
drm_mm_put_block(dev_priv->compressed_llb);
}
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
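The ordering in the unload path above matters: the compressed framebuffer node (and, on pre-GM45 parts, the line-length buffer node) are handed out by a drm_mm allocator, presumably the same dev_priv->vram manager that drm_mm_takedown() dismantles immediately afterwards, so i915_cleanup_compression() has to return them with drm_mm_put_block() first. A minimal sketch of that lifecycle, written against the drm_mm API of this kernel generation and not taken from the patch (stolen_size and size are placeholders):

struct drm_mm mm;
struct drm_mm_node *node;

drm_mm_init(&mm, 0, stolen_size);              /* once, at driver load          */

node = drm_mm_search_free(&mm, size, 4096, 0); /* find a 4K-aligned hole        */
if (node)
	node = drm_mm_get_block(node, size, 4096); /* claim it; node->start is the
	                                            * offset written to DPFC_CB_BASE
	                                            * or FBC_CFB_BASE above        */

drm_mm_put_block(node);                        /* at unload: give the node back */
drm_mm_takedown(&mm);                          /* only then tear the manager down */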
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
};
const static struct intel_device_info intel_i85x_info = {
.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i865g_info = {
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
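With both 85x IDs, 0x3582 and the corrected 0x358e for the 82854, now bound to intel_i85x_info and its new is_i85x flag, generation checks can consult the per-device info instead of comparing raw PCI IDs, which is exactly what the IS_I85X() change further down does. A rough sketch of the difference; setup_85x_quirks() is a hypothetical helper, not something in the driver:

/* Old style: open-coded ID compare, silently misses the 82854 at 0x358e. */
if (dev->pci_device == 0x3582)
	setup_85x_quirks(dev);

/* New style: one flag covers every PCI ID bound to intel_i85x_info. */
if (INTEL_INFO(dev)->is_i85x)
	setup_85x_quirks(dev);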
@@ -195,6 +195,7 @@ struct intel_overlay;
struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
u8 is_i85x : 1;
u8 is_i915g : 1;
u8 is_i9xx : 1;
u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
void *hw_status_page;
void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
unsigned int status_gfx_addr;
unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
struct resource mch_res;
@@ -630,6 +634,9 @@ typedef struct drm_i915_private {
u8 max_delay;
enum no_fbc_reason no_fbc_reason;
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -1070,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1135,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
}
#define PIPE_CONTROL_FLUSH(addr) \
OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
PIPE_CONTROL_DEPTH_STALL); \
OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
OUT_RING(0); \
OUT_RING(0);
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,6 +1629,39 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (dev_priv->mm.next_gem_seqno == 0)
dev_priv->mm.next_gem_seqno++;
if (HAS_PIPE_CONTROL(dev)) {
u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
/*
* Workaround qword write incoherence by flushing the
* PIPE_NOTIFY buffers out to memory before requesting
* an interrupt.
*/
BEGIN_LP_RING(32);
OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
OUT_RING(seqno);
OUT_RING(0);
PIPE_CONTROL_FLUSH(scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
PIPE_CONTROL_FLUSH(scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(scratch_addr);
OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
PIPE_CONTROL_NOTIFY);
OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
OUT_RING(seqno);
OUT_RING(0);
ADVANCE_LP_RING();
} else {
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
@@ -1629,6 +1669,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
DRM_DEBUG_DRIVER("%d\n", seqno);
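A note on the ring traffic above, not part of the patch: each PIPE_CONTROL is a four-dword packet (the low bits of GFX_OP_PIPE_CONTROL, added later in this diff, encode a DWord length of 2, which by the usual convention means total length minus two), and PIPE_CONTROL_FLUSH() emits one whole packet per call. That is why BEGIN_LP_RING(32) reserves exactly eight packets: the initial flush, six dummy flushes, and the final notifying write. Stepping scratch_addr by 128 puts each dummy write in its own cacheline of the seqno page, which is the substance of the workaround described in the comment. A sketch of one such packet, with illustrative values:

/* One PIPE_CONTROL packet as built above (payload left at zero). */
u32 pkt[4];
pkt[0] = GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_DEPTH_STALL;
pkt[1] = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; /* GTT address the qword is written to */
pkt[2] = 0;                                      /* low dword of the qword payload      */
pkt[3] = 0;                                      /* high dword                          */
/* The final packet in the sequence also sets PIPE_CONTROL_NOTIFY, targets
 * seqno_gfx_addr and carries the new seqno in dword 2, so the interrupt and
 * the visible seqno arrive together. */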
@@ -1752,6 +1793,9 @@ i915_get_gem_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (IS_I965G(dev))
return ((volatile u32 *)(dev_priv->seqno_page))[0];
else
return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
if (obj_priv->tiling_mode == I915_TILING_Y &&
HAS_128_BYTE_Y_TILING(dev))
WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
else
WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
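A worked check of the new limits, my arithmetic rather than anything in the commit: the fence pitch field stores log2 of the stride in tiles, which is why I915_FENCE_MAX_PITCH_VAL drops from 0x10 to 4 in the register header below, and why i915_tiling_ok() can simply reject strides above 8192 bytes:

/* X tiles are 512 bytes wide on i915-class hardware:
 *   stride  8192 -> 8192/512 = 16 tiles, pitch_val = ffs(16) - 1 = 4  (limit 4, ok)
 *   stride 16384 -> 32 tiles,            pitch_val = 5                (rejected)
 * Y tiles are 128 bytes wide where HAS_128_BYTE_Y_TILING() holds:
 *   stride  8192 -> 8192/128 = 64 tiles, pitch_val = ffs(64) - 1 = 6  (limit 6, ok)
 * Both encodings therefore top out at the same 8 KiB stride.
 */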
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
/*
* 965+ support PIPE_CONTROL commands, which provide finer grained control
* over cache flushing.
*/
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
obj = drm_gem_object_alloc(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096);
if (ret)
goto err_unref;
dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
dev_priv->seqno_page = kmap(obj_priv->pages[0]);
if (dev_priv->seqno_page == NULL)
goto err_unpin;
dev_priv->seqno_obj = obj;
memset(dev_priv->seqno_page, 0, PAGE_SIZE);
return 0;
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(obj);
err:
return ret;
}
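Putting this together with i915_add_request() and i915_get_gem_seqno() above: the pinned 4 KiB object serves as a private seqno page. The summary below is mine; only the field names come from the patch.

/* dev_priv->seqno_gfx_addr : GTT offset of the page, used in the ring commands
 * dev_priv->seqno_page     : kmap()ed CPU view of the same page
 *
 *   dword 0          <- qword-written by the final, notifying PIPE_CONTROL
 *   bytes 128..768   <- scratch cachelines hit by the six dummy
 *                       PIPE_CONTROL_FLUSH() writes in i915_add_request()
 */
u32 seqno = ((volatile u32 *)dev_priv->seqno_page)[0];   /* what the seqno read does */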
static int
i915_gem_init_hws(struct drm_device *dev)
{
@@ -4563,7 +4656,8 @@ i915_gem_init_hws(struct drm_device *dev)
obj = drm_gem_object_alloc(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
return -ENOMEM;
ret = -ENOMEM;
goto err;
}
obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
@@ -4571,7 +4665,7 @@ i915_gem_init_hws(struct drm_device *dev)
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
drm_gem_object_unreference(obj);
return ret;
goto err_unref;
}
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
return -EINVAL;
ret = -EINVAL;
goto err_unpin;
}
if (HAS_PIPE_CONTROL(dev)) {
ret = i915_gem_init_pipe_control(dev);
if (ret)
goto err_unpin;
}
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
if (IS_GEN6(dev)) {
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(obj);
err:
return 0;
}
static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = dev_priv->seqno_obj;
obj_priv = to_intel_bo(obj);
kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
dev_priv->seqno_obj = NULL;
dev_priv->seqno_page = NULL;
}
static void
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
dev_priv->hw_status_page = NULL;
if (HAS_PIPE_CONTROL(dev))
i915_gem_cleanup_pipe_control(dev);
/* Write high address into HWS_PGA when disabling. */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
@@ -202,22 +202,18 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
* reg, so dont bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else if (IS_I9XX(dev)) {
uint32_t pitch_val = ffs(stride / tile_width) - 1;
} else if (IS_GEN3(dev) || IS_GEN2(dev)) {
if (stride > 8192)
return false;
/* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
* instead of 4 (2KB) on 945s.
*/
if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
size > (I830_FENCE_MAX_SIZE_VAL << 20))
if (IS_GEN3(dev)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
uint32_t pitch_val = ffs(stride / tile_width) - 1;
if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
size > (I830_FENCE_MAX_SIZE_VAL << 19))
if (size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
}
/* 965+ just needs multiples of tile width */
if (IS_I965G(dev)) {
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
if (gt_iir & GT_USER_INTERRUPT) {
if (gt_iir & GT_PIPE_NOTIFY) {
u32 seqno = i915_get_gem_seqno(dev);
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
if (HAS_PCH_SPLIT(dev))
ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev)
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
if (HAS_PCH_SPLIT(dev))
ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_USER_INTERRUPT;
u32 render_mask = GT_PIPE_NOTIFY;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
acpi_handle handle;
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
return;
if (acpi_is_video_device(acpi_dev))
acpi_video_bus = acpi_dev;
else {
list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
if (acpi_is_video_device(acpi_cdev)) {
acpi_video_bus = acpi_cdev;
break;
}
}
}
if (!acpi_video_bus) {
printk(KERN_WARNING "No ACPI video bus found\n");
return;
}
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
if (i >= 8) {
dev_printk (KERN_ERR, &dev->pdev->dev,
"More than 8 outputs detected\n");
return;
}
status =
acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
NULL, &device_id);
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
i++;
}
}
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
opregion->acpi->didl[i] = 0;
return;
blind_set:
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
opregion->acpi->didl[i] |= (1<<31) | output_type | i;
i++;
}
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
opregion->acpi->didl[i] = 0;
goto end;
}
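Purely for illustration, with made-up _ADR values: on a machine whose ACPI video bus exposes two child devices that return non-zero _ADR results, the loop above would leave the OpRegion display ID list as:

opregion->acpi->didl[0] = 0x0400 & 0x0f0f;  /* first child's _ADR (hypothetical)     */
opregion->acpi->didl[1] = 0x0301 & 0x0f0f;  /* second child's _ADR (hypothetical)    */
opregion->acpi->didl[2] = 0;                /* fewer than 8 outputs: NULL-terminated */
/* A child whose _ADR evaluates to 0 sends the code to blind_set instead, which
 * falls back to deriving the IDs from the connector types. */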
int intel_opregion_init(struct drm_device *dev, int resume)
@@ -230,6 +230,16 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WC_FLUSH (1<<12)
#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
#define PIPE_CONTROL_ISP_DIS (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
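A quick cross-check of the new header value, my arithmetic rather than the source's:

/* GFX_OP_PIPE_CONTROL = (0x3<<29) | (0x3<<27) | (0x2<<24) | 2
 *                     = 0x60000000 | 0x18000000 | 0x02000000 | 0x2
 *                     = 0x7a000002
 * The low bits are the DWord-length field; a value of 2 conventionally means
 * "total dwords minus two", i.e. the four-dword packets emitted by
 * i915_add_request() and PIPE_CONTROL_FLUSH() above.
 */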
/*
* Fence registers
@@ -241,7 +251,7 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
#define I915_FENCE_MAX_PITCH_VAL 0x10
#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
@@ -2285,6 +2295,7 @@
#define DEIER 0x4400c
/* GT interrupt */
#define GT_PIPE_NOTIFY (1 << 4)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
else if (IS_I9XX(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else {
if (IS_I85X(dev))
} else if (IS_I85X(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i85x_get_fifo_size;
else if (IS_845G(dev))
} else {
dev_priv->display.update_wm = i830_update_wm;
if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
dev_priv->display.update_wm = i830_update_wm;
}
}
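The reshuffled chain is easier to audit as a table. This is only a summary of the resulting selection (the g4x assignment at the top of the hunk belongs to a condition just above the excerpt); the 85x watermark fix is the row pairing i9xx_update_wm with i85x_get_fifo_size:

/* IS_I965G : i965_update_wm
 * IS_I9XX  : i9xx_update_wm + i9xx_get_fifo_size
 * IS_I85X  : i9xx_update_wm + i85x_get_fifo_size   <- the 85x fix
 * IS_845G  : i830_update_wm + i845_get_fifo_size
 * else     : i830_update_wm + i830_get_fifo_size
 */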