Commit 94e0fb08 authored by Linus Torvalds

Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (57 commits)
  drm/i915: Handle ERESTARTSYS during page fault
  drm/i915: Warn before mmaping a purgeable buffer.
  drm/i915: Track purged state.
  drm/i915: Remove eviction debug spam
  drm/i915: Immediately discard any backing storage for unneeded objects
  drm/i915: Do not mis-classify clean objects as purgeable
  drm/i915: Whitespace correction for madv
  drm/i915: BUG_ON page refleak during unbind
  drm/i915: Search harder for a reusable object
  drm/i915: Clean up evict from list.
  drm/i915: Add tracepoints
  drm/i915: framebuffer compression for GM45+
  drm/i915: split display functions by chip type
  drm/i915: Skip the sanity checks if the current relocation is valid
  drm/i915: Check that the relocation points to within the target
  drm/i915: correct FBC update when pipe base update occurs
  drm/i915: blacklist Acer AspireOne lid status
  ACPI: make ACPI button funcs no-ops if not built in
  drm/i915: prevent FIFO calculation overflows on 32 bits with high dotclocks
  drm/i915: intel_display.c handle latency variable efficiently
  ...

Fix up trivial conflicts in drivers/gpu/drm/i915/{i915_dma.c|i915_drv.h}
parents b7f21bb2 c715089f
......@@ -144,6 +144,7 @@ void clflush_cache_range(void *vaddr, unsigned int size)
mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
static void __cpa_flush_all(void *arg)
{
......
......@@ -115,6 +115,9 @@ static const struct file_operations acpi_button_state_fops = {
.release = single_release,
};
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
......@@ -231,11 +234,38 @@ static int acpi_button_remove_fs(struct acpi_device *device)
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
int acpi_lid_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
}
EXPORT_SYMBOL(acpi_lid_notifier_register);
int acpi_lid_notifier_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
}
EXPORT_SYMBOL(acpi_lid_notifier_unregister);
int acpi_lid_open(void)
{
acpi_status status;
unsigned long long state;
status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
&state);
if (ACPI_FAILURE(status))
return -ENODEV;
return !!state;
}
EXPORT_SYMBOL(acpi_lid_open);
static int acpi_lid_send_state(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
unsigned long long state;
acpi_status status;
int ret;
status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
if (ACPI_FAILURE(status))
......@@ -244,7 +274,12 @@ static int acpi_lid_send_state(struct acpi_device *device)
/* input layer checks if event is redundant */
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
return 0;
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
device);
return ret;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
......@@ -366,8 +401,14 @@ static int acpi_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_remove_fs;
if (button->type == ACPI_BUTTON_TYPE_LID)
if (button->type == ACPI_BUTTON_TYPE_LID) {
acpi_lid_send_state(device);
/*
* This assumes there's only one lid device, or if there are
* more we only care about the last one...
*/
lid_device = device;
}
if (device->wakeup.flags.valid) {
/* Button's GPE is run-wake GPE */
......
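The hunk above exports a small lid API (acpi_lid_notifier_register/unregister and acpi_lid_open). As a hedged illustration of how a consumer would use it, here is a minimal sketch; the i915 LVDS code further down in this diff is the real in-tree user, and the example_* names below are purely hypothetical:

```c
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <acpi/button.h>

/* Hypothetical consumer of the new lid API; not part of this patch. */
static int example_lid_notify(struct notifier_block *nb,
			      unsigned long lid_state, void *data)
{
	/* lid_state is the raw _LID value: non-zero means the lid is open. */
	pr_info("example: lid is now %s\n", lid_state ? "open" : "closed");
	return NOTIFY_OK;
}

static struct notifier_block example_lid_nb = {
	.notifier_call = example_lid_notify,
};

static int example_setup(void)
{
	if (acpi_lid_notifier_register(&example_lid_nb))
		return -ENODEV;

	/* acpi_lid_open(): 1 = open, 0 = closed, -ENODEV if no lid device. */
	pr_info("example: initial lid state %d\n", acpi_lid_open());
	return 0;
}

static void example_teardown(void)
{
	acpi_lid_notifier_unregister(&example_lid_nb);
}
```

The notifier's unsigned long argument carries the same _LID value that acpi_lid_send_state() passes to blocking_notifier_call_chain() above.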
......@@ -46,6 +46,8 @@
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
......@@ -91,6 +93,7 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
......@@ -804,23 +807,39 @@ static void intel_i830_setup_flush(void)
if (!intel_private.i8xx_page)
return;
/* make page uncached */
map_page_into_agp(intel_private.i8xx_page);
intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
if (!intel_private.i8xx_flush_page)
intel_i830_fini_flush();
}
static void
do_wbinvd(void *null)
{
wbinvd();
}
/* The chipset_flush interface needs to get data that has already been
* flushed out of the CPU all the way out to main memory, because the GPU
* doesn't snoop those buffers.
*
* The 8xx series doesn't have the same lovely interface for flushing the
* chipset write buffers that the later chips do. According to the 865
* specs, it's 64 octwords, or 1KB. So, to get those previous things in
* that buffer out, we just fill 1KB and clflush it out, on the assumption
* that it'll push whatever was in there out. It appears to work.
*/
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
unsigned int *pg = intel_private.i8xx_flush_page;
int i;
for (i = 0; i < 256; i += 2)
*(pg + i) = i;
memset(pg, 0, 1024);
wmb();
if (cpu_has_clflush) {
clflush_cache_range(pg, 1024);
} else {
if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
}
/* The intel i830 automatically initializes the agp aperture during POST.
......@@ -1341,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
case PCI_DEVICE_ID_INTEL_G41_HB:
case PCI_DEVICE_ID_INTEL_B43_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
......@@ -2335,6 +2355,8 @@ static const struct intel_driver_description {
"Q45/Q43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
"G45/G43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
"B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
......@@ -2535,6 +2557,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
......
......@@ -102,6 +102,7 @@ config DRM_I915
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
......
......@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
if (IS_ERR(obj->filp))
goto free;
/* Basically we want to disable the OOM killer and handle ENOMEM
* ourselves by sacrificing pages from cached buffers.
* XXX shmem_file_[gs]et_gfp_mask()
*/
mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
GFP_HIGHUSER |
__GFP_COLD |
__GFP_FS |
__GFP_RECLAIMABLE |
__GFP_NORETRY |
__GFP_NOWARN |
__GFP_NOMEMALLOC);
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
......
......@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
......
......@@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_gem_object *obj = obj_priv->obj;
seq_printf(m, " %p: %s %08x %08x %d",
seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
obj,
get_pin_flag(obj_priv),
obj->size,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
obj_priv->last_rendering_seqno,
obj_priv->dirty ? "dirty" : "");
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
......
......@@ -33,6 +33,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/vgaarb.h>
/* Really want an OS-independent resettable timer. Would like to have
......@@ -50,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
int i;
trace_i915_ring_wait_begin (dev);
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (ring->space >= n)
if (ring->space >= n) {
trace_i915_ring_wait_end (dev);
return 0;
}
if (dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
......@@ -77,6 +82,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
}
trace_i915_ring_wait_end (dev);
return -EBUSY;
}
......@@ -922,7 +928,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
* how much was set aside so we can use it for our own purposes.
*/
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
uint32_t *preallocated_size)
uint32_t *preallocated_size,
uint32_t *start)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 tmp = 0;
......@@ -1009,10 +1016,159 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
return -1;
}
*preallocated_size = stolen - overhead;
*start = overhead;
return 0;
}
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
#define PTE_MAPPING_TYPE_MASK (3 << 1)
#define PTE_VALID (1 << 0)
/**
* i915_gtt_to_phys - take a GTT address and turn it into a physical one
* @dev: drm device
* @gtt_addr: address to translate
*
* Some chip functions require allocations from stolen space but need the
* physical address of the memory in question. We use this routine
* to get a physical address suitable for register programming from a given
* GTT address.
*/
static unsigned long i915_gtt_to_phys(struct drm_device *dev,
unsigned long gtt_addr)
{
unsigned long *gtt;
unsigned long entry, phys;
int gtt_bar = IS_I9XX(dev) ? 0 : 1;
int gtt_offset, gtt_size;
if (IS_I965G(dev)) {
if (IS_G4X(dev) || IS_IGDNG(dev)) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
gtt_offset = 512*1024;
gtt_size = 512*1024;
}
} else {
gtt_bar = 3;
gtt_offset = 0;
gtt_size = pci_resource_len(dev->pdev, gtt_bar);
}
gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
gtt_size);
if (!gtt) {
DRM_ERROR("ioremap of GTT failed\n");
return 0;
}
entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
/* Mask out these reserved bits on this hardware. */
if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
IS_I945G(dev) || IS_I945GM(dev)) {
entry &= ~PTE_ADDRESS_MASK_HIGH;
}
/* If it's not a mapping type we know, then bail. */
if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
(entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
iounmap(gtt);
return 0;
}
if (!(entry & PTE_VALID)) {
DRM_ERROR("bad GTT entry in stolen space\n");
iounmap(gtt);
return 0;
}
iounmap(gtt);
phys = (entry & PTE_ADDRESS_MASK) |
((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
return phys;
}
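To make the masking above concrete, here is a small worked example (the PTE value 0x12345031 is made up); it simply repeats the arithmetic i915_gtt_to_phys performs after validating the entry:

```c
/* Illustrative only: the same bit manipulation i915_gtt_to_phys uses.
 *
 * Example with entry = 0x12345031:
 *   PTE_VALID (bit 0) is set, mapping type bits [2:1] are 00 (uncached),
 *   low bits  = entry & PTE_ADDRESS_MASK              = 0x12345000
 *   high bits = (entry & PTE_ADDRESS_MASK_HIGH) << 28 = 0x300000000
 *   => phys   = 0x312345000  (bits 35:32 come from the "high" nibble)
 */
static u64 example_pte_to_phys(u32 entry)
{
	return (entry & PTE_ADDRESS_MASK) |
	       ((u64)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
}
```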
static void i915_warn_stolen(struct drm_device *dev)
{
DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
unsigned long cfb_base, ll_base;
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
if (!compressed_fb) {
i915_warn_stolen(dev);
return;
}
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb) {
i915_warn_stolen(dev);
return;
}
cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
if (!cfb_base) {
DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
drm_mm_put_block(compressed_fb);
}
if (!IS_GM45(dev)) {
compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
4096, 0);
if (!compressed_llb) {
i915_warn_stolen(dev);
return;
}
compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
if (!compressed_llb) {
i915_warn_stolen(dev);
return;
}
ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
if (!ll_base) {
DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
drm_mm_put_block(compressed_fb);
drm_mm_put_block(compressed_llb);
}
}
dev_priv->cfb_size = size;
if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
}
DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
......@@ -1027,6 +1183,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
}
static int i915_load_modeset_init(struct drm_device *dev,
unsigned long prealloc_start,
unsigned long prealloc_size,
unsigned long agp_size)
{
......@@ -1047,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
/* Let GEM Manage from end of prealloc space to end of aperture.
*
......@@ -1059,10 +1220,25 @@ static int i915_load_modeset_init(struct drm_device *dev,
*/
i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (ret)
goto out;
/* Try to set up FBC with a reasonable compressed buffer size */
if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
i915_powersave) {
int cfb_size;
/* Try to get an 8M buffer... */
if (prealloc_size > (9*1024*1024))
cfb_size = 8*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
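To put numbers on the CFB sizing heuristic a few lines above (the values are illustrative): with 32 MB of stolen memory the driver takes the fixed 8 MB compressed buffer, while with only 8 MB stolen it falls back to 7/8 of that, i.e. 7 MB. A hypothetical helper expressing the same rule:

```c
/* Illustrative only: mirrors the CFB sizing heuristic used above. */
static unsigned long example_cfb_size(unsigned long prealloc_size)
{
	if (prealloc_size > 9*1024*1024)	/* e.g. 32 MB stolen -> 8 MB CFB */
		return 8*1024*1024;
	return prealloc_size * 7 / 8;		/* e.g.  8 MB stolen -> 7 MB CFB */
}
```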
......@@ -1180,7 +1356,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
uint32_t agp_size, prealloc_size;
uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
dev->counters += 4;
......@@ -1234,7 +1410,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
"performance may suffer.\n");
}
ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
if (ret)
goto out_iomapfree;
......@@ -1300,8 +1476,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return ret;
}
/* Start out suspended */
dev_priv->mm.suspended = 1;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
ret = i915_load_modeset_init(dev, prealloc_start,
prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_workqueue_free;
......@@ -1313,6 +1493,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_IGDNG(dev))
intel_opregion_init(dev, 0);
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
return 0;
out_workqueue_free:
......@@ -1333,6 +1515,7 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
destroy_workqueue(dev_priv->wq);
del_timer_sync(&dev_priv->hangcheck_timer);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
......@@ -1472,6 +1655,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
......
......@@ -89,6 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
dev_priv->suspended = 1;
return 0;
}
......@@ -97,8 +99,6 @@ static int i915_resume(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev))
return -1;
pci_set_master(dev->pdev);
......@@ -124,9 +124,135 @@ static int i915_resume(struct drm_device *dev)
drm_helper_resume_force_mode(dev);
}
dev_priv->suspended = 0;
return ret;
}
/**
* i965_reset - reset chip after a hang
* @dev: drm device to reset
* @flags: reset domains
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
int i965_reset(struct drm_device *dev, u8 flags)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long timeout;
u8 gdrst;
/*
* We really should only reset the display subsystem if we actually
* need to
*/
bool need_display = true;
mutex_lock(&dev->struct_mutex);
/*
* Clear request list
*/
i915_gem_retire_requests(dev);
if (need_display)
i915_save_display(dev);
if (IS_I965G(dev) || IS_G4X(dev)) {
/*
* Set the domains we want to reset, then the reset bit (bit 0).
* Clear the reset bit after a while and wait for hardware status
* bit (bit 1) to be set
*/
pci_read_config_byte(dev->pdev, GDRST, &gdrst);
pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
udelay(50);
pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
/* ...we don't want to loop forever though, 500ms should be plenty */
timeout = jiffies + msecs_to_jiffies(500);
do {
udelay(100);
pci_read_config_byte(dev->pdev, GDRST, &gdrst);
} while ((gdrst & 0x1) && time_after(timeout, jiffies));
if (gdrst & 0x1) {
WARN(true, "i915: Failed to reset chip\n");
mutex_unlock(&dev->struct_mutex);
return -EIO;
}
} else {
DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
return -ENODEV;
}
/* Ok, now get things going again... */
/*
* Everything depends on having the GTT running, so we need to start
* there. Fortunately we don't need to do this unless we reset the
* chip at a PCI level.
*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
drm_i915_ring_buffer_t *ring = &dev_priv->ring;
struct drm_gem_object *obj = ring->ring_obj;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
dev_priv->mm.suspended = 0;
/* Stop the ring if it's running. */
I915_WRITE(PRB0_CTL, 0);
I915_WRITE(PRB0_TAIL, 0);
I915_WRITE(PRB0_HEAD, 0);
/* Initialize the ring. */
I915_WRITE(PRB0_START, obj_priv->gtt_offset);
I915_WRITE(PRB0_CTL,
((obj->size - 4096) & RING_NR_PAGES) |
RING_NO_REPORT |
RING_VALID);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
}
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
mutex_lock(&dev->struct_mutex);
}
/*
* Display needs restore too...
*/
if (need_display)
i915_restore_display(dev);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
......@@ -234,6 +360,8 @@ static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
i915_gem_shrinker_init();
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
......@@ -260,6 +388,7 @@ static int __init i915_init(void)
static void __exit i915_exit(void)
{
i915_gem_shrinker_exit();
drm_exit(&driver);
}
......
......@@ -48,6 +48,11 @@ enum pipe {
PIPE_B,
};
enum plane {
PLANE_A = 0,
PLANE_B,
};
#define I915_NUM_PIPE 2
/* Interface history:
......@@ -148,6 +153,23 @@ struct drm_i915_error_state {
struct timeval time;
};
struct drm_i915_display_funcs {
void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_crtc *crtc);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev, int planea_clock,
int planeb_clock, int sr_hdisplay, int pixel_size);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
/* clock gating init */
};
typedef struct drm_i915_private {
struct drm_device *dev;
......@@ -198,10 +220,21 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
bool cursor_needs_physical;
struct drm_mm vram;
unsigned long cfb_size;
unsigned long cfb_pitch;
int cfb_fence;
int cfb_plane;
int irq_enabled;
struct intel_opregion opregion;
......@@ -222,6 +255,8 @@ typedef struct drm_i915_private {
unsigned int edp_support:1;
int lvds_ssc_freq;
struct notifier_block lid_notifier;
int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
......@@ -234,7 +269,11 @@ typedef struct drm_i915_private {
struct work_struct error_work;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* Register state */
bool suspended;
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
......@@ -349,6 +388,15 @@ typedef struct drm_i915_private {
struct io_mapping *gtt_mapping;
int gtt_mtrr;
/**
* Membership on list of all loaded devices, used to evict
* inactive buffers under memory pressure.
*
* Modifications should only be done whilst holding the
* shrink_list_lock spinlock.
*/
struct list_head shrink_list;
/**
* List of objects currently involved in rendering from the
* ringbuffer.
......@@ -432,7 +480,7 @@ typedef struct drm_i915_private {
* It prevents command submission from occurring and makes
* every pending request fail
*/
int wedged;
atomic_t wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
......@@ -491,10 +539,7 @@ struct drm_i915_gem_object {
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
/**
* Required alignment for the object
*/
uint32_t gtt_alignment;
/**
* Fake offset for use by mmap(2)
*/
......@@ -541,6 +586,11 @@ struct drm_i915_gem_object {
* in an execbuffer object list.
*/
int in_execbuffer;
/**
* Advice: are the backing pages purgeable?
*/
int madv;
};
/**
......@@ -585,6 +635,8 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
extern void i915_save_display(struct drm_device *dev);
extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
......@@ -604,8 +656,10 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
extern int i965_reset(struct drm_device *dev, u8 flags);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
......@@ -676,6 +730,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
......@@ -695,6 +751,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
......@@ -720,6 +777,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
......@@ -767,6 +827,8 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
/**
* Lock test for when it's just for synchronization of ring access.
......@@ -864,6 +926,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
(dev)->pci_device == 0x2E42 || \
(dev)->pci_device == 0x0042 || \
(dev)->pci_device == 0x0046)
......@@ -876,6 +939,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
(dev)->pci_device == 0x2E42 || \
IS_GM45(dev))
#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
......@@ -909,12 +973,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev)))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
......
This diff is collapsed.
......@@ -31,6 +31,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#define MAX_NOPID ((u32)~0)
......@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
}
if (gt_iir & GT_USER_INTERRUPT) {
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
u32 seqno = i915_get_gem_seqno(dev);
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
}
......@@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
error_work);
struct drm_device *dev = dev_priv->dev;
char *event_string = "ERROR=1";
char *envp[] = { event_string, NULL };
char *error_event[] = { "ERROR=1", NULL };
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
DRM_DEBUG("generating error event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
if (IS_I965G(dev)) {
DRM_DEBUG("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
if (!i965_reset(dev, GDRST_RENDER)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
} else {
printk("reboot required\n");
}
}
}
/**
......@@ -372,7 +388,7 @@ out:
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
static void i915_handle_error(struct drm_device *dev)
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
......@@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev)
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
if (wedged) {
atomic_set(&dev_priv->mm.wedged, 1);
/*
* Wakeup waiting processes so they don't hang
*/
printk("i915: Waking up sleeping processes\n");
DRM_WAKEUP(&dev_priv->irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
}
......@@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
pipeb_stats = I915_READ(PIPEBSTAT);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev);
i915_handle_error(dev, false);
/*
* Clear the PIPE(A|B)STAT regs before the IIR
......@@ -599,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
u32 seqno = i915_get_gem_seqno(dev);
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
if (pipea_stats & vblank_status) {
......@@ -880,6 +910,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
drm_i915_private_t *dev_priv = dev->dev_private;
return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
}
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. The first time this is called we simply record
* ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
* again, we assume the chip is wedged and try to fix it.
*/
void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd;
if (!IS_I965G(dev))
acthd = I915_READ(ACTHD);
else
acthd = I915_READ(ACTHD_I965);
/* If all work is done then ACTHD clearly hasn't advanced. */
if (list_empty(&dev_priv->mm.request_list) ||
i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
dev_priv->hangcheck_count = 0;
return;
}
if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
i915_handle_error(dev, true);
return;
}
/* Reset timer in case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
if (acthd != dev_priv->last_acthd)
dev_priv->hangcheck_count = 0;
else
dev_priv->hangcheck_count++;
dev_priv->last_acthd = acthd;
}
/* drm_dma.h hooks
*/
static void igdng_irq_preinstall(struct drm_device *dev)
......
......@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 blc_pwm_ctl, blc_pwm_ctl2;
u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAIL;
......@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLE_BACKLIGHT_FAIL;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
else
I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
else {
if (IS_IGD(dev)) {
blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT;
shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
} else {
blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
}
level = (bclp * max_backlight) / 255;
I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
}
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
......
......@@ -86,6 +86,10 @@
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
#define GDRST 0xc0
#define GDRST_FULL (0<<2)
#define GDRST_RENDER (1<<2)
#define GDRST_MEDIA (3<<2)
/* VGA stuff */
......@@ -344,9 +348,37 @@
#define FBC_CTL_PLANEA (0<<0)
#define FBC_CTL_PLANEB (1<<0)
#define FBC_FENCE_OFF 0x0321b
#define FBC_TAG 0x03300
#define FBC_LL_SIZE (1536)
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE 0x3200
#define DPFC_CONTROL 0x3208
#define DPFC_CTL_EN (1<<31)
#define DPFC_CTL_PLANEA (0<<30)
#define DPFC_CTL_PLANEB (1<<30)
#define DPFC_CTL_FENCE_EN (1<<29)
#define DPFC_SR_EN (1<<10)
#define DPFC_CTL_LIMIT_1X (0<<6)
#define DPFC_CTL_LIMIT_2X (1<<6)
#define DPFC_CTL_LIMIT_4X (2<<6)
#define DPFC_RECOMP_CTL 0x320c
#define DPFC_RECOMP_STALL_EN (1<<27)
#define DPFC_RECOMP_STALL_WM_SHIFT (16)
#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
#define DPFC_STATUS 0x3210
#define DPFC_INVAL_SEG_SHIFT (16)
#define DPFC_INVAL_SEG_MASK (0x07ff0000)
#define DPFC_COMP_SEG_SHIFT (0)
#define DPFC_COMP_SEG_MASK (0x000003ff)
#define DPFC_STATUS2 0x3214
#define DPFC_FENCE_YOFF 0x3218
#define DPFC_CHICKEN 0x3224
#define DPFC_HT_MODIFY (1<<31)
/*
* GPIO regs
*/
......@@ -2000,6 +2032,8 @@
#define PF_ENABLE (1<<31)
#define PFA_WIN_SZ 0x68074
#define PFB_WIN_SZ 0x68874
#define PFA_WIN_POS 0x68070
#define PFB_WIN_POS 0x68870
/* legacy palette */
#define LGC_PALETTE_A 0x4a000
......
......@@ -228,6 +228,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
......@@ -285,6 +286,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
return;
}
static void i915_restore_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -379,19 +381,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
return;
}
int i915_save_state(struct drm_device *dev)
void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Render Standby */
if (IS_I965G(dev) && IS_MOBILE(dev))
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
......@@ -399,6 +392,7 @@ int i915_save_state(struct drm_device *dev)
/* This is only meaningful in non-KMS mode */
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
/* Cursor state */
dev_priv->saveCURACNTR = I915_READ(CURACNTR);
dev_priv->saveCURAPOS = I915_READ(CURAPOS);
......@@ -448,81 +442,22 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* Interrupt state */
dev_priv->saveIIR = I915_READ(IIR);
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
/* VGA state */
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
/* Fences */
if (IS_I965G(dev)) {
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
} else {
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
}
i915_save_vga(dev);
return 0;
}
int i915_restore_state(struct drm_device *dev)
void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Render Standby */
if (IS_I965G(dev) && IS_MOBILE(dev))
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Display arbitration */
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Fences */
if (IS_I965G(dev)) {
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
} else {
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
}
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
......@@ -534,9 +469,11 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
/* Cursor state */
I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
......@@ -586,6 +523,95 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
DRM_UDELAY(150);
i915_restore_vga(dev);
}
int i915_save_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Render Standby */
if (IS_I965G(dev) && IS_MOBILE(dev))
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
i915_save_display(dev);
/* Interrupt state */
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
/* Fences */
if (IS_I965G(dev)) {
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
} else {
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
}
return 0;
}
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Render Standby */
if (IS_I965G(dev) && IS_MOBILE(dev))
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Fences */
if (IS_I965G(dev)) {
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
} else {
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
}
i915_restore_display(dev);
/* Interrupt state */
I915_WRITE (IER, dev_priv->saveIER);
I915_WRITE (IMR, dev_priv->saveIMR);
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
......@@ -603,8 +629,6 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_vga(dev);
return 0;
}
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <drm/drmP.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
TP_PROTO(struct drm_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
__entry->size = obj->size;
),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
TRACE_EVENT(i915_gem_object_bind,
TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
TP_ARGS(obj, gtt_offset),
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
__field(u32, gtt_offset)
),
TP_fast_assign(
__entry->obj = obj;
__entry->gtt_offset = gtt_offset;
),
TP_printk("obj=%p, gtt_offset=%08x",
__entry->obj, __entry->gtt_offset)
);
TRACE_EVENT(i915_gem_object_clflush,
TP_PROTO(struct drm_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
),
TP_fast_assign(
__entry->obj = obj;
),
TP_printk("obj=%p", __entry->obj)
);
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
TP_ARGS(obj, old_read_domains, old_write_domain),
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
__entry->read_domains = obj->read_domains | (old_read_domains << 16);
__entry->write_domain = obj->write_domain | (old_write_domain << 16);
),
TP_printk("obj=%p, read=%04x, write=%04x",
__entry->obj,
__entry->read_domains, __entry->write_domain)
);
TRACE_EVENT(i915_gem_object_get_fence,
TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
TP_ARGS(obj, fence, tiling_mode),
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
__field(int, fence)
__field(int, tiling_mode)
),
TP_fast_assign(
__entry->obj = obj;
__entry->fence = fence;
__entry->tiling_mode = tiling_mode;
),
TP_printk("obj=%p, fence=%d, tiling=%d",
__entry->obj, __entry->fence, __entry->tiling_mode)
);
TRACE_EVENT(i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
),
TP_fast_assign(
__entry->obj = obj;
),
TP_printk("obj=%p", __entry->obj)
);
TRACE_EVENT(i915_gem_object_destroy,
TP_PROTO(struct drm_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
),
TP_fast_assign(
__entry->obj = obj;
),
TP_printk("obj=%p", __entry->obj)
);
/* batch tracing */
TRACE_EVENT(i915_gem_request_submit,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev;
__entry->seqno = seqno;
),
TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_flush,
TP_PROTO(struct drm_device *dev, u32 seqno,
u32 flush_domains, u32 invalidate_domains),
TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
__field(u32, seqno)
__field(u32, flush_domains)
__field(u32, invalidate_domains)
),
TP_fast_assign(
__entry->dev = dev;
__entry->seqno = seqno;
__entry->flush_domains = flush_domains;
__entry->invalidate_domains = invalidate_domains;
),
TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
__entry->dev, __entry->seqno,
__entry->flush_domains, __entry->invalidate_domains)
);
TRACE_EVENT(i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev;
__entry->seqno = seqno;
),
TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev;
__entry->seqno = seqno;
),
TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev;
__entry->seqno = seqno;
),
TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_wait_end,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev;
__entry->seqno = seqno;
),
TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
),
TP_fast_assign(
__entry->dev = dev;
),
TP_printk("dev=%p", __entry->dev)
);
TRACE_EVENT(i915_ring_wait_end,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
__field(struct drm_device *, dev)
),
TP_fast_assign(
__entry->dev = dev;
),
TP_printk("dev=%p", __entry->dev)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#include <trace/define_trace.h>
/*
* Copyright © 2009 Intel Corporation
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "i915_drv.h"
#define CREATE_TRACE_POINTS
#include "i915_trace.h"
......@@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
else if (IS_IGDNG(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 96;
......
......@@ -179,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa, temp;
u32 adpa;
bool ret;
temp = adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_DAC_ENABLE;
I915_WRITE(PCH_ADPA, adpa);
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
......@@ -212,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
else
ret = false;
/* restore origin register */
I915_WRITE(PCH_ADPA, temp);
return ret;
}
......
This diff is collapsed.
......@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
......@@ -111,8 +112,8 @@ struct intel_output {
struct intel_crtc {
struct drm_crtc base;
int pipe;
int plane;
enum pipe pipe;
enum plane plane;
struct drm_gem_object *cursor_bo;
uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
......
......@@ -27,6 +27,7 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <acpi/button.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include "drmP.h"
......@@ -295,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
goto out;
}
/* full screen scale for now */
if (IS_IGDNG(dev))
goto out;
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
......@@ -322,8 +327,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
if (!IS_IGDNG(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
switch (lvds_priv->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
......@@ -572,7 +579,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
/* No panel fitting yet, fixme */
if (IS_IGDNG(dev))
return;
......@@ -585,15 +591,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
/* Some lid devices report incorrect lid status, assume they're connected */
static const struct dmi_system_id bad_lid_status[] = {
{
.ident = "Aspire One",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
},
},
{ }
};
/**
* Detect the LVDS connection.
*
* This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have
* been set up if the LVDS was actually connected anyway.
* Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
* connected and closed means disconnected. We also send hotplug events as
* needed, using lid status notification from the input layer.
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
return connector_status_connected;
enum drm_connector_status status = connector_status_connected;
if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
status = connector_status_disconnected;
return status;
}
/**
......@@ -632,6 +656,24 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 0;
}
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
if (acpi_lid_open() && !dev_priv->suspended) {
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex);
}
drm_sysfs_hotplug_event(dev_priv->dev);
return NOTIFY_OK;
}
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
......@@ -641,10 +683,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_output *intel_output = to_intel_output(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
......@@ -1011,6 +1057,11 @@ out:
pwm |= PWM_PCH_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
DRM_DEBUG("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
drm_sysfs_connector_add(connector);
return;
......
This diff is collapsed.
#ifndef ACPI_BUTTON_H
#define ACPI_BUTTON_H
#include <linux/notifier.h>
#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE)
extern int acpi_lid_notifier_register(struct notifier_block *nb);
extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
extern int acpi_lid_open(void);
#else
static inline int acpi_lid_notifier_register(struct notifier_block *nb)
{
return 0;
}
static inline int acpi_lid_notifier_unregister(struct notifier_block *nb)
{
return 0;
}
static inline int acpi_lid_open(void)
{
return 1;
}
#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */
#endif /* ACPI_BUTTON_H */
......@@ -552,6 +552,7 @@
{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
......
......@@ -185,6 +185,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
......@@ -221,6 +222,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
......@@ -667,4 +669,21 @@ struct drm_i915_get_pipe_from_crtc_id {
__u32 pipe;
};
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */
struct drm_i915_gem_madvise {
/** Handle of the buffer to change the backing store advice */
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
* or wont be and could be discarded under memory pressure.
*/
__u32 madv;
/** Whether the backing store still exists. */
__u32 retained;
};
#endif /* _I915_DRM_H_ */
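For completeness, a hedged sketch of how a userspace client could exercise the new DRM_IOCTL_I915_GEM_MADVISE ioctl; the include path, helper name and plain ioctl() usage are assumptions for illustration, not part of this patch:

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"	/* assumed include path for the header changed above */

/* Illustrative only: advise the kernel that a GEM buffer's contents are
 * disposable, and report whether the backing store was still retained.
 * 'fd' is an open DRM device fd, 'handle' a GEM handle owned by it.
 */
static int example_mark_dontneed(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv;

	memset(&madv, 0, sizeof(madv));
	madv.handle = handle;
	madv.madv = I915_MADV_DONTNEED;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;

	return madv.retained;	/* 0 means the pages were already purged */
}
```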