obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
-obj-$(CONFIG_VIOTAPE) += viotape.o
obj-$(CONFIG_IBM_BSR) += bsr.o
obj-$(CONFIG_SGI_MBCS) += mbcs.o
obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
obj-$(CONFIG_MWAVE) += mwave/
- obj-$(CONFIG_AGP) += agp/
+ obj-y += agp/
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
#include "intel-agp.h"
#include <drm/intel-gtt.h>
- int intel_agp_enabled;
- EXPORT_SYMBOL(intel_agp_enabled);
-
static int intel_fetch_size(void)
{
int i;
static int intel_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_16 *current_size;
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
static int intel_815_configure(void)
{
- u32 temp, addr;
+ u32 addr;
u8 temp2;
struct aper_size_info_8 *current_size;
current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
addr &= INTEL_815_ATTBASE_MASK;
static int intel_820_configure(void)
{
- u32 temp;
u8 temp2;
struct aper_size_info_8 *current_size;
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
static int intel_840_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
static int intel_845_configure(void)
{
- u32 temp;
u8 temp2;
struct aper_size_info_8 *current_size;
agp_bridge->apbase_config);
} else {
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- agp_bridge->apbase_config = temp;
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+ agp_bridge->apbase_config = agp_bridge->gart_bus_addr;
}
/* attbase - aperture base */
static int intel_850_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
static int intel_860_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
static int intel_830mp_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
static int intel_7505_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
- if (!err)
- intel_agp_enabled = 1;
return err;
}
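
All of the bridge ->configure() conversions in this file follow one pattern: instead of reading AGP_APBASE from config space and masking the raw BAR value by hand, they ask the PCI core for the bus address of the aperture BAR, which also accounts for any host bridge address translation. A minimal sketch of the before/after, assuming AGP_APERTURE_BAR is the aperture's BAR index:

    /* before: raw BAR contents, host bridge translation ignored */
    u32 temp;
    pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
    agp_bridge->gart_bus_addr = temp & PCI_BASE_ADDRESS_MEM_MASK;

    /* after: bus address as seen by the device, via the PCI core */
    agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
                                                AGP_APERTURE_BAR);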
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
u8 __iomem *registers;
- phys_addr_t gtt_bus_addr;
+ phys_addr_t gtt_phys_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
#define IS_IRONLAKE intel_private.driver->is_ironlake
#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
unsigned int num_entries,
struct sg_table *st)
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
+ #endif
#define I810_GTT_ORDER 4
static int i810_setup(void)
{
- u32 reg_addr;
+ phys_addr_t reg_addr;
char *gtt_table;
/* i81x does not preallocate the gtt. It's always 64kb in size. */
return -ENOMEM;
intel_private.i81x_gtt_table = gtt_table;
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
- reg_addr &= 0xfff80000;
+ reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
intel_private.registers = ioremap(reg_addr, KB(64));
if (!intel_private.registers)
writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
intel_private.registers+I810_PGETBL_CTL);
- intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
if ((readl(intel_private.registers+I810_DRAM_CTL)
& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
}
kfree(curr);
}
+ #endif
static int intel_gtt_setup_scratch_page(void)
{
static int intel_gtt_init(void)
{
- u32 gma_addr;
u32 gtt_map_size;
- int ret;
+ int ret, bar;
ret = intel_private.driver->setup();
if (ret != 0)
intel_private.gtt = NULL;
if (intel_gtt_can_wc())
- intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
+ intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
gtt_map_size);
if (intel_private.gtt == NULL)
- intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
gtt_map_size);
if (intel_private.gtt == NULL) {
intel_private.driver->cleanup();
return -ENOMEM;
}
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
global_cache_flush(); /* FIXME: ? */
+ #endif
intel_private.stolen_size = intel_gtt_stolen_size();
}
if (INTEL_GTT_GEN <= 2)
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
- &gma_addr);
+ bar = I810_GMADR_BAR;
else
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
- &gma_addr);
-
- intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+ bar = I915_GMADR_BAR;
+ intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
return 0;
}
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_fetch_size(void)
{
int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
return 0;
}
+ #endif
static void i830_cleanup(void)
{
static int i830_setup(void)
{
- u32 reg_addr;
+ phys_addr_t reg_addr;
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
- reg_addr &= 0xfff80000;
+ reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
intel_private.registers = ioremap(reg_addr, KB(64));
if (!intel_private.registers)
return -ENOMEM;
- intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
return 0;
}
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
agp_bridge->gatt_table_real = NULL;
return 0;
}
+ #endif
static bool i830_check_flags(unsigned int flags)
{
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
struct page **pages,
mem->is_flushed = true;
return ret;
}
+ #endif
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
}
EXPORT_SYMBOL(intel_gtt_clear_range);
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
/* always return NULL for other allocation types for now */
return NULL;
}
+ #endif
static int intel_alloc_chipset_flush_resource(void)
{
static int i9xx_setup(void)
{
- u32 reg_addr, gtt_addr;
+ phys_addr_t reg_addr;
int size = KB(512);
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
-
- reg_addr &= 0xfff80000;
+ reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
switch (INTEL_GTT_GEN) {
case 3:
- pci_read_config_dword(intel_private.pcidev,
- I915_PTEADDR, &gtt_addr);
- intel_private.gtt_bus_addr = gtt_addr;
+ intel_private.gtt_phys_addr =
+ pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
break;
case 5:
- intel_private.gtt_bus_addr = reg_addr + MB(2);
+ intel_private.gtt_phys_addr = reg_addr + MB(2);
break;
default:
- intel_private.gtt_bus_addr = reg_addr + KB(512);
+ intel_private.gtt_phys_addr = reg_addr + KB(512);
break;
}
return 0;
}
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
};
+ #endif
static const struct intel_gtt_driver i81x_gtt_driver = {
.gen = 1,
intel_private.refcount++;
+ #if IS_ENABLED(CONFIG_AGP_INTEL)
if (bridge) {
bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
}
+ #endif
intel_private.bridge_dev = pci_dev_get(bridge_pdev);
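
The new #if IS_ENABLED(CONFIG_AGP_INTEL) guards compile the fake-AGP entry points out when only the GTT side is needed (e.g. by i915 built without the AGP core). IS_ENABLED() from <linux/kconfig.h> is true for both built-in and modular options, and also works as an ordinary C expression; a minimal sketch:

    #include <linux/kconfig.h>

    #if IS_ENABLED(CONFIG_AGP_INTEL)    /* CONFIG_AGP_INTEL=y or =m */
    /* fake-AGP driver bits compiled only in this case */
    #endif

    /* usable in ordinary code, too: */
    if (IS_ENABLED(CONFIG_AGP_INTEL))
            setup_fake_agp_bridge();    /* hypothetical helper */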
&args->offset);
}
- static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
- struct file *filp)
- {
- struct drm_file *file_priv;
-
- /* find current process's drm_file from filelist. */
- list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
- if (file_priv->filp == filp)
- return file_priv;
-
- WARN_ON(1);
-
- return ERR_PTR(-EFAULT);
- }
-
- static int exynos_drm_gem_mmap_buffer(struct file *filp,
+ int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- struct drm_file *file_priv;
unsigned long vm_size;
int ret;
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
vma->vm_ops = drm_dev->driver->gem_vm_ops;
- /* restore it to driver's fops. */
- filp->f_op = fops_get(drm_dev->driver->fops);
-
- file_priv = exynos_drm_find_drm_file(drm_dev, filp);
- if (IS_ERR(file_priv))
- return PTR_ERR(file_priv);
-
- /* restore it to drm_file. */
- filp->private_data = file_priv;
-
update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = vma->vm_end - vma->vm_start;
return 0;
}
- static const struct file_operations exynos_drm_gem_fops = {
- .mmap = exynos_drm_gem_mmap_buffer,
- };
-
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_exynos_file_private *exynos_file_priv;
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
+ struct file *anon_filp;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM)) {
return -ENODEV;
}
+ mutex_lock(&dev->struct_mutex);
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- /*
- * We have to use gem object and its fops for specific mmaper,
- * but vm_mmap() can deliver only filp. So we have to change
- * filp->f_op and filp->private_data temporarily, then restore
- * again. So it is important to keep lock until restoration the
- * settings to prevent others from misuse of filp->f_op or
- * filp->private_data.
- */
- mutex_lock(&dev->struct_mutex);
-
- /*
- * Set specific mmper's fops. And it will be restored by
- * exynos_drm_gem_mmap_buffer to dev->driver->fops.
- * This is used to call specific mapper temporarily.
- */
- file_priv->filp->f_op = &exynos_drm_gem_fops;
-
- /*
- * Set gem object to private_data so that specific mmaper
- * can get the gem object. And it will be restored by
- * exynos_drm_gem_mmap_buffer to drm_file.
- */
- file_priv->filp->private_data = obj;
+ exynos_file_priv = file_priv->driver_priv;
+ anon_filp = exynos_file_priv->anon_filp;
+ anon_filp->private_data = obj;
- addr = vm_mmap(file_priv->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+ addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, 0);
drm_gem_object_unreference(obj);
if (IS_ERR_VALUE(addr)) {
- /* check filp->f_op, filp->private_data are restored */
- if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
- file_priv->filp->f_op = fops_get(dev->driver->fops);
- file_priv->filp->private_data = file_priv;
- }
mutex_unlock(&dev->struct_mutex);
return (int)addr;
}
int ret;
/*
- * alocate memory to be used for framebuffer.
+ * allocate memory to be used for framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
* @vma: a pointer to vm_area.
* @flags: indicate memory type to allocated buffer and cache attribute.
*
- * P.S. this object would be transfered to user as kms_bo.handle so
+ * P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+ int exynos_drm_gem_mmap_buffer(struct file *filp,
+ struct vm_area_struct *vma);
+
/* map user space allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
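
Instead of temporarily swapping f_op and private_data on the caller's DRM file, the mmap ioctl now goes through a per-open anonymous file whose mmap handler is the (now non-static) exynos_drm_gem_mmap_buffer(). A sketch of how such an anon_filp could be created in the driver's open hook, assuming a file_operations wired to the exported handler (the open-hook name and wiring are assumptions, not shown in these hunks):

    #include <linux/anon_inodes.h>

    static const struct file_operations exynos_drm_gem_fops = {
            .mmap = exynos_drm_gem_mmap_buffer,
    };

    /* sketch: presumed driver-open path creating the anon file */
    static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
    {
            struct drm_exynos_file_private *file_priv = file->driver_priv;
            struct file *anon_filp;

            anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
                                           NULL, 0);
            if (IS_ERR(anon_filp))
                    return PTR_ERR(anon_filp);

            file_priv->anon_filp = anon_filp;
            return 0;
    }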
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
- i915_debugfs.o \
i915_gpu_error.o \
i915_suspend.o \
i915_gem.o \
intel_ringbuffer.o \
intel_overlay.o \
intel_sprite.o \
- intel_opregion.o \
intel_sideband.o \
intel_uncore.o \
dvo_ch7xxx.o \
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_ACPI) += intel_acpi.o
+i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
};
#define port_name(p) ((p) + 'A')
+ #define I915_NUM_PHYS_VLV 1
+
+ enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+ };
+
+ enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1
+ };
+
enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
- u64 bbaddr;
+ u64 bbaddr[I915_NUM_RINGS];
u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_ring {
+ bool valid;
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
+ struct intel_connector;
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
- void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*enable_fbc)(struct drm_crtc *crtc);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
+
+ int (*setup_backlight)(struct intel_connector *connector);
+ uint32_t (*get_backlight)(struct intel_connector *connector);
+ void (*set_backlight)(struct intel_connector *connector,
+ uint32_t level);
+ void (*disable_backlight)(struct intel_connector *connector);
+ void (*enable_backlight)(struct intel_connector *connector);
};
struct intel_uncore_funcs {
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ int fw_engine);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ int fw_engine);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
unsigned fifo_count;
unsigned forcewake_count;
+ unsigned fw_rendercount;
+ unsigned fw_mediacount;
+
struct delayed_work force_wake_work;
};
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
- int interval;
} *fbc_work;
enum no_fbc_reason {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
- #define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_HIST_CTL_B;
- u32 saveBLC_PWM_CTL_B;
- u32 saveBLC_PWM_CTL2_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
/* Power well structure for haswell */
struct i915_power_well {
+ const char *name;
+ bool always_on;
/* power well enable/disable usage count */
int count;
+ unsigned long domains;
+ void *data;
+ void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
+ bool enable);
+ bool (*is_enabled)(struct drm_device *dev,
+ struct i915_power_well *power_well);
};
- #define I915_MAX_POWER_WELLS 1
-
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
* time are on. They are kept on until after the first modeset.
*/
bool init_power_on;
+ int power_well_count;
struct mutex lock;
- struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
+ int domain_use_count[POWER_DOMAIN_NUM];
+ struct i915_power_well *power_wells;
};
struct i915_dri1_state {
unsigned long missed_irq_rings;
/**
- * State variable and reset counter controlling the reset flow
+ * State variable controlling the reset flow and count
*
- * Upper bits are for the reset counter. This counter is used by the
- * wait_seqno code to race-free noticed that a reset event happened and
- * that it needs to restart the entire ioctl (since most likely the
- * seqno it waited for won't ever signal anytime soon).
+ * This is a counter which gets incremented when reset is triggered,
+ * and again when reset has been handled. So odd values (lowest bit set)
+ * mean that a reset is in progress, and even values mean that the
+ * (reset_counter >> 1):th reset was successfully completed.
+ *
+ * If reset does not complete successfully, the I915_WEDGED bit is
+ * set, meaning that hardware is terminally sour and there is no
+ * recovery. All waiters on the reset_queue will be woken when
+ * that happens.
+ *
+ * This counter is used by the wait_seqno code to notice that a reset
+ * event happened and that it needs to restart the entire ioctl (since
+ * most likely the seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
- *
- * Lowest bit controls the reset state machine: Set means a reset is in
- * progress. This state will (presuming we don't have any bugs) decay
- * into either unset (successful reset) or the special WEDGED value (hw
- * terminally sour). All waiters on the reset_queue will be woken when
- * that happens.
*/
atomic_t reset_counter;
- /**
- * Special values/flags for reset_counter
- *
- * Note that the code relies on
- * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
- * being true.
- */
#define I915_RESET_IN_PROGRESS_FLAG 1
- #define I915_WEDGED 0xffffffff
+ #define I915_WEDGED (1 << 31)
/**
* Waitqueue to signal when the reset has completed. Used by clients
int edp_bpp;
struct edp_power_seq edp_pps;
+ struct {
+ u16 pwm_freq_hz;
+ bool active_low_pwm;
+ } backlight;
+
/* MIPI DSI */
struct {
u16 panel_id;
uint32_t fbc_val;
};
- struct hsw_wm_values {
+ struct ilk_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
} regsave;
};
+ struct i915_runtime_pm {
+ bool suspended;
+ };
+
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
/* overlay */
struct intel_overlay *overlay;
- unsigned int sprite_scaling_enabled;
- /* backlight */
- struct {
- int level;
- bool enabled;
- spinlock_t lock; /* bl registers and the above bl fields */
- struct backlight_device *device;
- } backlight;
+ /* backlight registers and fields in struct intel_panel */
+ spinlock_t backlight_lock;
/* LVDS info */
bool no_aux_handshake;
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
+ int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */
bool render_reclock_avail;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- bool hw_contexts_disabled;
uint32_t hw_context_size;
struct list_head context_list;
uint16_t cur_latency[5];
/* current hardware state */
- struct hsw_wm_values hw;
+ struct ilk_wm_values hw;
} wm;
struct i915_package_c8 pc8;
+ struct i915_runtime_pm pm;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
- #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+ #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
- #define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
+ #define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
- extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
- & I915_RESET_IN_PROGRESS_FLAG);
+ & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) == I915_WEDGED;
+ return atomic_read(&error->reset_counter) & I915_WEDGED;
+ }
+
+ static inline u32 i915_reset_count(struct i915_gpu_error *error)
+ {
+ return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
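
A worked example of the encoding behind i915_reset_count(), with illustrative counter values:

    /*
     * reset_counter = 0 -> even: no reset yet,         count = (0 + 1) / 2 = 0
     * reset_counter = 1 -> odd:  reset #1 in progress, count = (1 + 1) / 2 = 1
     * reset_counter = 2 -> even: reset #1 completed,   count = (2 + 1) / 2 = 1
     * reset_counter = 3 -> odd:  reset #2 in progress, count = (3 + 1) / 2 = 2
     *
     * I915_WEDGED occupies bit 31 and is masked off first, so a wedged
     * GPU still reports how many resets were attempted before it died.
     */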
void i915_gem_reset(struct drm_device *dev);
}
/* i915_gem_context.c */
- void i915_gem_context_init(struct drm_device *dev);
+ int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
/* intel_opregion.c */
struct intel_encoder;
-extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
+extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
#else
+static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+ int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
* must be set to prevent the GT core from powering down and stale values being
* returned.
*/
- void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
- void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+ void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination);
+ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+ void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+
+ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+ void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+
+ #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (((reg) >= 0x2000 && (reg) < 0x4000) ||\
+ ((reg) >= 0x5000 && (reg) < 0x8000) ||\
+ ((reg) >= 0xB000 && (reg) < 0x12000) ||\
+ ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+ #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+ (((reg) >= 0x12000 && (reg) < 0x14000) ||\
+ ((reg) >= 0x22000 && (reg) < 0x24000) ||\
+ ((reg) >= 0x30000 && (reg) < 0x40000))
+
+ #define FORCEWAKE_RENDER (1 << 0)
+ #define FORCEWAKE_MEDIA (1 << 1)
+ #define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
- int vlv_gpu_freq(int ddr_freq, int val);
- int vlv_freq_opcode(int ddr_freq, int val);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
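
The range macros above let the VLV accessors wake only the forcewake domain a register belongs to, instead of taking FORCEWAKE_ALL on every access. A sketch of how a read path might use them (the helper below is an illustration, not code from the patch):

    /* sketch: pick the forcewake engine(s) needed for a VLV register */
    static int vlv_fw_engine_for_reg(u32 reg)
    {
            if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg))
                    return FORCEWAKE_RENDER;
            if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg))
                    return FORCEWAKE_MEDIA;
            return FORCEWAKE_ALL;
    }

    /*
     * read path, schematically:
     *     fw_engine = vlv_fw_engine_for_reg(reg);
     *     vlv_force_wake_get(dev_priv, fw_engine);
     *     val = <raw mmio read of reg>;
     *     vlv_force_wake_put(dev_priv, fw_engine);
     */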
for_each_ring(ring, dev_priv, j) {
ret = gen8_write_pdp(ring, i, addr);
if (ret)
- return ret;
+ goto err_out;
}
}
return 0;
+
+ err_out:
+ for_each_ring(ring, dev_priv, j)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+ return ret;
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
struct sg_page_iter sg_iter;
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+ pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- dma_addr_t page_addr;
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
- page_addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
- pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
- true);
+ pt_vaddr[act_pte] =
+ gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
if (++act_pte == GEN8_PTES_PER_PAGE) {
kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
act_pt++;
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
act_pte = 0;
-
}
}
- kunmap_atomic(pt_vaddr);
+ if (pt_vaddr)
+ kunmap_atomic(pt_vaddr);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
container_of(vm, struct i915_hw_ppgtt, base);
int i, j;
+ drm_mm_takedown(&vm->mm);
+
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
if (ppgtt->pd_dma_addr[i]) {
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- dma_addr_t page_addr;
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
- page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
+ pt_vaddr[act_pte] =
+ vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
act_pt++;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
act_pte = 0;
-
}
}
- kunmap_atomic(pt_vaddr);
+ if (pt_vaddr)
+ kunmap_atomic(pt_vaddr);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
- list_add(&vma->vma_link, &obj->vma_list);
}
dev_priv->gtt.base.start = start;
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- phys_addr_t gtt_bus_addr;
+ phys_addr_t gtt_phys_addr;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
- gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+ gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
- dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+ dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
{
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+ drm_mm_takedown(&vm->mm);
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
+ if (unlikely(dev_priv->gtt.do_idle_maps))
+ DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
return 0;
}
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <acpi/video.h>
#include <drm/drmP.h>
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
- } __attribute__((packed));
+ } __packed;
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
- } __attribute__((packed));
+ } __packed;
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
- } __attribute__((packed));
+ } __packed;
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
u8 rsvd[86];
- } __attribute__((packed));
+ } __packed;
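
__packed is the kernel's standard spelling of the same GCC attribute, defined in the compiler headers roughly as:

    #define __packed __attribute__((__packed__))

so the struct layouts are unchanged; the hunks above are purely a style cleanup.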
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct intel_connector *intel_connector = NULL;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+ struct intel_connector *intel_connector;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
- u32 ret = 0;
- bool found = false;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
return ASLC_BACKLIGHT_FAILED;
mutex_lock(&dev->mode_config.mutex);
+
/*
- * Could match the OpRegion connector here instead, but we'd also need
- * to verify the connector could handle a backlight call.
+ * Update backlight on all connectors that support backlight (usually
+ * only one).
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc) {
- found = true;
- break;
- }
-
- if (!found) {
- ret = ASLC_BACKLIGHT_FAILED;
- goto out;
- }
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- intel_connector = to_intel_connector(connector);
-
- if (!intel_connector) {
- ret = ASLC_BACKLIGHT_FAILED;
- goto out;
- }
-
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
- intel_panel_set_backlight(intel_connector, bclp, 255);
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
+ intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
- out:
mutex_unlock(&dev->mode_config.mutex);
- return ret;
+
+ return 0;
}
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_MSM
- depends on ARCH_MSM8960
+ depends on MSM_IOMMU
+ depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
select TMPFS
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/video.h>
-#include <acpi/acpi.h>
#include <linux/mxm-wmi.h>
-
#include <linux/vga_switcheroo.h>
-
#include <drm/drm_edid.h>
+#include <acpi/video.h>
#include "nouveau_drm.h"
#include "nouveau_acpi.h"
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
+ #ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
+ int i;
union acpi_object *obj;
- int i, err;
char args_buff[4];
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 4,
+ .buffer.pointer = args_buff
+ };
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(nouveau_op_dsm_muid);
- params[0].buffer.pointer = (char *)nouveau_op_dsm_muid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x00000100;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 4;
/* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
for (i = 0; i < 4; i++)
args_buff[i] = (arg >> i * 8) & 0xFF;
- params[3].buffer.pointer = args_buff;
-
- err = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (err) {
- printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
- return err;
- }
-
- obj = (union acpi_object *)output.pointer;
- if (obj->type == ACPI_TYPE_INTEGER)
- if (obj->integer.value == 0x80000002) {
- return -ENODEV;
- }
-
- if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length == 4 && result) {
- *result = 0;
+ *result = 0;
+ obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
+ func, &argv4, ACPI_TYPE_BUFFER);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ return AE_ERROR;
+ } else {
+ if (obj->buffer.length == 4) {
*result |= obj->buffer.pointer[0];
*result |= (obj->buffer.pointer[1] << 8);
*result |= (obj->buffer.pointer[2] << 16);
*result |= (obj->buffer.pointer[3] << 24);
}
+ ACPI_FREE(obj);
}
- kfree(output.pointer);
return 0;
}
-static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
+static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
+ int ret = 0;
union acpi_object *obj;
- int err;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(nouveau_dsm_muid);
- params[0].buffer.pointer = (char *)nouveau_dsm_muid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x00000102;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = arg;
-
- err = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (err) {
- printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
- return err;
- }
-
- obj = (union acpi_object *)output.pointer;
-
- if (obj->type == ACPI_TYPE_INTEGER)
+ union acpi_object argv4 = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = arg,
+ };
+
+ obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
+ func, &argv4, ACPI_TYPE_INTEGER);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ return AE_ERROR;
+ } else {
if (obj->integer.value == 0x80000002)
- return -ENODEV;
-
- if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length == 4 && result) {
- *result = 0;
- *result |= obj->buffer.pointer[0];
- *result |= (obj->buffer.pointer[1] << 8);
- *result |= (obj->buffer.pointer[2] << 16);
- *result |= (obj->buffer.pointer[3] << 24);
- }
+ ret = -ENODEV;
+ ACPI_FREE(obj);
}
- kfree(output.pointer);
- return 0;
-}
-
-/* Returns 1 if a DSM function is usable and 0 otherwise */
-static int nouveau_test_dsm(acpi_handle test_handle,
- int (*dsm_func)(acpi_handle, int, int, uint32_t *),
- int sfnc)
-{
- u32 result = 0;
-
- /* Function 0 returns a Buffer containing available functions. The args
- * parameter is ignored for function 0, so just put 0 in it */
- if (dsm_func(test_handle, 0, 0, &result))
- return 0;
-
- /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
- * the n-th bit is enabled, function n is supported */
- return result & 1 && result & (1 << sfnc);
+ return ret;
}
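
The deleted nouveau_test_dsm() open-coded the probing that acpi_check_dsm() now provides: _DSM function 0 returns a buffer of capability bits, where bit 0 means at least one function is supported and bit n covers function n. Schematically, with the revision IDs used below:

    /*
     * acpi_check_dsm(handle, uuid, rev, 1 << n) evaluates _DSM
     * function 0 for (uuid, rev) and tests the returned bitmask:
     *
     *   bit 0 - some function other than 0 is supported
     *   bit n - function n is supported
     *
     * e.g. acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
     *                     1 << NOUVEAU_DSM_POWER)
     * replaces nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER).
     */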
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
- return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+ return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id);
}
static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
arg = NOUVEAU_DSM_POWER_SPEED;
else
arg = NOUVEAU_DSM_POWER_STAMINA;
- nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+ nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg);
return 0;
}
nouveau_dsm_priv.other_handle = dhandle;
return false;
}
- if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
+ if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+ 1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
- NOUVEAU_DSM_OPTIMUS_CAPS))
+ if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100,
+ 1 << NOUVEAU_DSM_OPTIMUS_CAPS))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval & NOUVEAU_DSM_HAS_OPT) {
if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
vga_switcheroo_unregister_handler();
}
+ #else
+ void nouveau_register_dsm_handler(void) {}
+ void nouveau_unregister_dsm_handler(void) {}
+ void nouveau_switcheroo_optimus_dsm(void) {}
+ #endif
/* retrieve the ROM in 4k blocks */
static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
static int do_unregister_framebuffer(struct fb_info *fb_info);
#define VGA_FB_PHYS 0xA0000
- static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+ static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
- int i;
+ int i, ret;
/* check all firmware fbs and kick them out if the base addr overlaps */
for (i = 0 ; i < FB_MAX; i++) {
printk(KERN_INFO "fb: conflicting fb hw usage "
"%s vs %s - removing generic driver\n",
name, registered_fb[i]->fix.id);
- do_unregister_framebuffer(registered_fb[i]);
+ ret = do_unregister_framebuffer(registered_fb[i]);
+ if (ret)
+ return ret;
}
}
+
+ return 0;
}
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i;
+ int i, ret;
struct fb_event event;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
- fb_is_primary_device(fb_info));
+ ret = do_remove_conflicting_framebuffers(fb_info->apertures,
+ fb_info->fix.id,
+ fb_is_primary_device(fb_info));
+ if (ret)
+ return ret;
if (num_registered_fb == FB_MAX)
return -ENXIO;
}
EXPORT_SYMBOL(unlink_framebuffer);
- void remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+ int remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
+ int ret;
+
mutex_lock(&registration_lock);
- do_remove_conflicting_framebuffers(a, name, primary);
+ ret = do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);
+
+ return ret;
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
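
With the int return in place, drivers that kick out firmware framebuffers can propagate failure instead of silently continuing. A minimal sketch of a PCI driver using the new signature (names and BAR choice hypothetical):

    #include <linux/fb.h>
    #include <linux/pci.h>
    #include <linux/slab.h>

    static int example_kick_out_firmware_fb(struct pci_dev *pdev)
    {
            struct apertures_struct *ap;
            int ret;

            ap = alloc_apertures(1);
            if (!ap)
                    return -ENOMEM;

            ap->ranges[0].base = pci_resource_start(pdev, 0);
            ap->ranges[0].size = pci_resource_len(pdev, 0);

            ret = remove_conflicting_framebuffers(ap, "exampledrmfb", false);
            kfree(ap);
            return ret;
    }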
options = opt + name_len + 1;
}
}
+ /* No match, pass global option */
+ if (!options && option && fb_mode_option)
+ options = kstrdup(fb_mode_option, GFP_KERNEL);
if (options && !strncmp(options, "off", 3))
retval = 1;
/* revert to the OMAP4 mechanism of DISPC Smart Standby operation */
bool mstandby_workaround:1;
+
+ bool set_max_preload:1;
};
#define DISPC_MAX_NR_FIFOS 5
dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
+
+ /*
+ * configure the preload to the pipeline's high threshold; if HT is too
+ * large for the preload field, set the preload to the maximum value
+ * that can be held by the preload register
+ */
+ if (dss_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
+ plane != OMAP_DSS_WB)
+ dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
}
+EXPORT_SYMBOL(dispc_ovl_set_fifo_threshold);
void dispc_enable_fifomerge(bool enable)
{
*fifo_high = total_fifo_size - buf_unit;
}
}
+EXPORT_SYMBOL(dispc_ovl_compute_fifo_thresholds);
static void dispc_ovl_set_fir(enum omap_plane plane,
int hinc, int vinc,
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *t, u16 pos_x,
- u16 width, u16 height, u16 out_width, u16 out_height)
+ u16 width, u16 height, u16 out_width, u16 out_height,
+ bool five_taps)
{
const int ds = DIV_ROUND_UP(height, out_height);
unsigned long nonactive;
if (blank <= limits[i])
return -EINVAL;
+ /* FIXME add checks for 3-tap filter once the limitations are known */
+ if (!five_taps)
+ return 0;
+
/*
* Pixel data should be prepared before visible display point starts.
* So, at least DS-2 lines must have already been fetched by DISPC
do {
in_height = DIV_ROUND_UP(height, *decim_y);
in_width = DIV_ROUND_UP(width, *decim_x);
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
- in_width, in_height, out_width, out_height, color_mode);
-
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
- pos_x, in_width, in_height, out_width,
- out_height);
+ *five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
if (in_height > out_height &&
in_height < out_height * 2)
*five_taps = false;
- if (!*five_taps)
+again:
+ if (*five_taps)
+ *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ in_width, in_height, out_width,
+ out_height, color_mode);
+ else
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
+ error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ pos_x, in_width, in_height, out_width,
+ out_height, *five_taps);
+ if (error && *five_taps) {
+ *five_taps = false;
+ goto again;
+ }
+
error = (error || in_width > maxsinglelinewidth * 2 ||
(in_width > maxsinglelinewidth && *five_taps) ||
!*core_clk || *core_clk > dispc_core_clk_rate());
} while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, width,
- height, out_width, out_height)){
+ height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
}
DUMPREG(DISPC_CONTROL3);
DUMPREG(DISPC_CONFIG3);
}
+ if (dss_has_feature(FEAT_MFLAG))
+ DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
if (dss_has_feature(FEAT_PRELOAD))
DUMPREG(i, DISPC_OVL_PRELOAD);
+ if (dss_has_feature(FEAT_MFLAG))
+ DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
}
#undef DISPC_REG
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap44xx_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
.gfx_fifo_workaround = true,
+ .set_max_preload = true,
};
static const struct dispc_features omap54xx_dispc_feats __initconst = {
.num_fifos = 5,
.gfx_fifo_workaround = true,
.mstandby_workaround = true,
+ .set_max_preload = true,
};
static int __init dispc_init_features(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_irq_safe(&pdev->dev);
r = dispc_runtime_get();
if (r)
static int dispc_runtime_resume(struct device *dev)
{
+ _omap_dispc_initial_config();
+
dispc_restore_context();
return 0;