Commit 6a12235c authored by David Woodhouse

agp: kill phys_to_gart() and gart_to_phys()

There seems to be no reason for these -- they're a 1:1 mapping on all
platforms.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent f692775d
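
As a quick illustration (not part of the commit itself): every architecture defined these helpers as the identity, so dropping the wrapper at a call site changes nothing. A minimal standalone sketch follows; the macro bodies are copied from the headers this commit deletes, while the address value is purely hypothetical.

	/* Sketch only: shows why phys_to_gart()/gart_to_phys() are removable.
	 * The macro definitions match the deleted headers; the address below
	 * is an arbitrary example value, not taken from the commit. */
	#include <stdio.h>

	#define phys_to_gart(x) (x)	/* 1:1 on every platform */
	#define gart_to_phys(x) (x)

	int main(void)
	{
		unsigned long phys = 0x1234000UL;	/* hypothetical physical address */

		/* Both conversions are no-ops, so an expression such as
		 * phys_to_gart(page_to_phys(page)) is identical to
		 * page_to_phys(page) -- the simplification applied in each hunk below. */
		printf("%d\n", phys_to_gart(phys) == phys && gart_to_phys(phys) == phys);
		return 0;
	}
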
@@ -9,10 +9,6 @@
 #define unmap_page_from_agp(page)
 #define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
@@ -17,10 +17,6 @@
 #define unmap_page_from_agp(page) /* nothing */
 #define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
@@ -11,10 +11,6 @@
 #define unmap_page_from_agp(page) /* nothing */
 #define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
@@ -8,10 +8,6 @@
 #define unmap_page_from_agp(page)
 #define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
@@ -7,10 +7,6 @@
 #define unmap_page_from_agp(page)
 #define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
@@ -22,10 +22,6 @@
  */
 #define flush_agp_cache() wbinvd()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
@@ -318,9 +318,6 @@ void agp3_generic_cleanup(void);
 #define AGP_GENERIC_SIZES_ENTRIES 11
 extern const struct aper_size_info_16 agp3_generic_sizes[];
-#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
-#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
 extern int agp_off;
 extern int agp_try_unsupported_boot;
@@ -152,7 +152,7 @@ static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
 	pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
 	pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
 			(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-			  phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN ));
+			  page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
 	return page;
 }
@@ -180,7 +180,7 @@ static void m1541_destroy_page(struct page *page, int flags)
 		pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
 		pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
 				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-				  phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN));
+				  page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
 	}
 	agp_generic_destroy_page(page, flags);
 }
@@ -44,7 +44,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
 #ifndef CONFIG_X86
 	SetPageReserved(virt_to_page(page_map->real));
 	global_cache_flush();
-	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
+	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
 					     PAGE_SIZE);
 	if (page_map->remapped == NULL) {
 		ClearPageReserved(virt_to_page(page_map->real));
@@ -160,7 +160,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
 	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
 	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
-	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
 	/* Get the address for the gart region.
 	 * This is a bus address even on the alpha, b/c its
@@ -173,7 +173,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
 	/* Calculate the agp offset */
 	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-		writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
+		writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
 			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
@@ -325,7 +325,7 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
 		writel(agp_generic_mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])),
+			page_to_phys(mem->pages[i]),
 			mem->type),
 			cur_gatt+GET_GATT_OFF(addr));
 		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
@@ -79,7 +79,7 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		tmp = agp_bridge->driver->mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])),
+			page_to_phys(mem->pages[i]),
 			mask_type);
 		BUG_ON(tmp & 0xffffff0000000ffcULL);
@@ -178,7 +178,7 @@ static const struct aper_size_info_32 amd_8151_sizes[7] =
 static int amd_8151_configure(void)
 {
-	unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
+	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
 	int i;
 	/* Configure AGP regs in each x86-64 host bridge. */
@@ -558,7 +558,7 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
 {
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-	release_mem_region(virt_to_gart(bridge->gatt_table_real),
+	release_mem_region(virt_to_phys(bridge->gatt_table_real),
 			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
 	agp_remove_bridge(bridge);
 	agp_put_bridge(bridge);
@@ -302,7 +302,7 @@ static int ati_insert_memory(struct agp_memory * mem,
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])),
+			page_to_phys(mem->pages[i]),
 			mem->type),
 			cur_gatt+GET_GATT_OFF(addr));
 	}
@@ -360,7 +360,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
 	agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
-	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
 	/* Write out the size register */
 	current_size = A_SIZE_LVL2(agp_bridge->current_size);
@@ -390,7 +390,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 	/* Calculate the agp offset */
 	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-		writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
+		writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
 			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
@@ -159,7 +159,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 			goto err_out_nounmap;
 		}
 	} else {
-		bridge->scratch_page_dma = phys_to_gart(page_to_phys(page));
+		bridge->scratch_page_dma = page_to_phys(page);
 	}
 	bridge->scratch_page = bridge->driver->mask_memory(bridge,
@@ -67,7 +67,7 @@ static const struct gatt_mask efficeon_generic_masks[] =
 /* This function does the same thing as mask_memory() for this chipset... */
 static inline unsigned long efficeon_mask_memory(struct page *page)
 {
-	unsigned long addr = phys_to_gart(page_to_phys(page));
+	unsigned long addr = page_to_phys(page);
 	return addr | 0x00000001;
 }
@@ -226,7 +226,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
 		efficeon_private.l1_table[index] = page;
-		value = virt_to_gart((unsigned long *)page) | pati | present | index;
+		value = virt_to_phys((unsigned long *)page) | pati | present | index;
 		pci_write_config_dword(agp_bridge->dev,
 			EFFICEON_ATTPAGE, value);
@@ -988,7 +988,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 	set_memory_uc((unsigned long)table, 1 << page_order);
 	bridge->gatt_table = (void *)table;
 #else
-	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 					(PAGE_SIZE * (1 << page_order)));
 	bridge->driver->cache_flush();
 #endif
@@ -1001,7 +1001,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 		return -ENOMEM;
 	}
-	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
 	/* AK: bogus, should encode addresses > 4GB */
 	for (i = 0; i < num_entries; i++) {
@@ -1142,7 +1142,7 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(bridge->driver->mask_memory(bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])),
+			page_to_phys(mem->pages[i]),
 			mask_type),
 			bridge->gatt_table+j);
 	}
@@ -107,7 +107,7 @@ static int __init hp_zx1_ioc_shared(void)
 	hp->gart_size = HP_ZX1_GART_SIZE;
 	hp->gatt_entries = hp->gart_size / hp->io_page_size;
-	hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+	hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
 	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
 	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@@ -246,7 +246,7 @@ hp_zx1_configure (void)
 	agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
 	if (hp->io_pdir_owner) {
-		writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+		writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
 		readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
 		writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
 		readl(hp->ioc_regs+HP_ZX1_TCNFG);
@@ -325,7 +325,7 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
 	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
-		paddr = phys_to_gart(page_to_phys(mem->pages[i]));
+		paddr = page_to_phys(mem->pages[i]);
 		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
 			WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
 	}
@@ -382,7 +382,7 @@ static int i460_alloc_large_page (struct lp_desc *lp)
 		return -ENOMEM;
 	}
-	lp->paddr = phys_to_gart(page_to_phys(lp->page));
+	lp->paddr = page_to_phys(lp->page);
 	lp->refcount = 0;
 	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
 	return 0;
@@ -288,7 +288,7 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+			page_to_phys(mem->pages[i]), mask_type),
 			intel_private.gtt+j);
 	}
@@ -470,8 +470,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 	global_cache_flush();
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])),
-			mask_type),
+			page_to_phys(mem->pages[i]), mask_type),
 			intel_private.registers+I810_PTE_BASE+(j*4));
 	}
 	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@ -977,7 +976,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+			page_to_phys(mem->pages[i]), mask_type),
 			intel_private.registers+I810_PTE_BASE+(j*4));
 	}
 	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@ -225,7 +225,7 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type
 	}
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+			page_to_phys(mem->pages[i]), mask_type),
 			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
 	}
@@ -190,7 +190,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		table[j] =
 			bridge->driver->mask_memory(bridge,
-				phys_to_gart(page_to_phys(mem->pages[i])),
+				page_to_phys(mem->pages[i]),
 				mem->type);
 	}
@@ -155,7 +155,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 	/* Create a fake scratch directory */
 	for (i = 0; i < 1024; i++) {
 		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
-		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+		writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
 	}
 	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
@@ -167,7 +167,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
 	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
-	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
 	/* Get the address for the gart region.
 	 * This is a bus address even on the alpha, b/c its
@@ -179,7 +179,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 	/* Calculate the agp offset */
 	for (i = 0; i < value->num_entries / 1024; i++)
-		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+		writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
 	return 0;
 }
@@ -350,7 +350,7 @@ static int serverworks_insert_memory(struct agp_memory *mem,
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = SVRWRKS_GET_GATT(addr);
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			phys_to_gart(page_to_phys(mem->pages[i])), mem->type),
+			page_to_phys(mem->pages[i]), mem->type),
 			cur_gatt+GET_GATT_OFF(addr));
 	}
 	serverworks_tlbflush(mem);
@@ -431,7 +431,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
 	bridge->gatt_table_real = (u32 *) table;
 	bridge->gatt_table = (u32 *)table;
-	bridge->gatt_bus_addr = virt_to_gart(table);
+	bridge->gatt_bus_addr = virt_to_phys(table);
 	for (i = 0; i < num_entries; i++)
 		bridge->gatt_table[i] = 0;