From 17d36707dd9c5c3c4ef09a278ee7444cfc60481e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 15 Oct 2007 23:28:19 +0200
Subject: [PATCH] x86: unify include/asm/agp_32/64.h

Same file, except for whitespace, comment formatting and the usage of
wbinvd() instead of asm volatile("wbinvd":::"memory"), which is the
same.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/asm-x86/agp.h    | 43 +++++++++++++++++++++++++++++++++++++++----
 include/asm-x86/agp_32.h | 36 ------------------------------------
 include/asm-x86/agp_64.h | 34 ----------------------------------
 3 files changed, 39 insertions(+), 74 deletions(-)
 delete mode 100644 include/asm-x86/agp_32.h
 delete mode 100644 include/asm-x86/agp_64.h

diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index 9348f1e4f6f1..62df2a9e7130 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -1,5 +1,40 @@
-#ifdef CONFIG_X86_32
-# include "agp_32.h"
-#else
-# include "agp_64.h"
+#ifndef _ASM_X86_AGP_H
+#define _ASM_X86_AGP_H
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Functions to keep the agpgart mappings coherent with the MMU. The
+ * GART gives the CPU a physical alias of pages in memory. The alias
+ * region is mapped uncacheable. Make sure there are no conflicting
+ * mappings with different cachability attributes for the same
+ * page. This avoids data corruption on some CPUs.
+ */
+
+/*
+ * Caller's responsibility to call global_flush_tlb() for performance
+ * reasons
+ */
+#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
+#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+#define flush_agp_mappings() global_flush_tlb()
+
+/*
+ * Could use CLFLUSH here if the cpu supports it. But then it would
+ * need to be called for each cacheline of the whole page so it may
+ * not be worth it. Would need a page for it.
+ */
+#define flush_agp_cache() wbinvd()
+
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order)		\
+	((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order)	\
+	free_pages((unsigned long)(table), (order))
+
 #endif
diff --git a/include/asm-x86/agp_32.h b/include/asm-x86/agp_32.h
deleted file mode 100644
index 6af173dbf123..000000000000
--- a/include/asm-x86/agp_32.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef AGP_H
-#define AGP_H 1
-
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-
-/*
- * Functions to keep the agpgart mappings coherent with the MMU.
- * The GART gives the CPU a physical alias of pages in memory. The alias region is
- * mapped uncacheable. Make sure there are no conflicting mappings
- * with different cachability attributes for the same page. This avoids
- * data corruption on some CPUs.
- */
-
-/* Caller's responsibility to call global_flush_tlb() for
- * performance reasons */
-#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
-#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
-#define flush_agp_mappings() global_flush_tlb()
-
-/* Could use CLFLUSH here if the cpu supports it. But then it would
-   need to be called for each cacheline of the whole page so it may not be
-   worth it. Would need a page for it. */
-#define flush_agp_cache() wbinvd()
-
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order)		\
-	((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order)	\
-	free_pages((unsigned long)(table), (order))
-
-#endif
diff --git a/include/asm-x86/agp_64.h b/include/asm-x86/agp_64.h
deleted file mode 100644
index de338666f3f9..000000000000
--- a/include/asm-x86/agp_64.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef AGP_H
-#define AGP_H 1
-
-#include <asm/cacheflush.h>
-
-/*
- * Functions to keep the agpgart mappings coherent.
- * The GART gives the CPU a physical alias of memory. The alias is
- * mapped uncacheable. Make sure there are no conflicting mappings
- * with different cachability attributes for the same page.
- */
-
-/* Caller's responsibility to call global_flush_tlb() for
- * performance reasons */
-#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
-#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
-#define flush_agp_mappings() global_flush_tlb()
-
-/* Could use CLFLUSH here if the cpu supports it. But then it would
-   need to be called for each cacheline of the whole page so it may not be
-   worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
-
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order)		\
-	((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order)	\
-	free_pages((unsigned long)(table), (order))
-
-#endif
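
A note on the wbinvd() substitution mentioned in the changelog: on x86 the
wbinvd() helper is essentially a thin wrapper around the very inline assembly
that the 64-bit header open-coded, along the lines of the sketch below
(illustrative only, not the exact kernel definition):

	/*
	 * Write back and invalidate all cache lines. The "memory" clobber
	 * keeps the compiler from reordering memory accesses around the
	 * flush or caching values across it.
	 */
	#define wbinvd() asm volatile("wbinvd" : : : "memory")

so switching the 64-bit flush_agp_cache() over to the helper is purely
cosmetic; the generated code is the same.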
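
For context, the calling convention that the "Caller's responsibility to call
global_flush_tlb()" comment describes looks roughly like the hypothetical
helper below (illustrative only, not taken from any real driver; error paths
and the surrounding driver structure are omitted):

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <asm/agp.h>

	/*
	 * Hypothetical sketch of how an AGP backend would use the macros
	 * above: remap a batch of pages, then do a single TLB flush and
	 * cache flush for the whole batch.
	 */
	static int example_agp_map_pages(struct page **pages, int nr_pages,
					 int order, char **gatt_out)
	{
		char *gatt = alloc_gatt_pages(order);	/* GATT kernel virtual address */
		int i;

		if (!gatt)
			return -ENOMEM;

		for (i = 0; i < nr_pages; i++)
			map_page_into_agp(pages[i]);	/* change_page_attr(), no flush yet */

		flush_agp_mappings();	/* one global_flush_tlb() for the whole batch */
		flush_agp_cache();	/* WBINVD so no stale cachelines alias the GART */

		*gatt_out = gatt;
		return 0;
	}

Keeping the TLB flush out of map_page_into_agp() itself is what lets a caller
batch the attribute changes and pay for global_flush_tlb() only once, which is
the performance point the comment is making.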