From: Paul Mundt Date: Mon, 18 Jan 2010 12:21:32 +0000 (+0900) Subject: sh: Merge _32/_64 ioremap implementations. X-Git-Url: http://drtracing.org/?a=commitdiff_plain;h=0b59e38ffaf7b201ff6afe5b736365d16848c7e3;p=deliverable%2Flinux.git sh: Merge _32/_64 ioremap implementations. There is nothing of interest in the _64 version anymore, so the _32 one can be renamed and used unconditionally. Signed-off-by: Paul Mundt --- diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 89ba56c20ade..0027cdea2c20 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile @@ -15,7 +15,7 @@ obj-y += $(cacheops-y) mmu-y := nommu.o extable_32.o mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ - ioremap_$(BITS).o kmap.o pgtable.o tlbflush_$(BITS).o + ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o obj-y += $(mmu-y) obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c new file mode 100644 index 000000000000..24f6ba6bff71 --- /dev/null +++ b/arch/sh/mm/ioremap.c @@ -0,0 +1,171 @@ +/* + * arch/sh/mm/ioremap.c + * + * (C) Copyright 1995 1996 Linus Torvalds + * (C) Copyright 2005 - 2010 Paul Mundt + * + * Re-map IO memory to kernel address space so that we can access it. + * This is needed for high PCI addresses that aren't mapped in the + * 640k-1MB IO memory area on PC's + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ +void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, + unsigned long flags, void *caller) +{ + struct vm_struct *area; + unsigned long offset, last_addr, addr, orig_addr; + pgprot_t pgprot; + + /* Don't allow wraparound or zero size */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * If we're in the fixed PCI memory range, mapping through page + * tables is not only pointless, but also fundamentally broken. + * Just return the physical address instead. + * + * For boards that map a small PCI memory aperture somewhere in + * P1/P2 space, ioremap() will already do the right thing, + * and we'll never get this far. + */ + if (is_pci_memory_fixed_range(phys_addr, size)) + return (void __iomem *)phys_addr; + + /* + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr+1) - phys_addr; + + /* + * If we can't yet use the regular approach, go the fixmap route. + */ + if (!mem_init_done) + return ioremap_fixed(phys_addr, size, __pgprot(flags)); + + /* + * Ok, go for it.. + */ + area = get_vm_area_caller(size, VM_IOREMAP, caller); + if (!area) + return NULL; + area->phys_addr = phys_addr; + orig_addr = addr = (unsigned long)area->addr; + +#ifdef CONFIG_PMB + /* + * First try to remap through the PMB once a valid VMA has been + * established. 
Smaller allocations (or the rest of the size + * remaining after a PMB mapping due to the size not being + * perfectly aligned on a PMB size boundary) are then mapped + * through the UTLB using conventional page tables. + * + * PMB entries are all pre-faulted. + */ + if (unlikely(phys_addr >= P1SEG)) { + unsigned long mapped = pmb_remap(addr, phys_addr, size, flags); + + if (likely(mapped)) { + addr += mapped; + phys_addr += mapped; + size -= mapped; + } + } +#endif + + pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); + if (likely(size)) + if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { + vunmap((void *)orig_addr); + return NULL; + } + + return (void __iomem *)(offset + (char *)orig_addr); +} +EXPORT_SYMBOL(__ioremap_caller); + +/* + * Simple checks for non-translatable mappings. + */ +static inline int iomapping_nontranslatable(unsigned long offset) +{ +#ifdef CONFIG_29BIT + /* + * In 29-bit mode this includes the fixed P1/P2 areas, as well as + * parts of P3. + */ + if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX) + return 1; +#endif + + if (is_pci_memory_fixed_range(offset, 0)) + return 1; + + return 0; +} + +void __iounmap(void __iomem *addr) +{ + unsigned long vaddr = (unsigned long __force)addr; + struct vm_struct *p; + + /* + * Nothing to do if there is no translatable mapping. + */ + if (iomapping_nontranslatable(vaddr)) + return; + +#ifdef CONFIG_PMB + /* + * Purge any PMB entries that may have been established for this + * mapping, then proceed with conventional VMA teardown. + * + * XXX: Note that due to the way that remove_vm_area() does + * matching of the resultant VMA, we aren't able to fast-forward + * the address past the PMB space until the end of the VMA where + * the page tables reside. As such, unmap_vm_area() will be + * forced to linearly scan over the area until it finds the page + * tables where PTEs that need to be unmapped actually reside, + * which is far from optimal. Perhaps we need to use a separate + * VMA for the PMB mappings? + * -- PFM. + */ + pmb_unmap(vaddr); +#endif + + p = remove_vm_area((void *)(vaddr & PAGE_MASK)); + if (!p) { + printk(KERN_ERR "%s: bad address %p\n", __func__, addr); + return; + } + + kfree(p); +} +EXPORT_SYMBOL(__iounmap); diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c deleted file mode 100644 index 24f6ba6bff71..000000000000 --- a/arch/sh/mm/ioremap_32.c +++ /dev/null @@ -1,171 +0,0 @@ -/* - * arch/sh/mm/ioremap.c - * - * (C) Copyright 1995 1996 Linus Torvalds - * (C) Copyright 2005 - 2010 Paul Mundt - * - * Re-map IO memory to kernel address space so that we can access it. - * This is needed for high PCI addresses that aren't mapped in the - * 640k-1MB IO memory area on PC's - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file "COPYING" in the main directory of this - * archive for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Remap an arbitrary physical address space into the kernel virtual - * address space. Needed when the kernel wants to access high addresses - * directly. - * - * NOTE! We need to allow non-page-aligned mappings too: we will obviously - * have to convert them into an offset in a page-aligned mapping, but the - * caller shouldn't need to know that small detail. 
- */ -void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, - unsigned long flags, void *caller) -{ - struct vm_struct *area; - unsigned long offset, last_addr, addr, orig_addr; - pgprot_t pgprot; - - /* Don't allow wraparound or zero size */ - last_addr = phys_addr + size - 1; - if (!size || last_addr < phys_addr) - return NULL; - - /* - * If we're in the fixed PCI memory range, mapping through page - * tables is not only pointless, but also fundamentally broken. - * Just return the physical address instead. - * - * For boards that map a small PCI memory aperture somewhere in - * P1/P2 space, ioremap() will already do the right thing, - * and we'll never get this far. - */ - if (is_pci_memory_fixed_range(phys_addr, size)) - return (void __iomem *)phys_addr; - - /* - * Mappings have to be page-aligned - */ - offset = phys_addr & ~PAGE_MASK; - phys_addr &= PAGE_MASK; - size = PAGE_ALIGN(last_addr+1) - phys_addr; - - /* - * If we can't yet use the regular approach, go the fixmap route. - */ - if (!mem_init_done) - return ioremap_fixed(phys_addr, size, __pgprot(flags)); - - /* - * Ok, go for it.. - */ - area = get_vm_area_caller(size, VM_IOREMAP, caller); - if (!area) - return NULL; - area->phys_addr = phys_addr; - orig_addr = addr = (unsigned long)area->addr; - -#ifdef CONFIG_PMB - /* - * First try to remap through the PMB once a valid VMA has been - * established. Smaller allocations (or the rest of the size - * remaining after a PMB mapping due to the size not being - * perfectly aligned on a PMB size boundary) are then mapped - * through the UTLB using conventional page tables. - * - * PMB entries are all pre-faulted. - */ - if (unlikely(phys_addr >= P1SEG)) { - unsigned long mapped = pmb_remap(addr, phys_addr, size, flags); - - if (likely(mapped)) { - addr += mapped; - phys_addr += mapped; - size -= mapped; - } - } -#endif - - pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); - if (likely(size)) - if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { - vunmap((void *)orig_addr); - return NULL; - } - - return (void __iomem *)(offset + (char *)orig_addr); -} -EXPORT_SYMBOL(__ioremap_caller); - -/* - * Simple checks for non-translatable mappings. - */ -static inline int iomapping_nontranslatable(unsigned long offset) -{ -#ifdef CONFIG_29BIT - /* - * In 29-bit mode this includes the fixed P1/P2 areas, as well as - * parts of P3. - */ - if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX) - return 1; -#endif - - if (is_pci_memory_fixed_range(offset, 0)) - return 1; - - return 0; -} - -void __iounmap(void __iomem *addr) -{ - unsigned long vaddr = (unsigned long __force)addr; - struct vm_struct *p; - - /* - * Nothing to do if there is no translatable mapping. - */ - if (iomapping_nontranslatable(vaddr)) - return; - -#ifdef CONFIG_PMB - /* - * Purge any PMB entries that may have been established for this - * mapping, then proceed with conventional VMA teardown. - * - * XXX: Note that due to the way that remove_vm_area() does - * matching of the resultant VMA, we aren't able to fast-forward - * the address past the PMB space until the end of the VMA where - * the page tables reside. As such, unmap_vm_area() will be - * forced to linearly scan over the area until it finds the page - * tables where PTEs that need to be unmapped actually reside, - * which is far from optimal. Perhaps we need to use a separate - * VMA for the PMB mappings? - * -- PFM. 
- */ - pmb_unmap(vaddr); -#endif - - p = remove_vm_area((void *)(vaddr & PAGE_MASK)); - if (!p) { - printk(KERN_ERR "%s: bad address %p\n", __func__, addr); - return; - } - - kfree(p); -} -EXPORT_SYMBOL(__iounmap); diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c deleted file mode 100644 index fb0aa457c71e..000000000000 --- a/arch/sh/mm/ioremap_64.c +++ /dev/null @@ -1,47 +0,0 @@ -/* - * arch/sh/mm/ioremap_64.c - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 - 2007 Paul Mundt - * - * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly - * derived from arch/i386/mm/ioremap.c . - * - * (C) Copyright 1995 1996 Linus Torvalds - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, - unsigned long flags, void *caller) -{ - pgprot_t prot; - - prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | - _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags); - - return ioremap_fixed(offset, size, prot); -} -EXPORT_SYMBOL(__ioremap_caller); - -void __iounmap(void __iomem *virtual) -{ - iounmap_fixed(virtual); -} -EXPORT_SYMBOL(__iounmap);
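For readers tracing the alignment bookkeeping in __ioremap_caller() above, here is a minimal standalone C sketch of the same arithmetic. The 4 KiB PAGE_SIZE, the PAGE_ALIGN macro, and the sample addresses are illustrative assumptions for the demo, not values taken from the patch:

#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed page size for the demo */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* A hypothetical unaligned I/O window: 200 bytes at 0x10000013. */
	unsigned long phys_addr = 0x10000013UL;
	unsigned long size = 200;

	/* Reject zero size and wraparound, as __ioremap_caller() does. */
	unsigned long last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return 1;

	/* Page-align the request and remember the sub-page offset. */
	unsigned long offset = phys_addr & ~PAGE_MASK;	/* 0x13 */
	phys_addr &= PAGE_MASK;				/* 0x10000000 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* 0x1000, one page */

	/* The caller gets the mapping base plus the saved offset back. */
	printf("map %#lx for %#lx bytes, return base + %#lx\n",
	       phys_addr, size, offset);
	return 0;
}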
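The CONFIG_PMB block maps as much of a P1SEG-or-higher region as it can through the PMB and leaves the remainder to ioremap_page_range(). A sketch of that accounting under one stated assumption: the stand-in pmb_remap_stub() below pretends PMB entries only cover whole 16 MiB sections, standing in for the real pmb_remap() and its mix of entry sizes:

#include <stdio.h>

#define PMB_SECTION	(16UL << 20)	/* assumed PMB granularity for the demo */

/* Stand-in for pmb_remap(): report how many bytes PMB entries covered. */
static unsigned long pmb_remap_stub(unsigned long virt, unsigned long phys,
				    unsigned long size)
{
	(void)virt;
	(void)phys;
	return size & ~(PMB_SECTION - 1);
}

int main(void)
{
	unsigned long addr = 0xb0000000UL;	/* hypothetical vm_struct base */
	unsigned long phys_addr = 0x40000000UL;
	unsigned long size = 40UL << 20;	/* 40 MiB request */

	unsigned long mapped = pmb_remap_stub(addr, phys_addr, size);
	if (mapped) {
		/* Skip past the PMB-covered prefix, exactly as the patch does. */
		addr += mapped;
		phys_addr += mapped;
		size -= mapped;
	}

	/* The remaining tail would go through conventional page tables. */
	printf("PMB covered %lu MiB, %lu MiB left for the UTLB path\n",
	       mapped >> 20, size >> 20);
	return 0;
}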
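Finally, __iounmap()'s early return leans on the fixed 29-bit segment layout: P1/P2 are identity-mapped windows with no page-table state to tear down. The sketch below restates the conventional segment bases and the PXSEG() mask from <asm/addrspace.h> as assumptions (including reading P3_ADDR_MAX as the P4 base), since those headers are not part of this patch:

#include <stdio.h>

/* Conventional SH 29-bit segment bases, restated here for the demo. */
#define P1SEG	0x80000000UL	/* cached identity-mapped */
#define P2SEG	0xa0000000UL	/* uncached identity-mapped */
#define P3SEG	0xc0000000UL	/* page-table mapped */
#define P4SEG	0xe0000000UL	/* control space; taken here as P3_ADDR_MAX */

#define PXSEG(a)	((unsigned long)(a) & 0xe0000000UL)

/* Mirror of iomapping_nontranslatable(), minus the PCI window check. */
static int nontranslatable(unsigned long vaddr)
{
	return PXSEG(vaddr) < P3SEG || vaddr >= P4SEG;
}

int main(void)
{
	unsigned long samples[] = { 0x8c000000UL, 0xa0000000UL, 0xc0001000UL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#lx -> %s\n", samples[i],
		       nontranslatable(samples[i]) ?
		       "identity/fixed, nothing to unmap" :
		       "translated, tear down VMA + PTEs");
	return 0;
}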