/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010 Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
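/*
 * Three mapping strategies are tried in order: an early fixmap
 * mapping while the MM is still coming up, a PMB entry where the
 * hardware supports it, and finally an ordinary page-table mapping
 * carved out of the vmalloc area.
 */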
void __iomem * __init_refok
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	void __iomem *mapped;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we can't yet use the regular approach (the vmalloc area
	 * is not up yet), go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB (the SH-4A Privileged
	 * space Mapping Buffer). PMB entries are all pre-faulted.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (mapped && !IS_ERR(mapped))
		return mapped;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
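	/*
	 * For example (assuming a 4KiB PAGE_SIZE), a request for 0x10
	 * bytes at physical 0x1f000004 becomes offset = 0x4,
	 * phys_addr = 0x1f000000 and size = 0x1000; the offset is
	 * added back on to the returned cookie below.
	 */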

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

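	/*
	 * Populate the page tables for the new window; on failure the
	 * vm_struct reserved above is released again via vunmap().
	 */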
	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
		vunmap((void *)orig_addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
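
/*
 * Typical use is through the ioremap()/iounmap() wrappers in
 * <asm/io.h>, e.g. (a sketch only; the base/length values here are
 * made up):
 *
 *	void __iomem *regs = ioremap(0xa4000000, 0x100);
 *	if (regs) {
 *		u32 val = readl(regs + 0x10);
 *		writel(val | 1, regs + 0x10);
 *		iounmap(regs);
 *	}
 */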

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas (the
	 * identity-mapped cached/uncached segments), as well as parts
	 * of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}

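/*
 * Undoes a mapping by trying each of the strategies used by
 * __ioremap_caller() in turn: a non-translatable (or early fixed)
 * mapping needs no page-table work, a PMB mapping is handed back to
 * the PMB, and anything else is an ordinary vmalloc-space mapping.
 */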
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap(addr) == 0)
		return;

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);