arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; ++i)
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;

        WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

        return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;
        int ram_region;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        /* First check if the whole region can be identified as RAM or not */
        ram_region = region_is_ram(phys_addr, size);
        if (ram_region > 0) {
                WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
                                (unsigned long int)phys_addr,
                                (unsigned long int)last_addr);
                return NULL;
        }

        /* If it could not be identified (-1), check page by page */
        if (ram_region < 0) {
                pfn      = phys_addr >> PAGE_SHIFT;
                last_pfn = last_addr >> PAGE_SHIFT;
                if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                          __ioremap_check_ram) == 1)
                        return NULL;
        }
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                if (!is_new_memtype_allowed(phys_addr, size,
                                            prot_val, new_prot_val)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        goto err_free_memtype;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, prot_val))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        free_memtype(phys_addr, phys_addr + size);
        return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
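
/*
 * Illustrative sketch of typical driver-side use of ioremap_nocache(): map a
 * small MMIO register window, access it with the readl()/writel() helpers
 * from <linux/io.h>, and release it with iounmap().  The physical address,
 * window size and register offsets below are hypothetical placeholders, not
 * values taken from this file.
 */
static u32 __maybe_unused example_read_device_id(void)
{
        void __iomem *regs;
        u32 id;

        /* Hypothetical 4 KiB register BAR at a made-up physical address. */
        regs = ioremap_nocache(0xfed00000UL, 0x1000);
        if (!regs)
                return 0;

        id = readl(regs + 0x00);        /* hypothetical ID register */
        writel(0x1, regs + 0x04);       /* hypothetical enable bit */

        iounmap(regs);
        return id;
}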

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
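
/*
 * Illustrative sketch of ioremap_wc() use: map a (hypothetical) frame buffer
 * aperture write-combined so that large fills are not serialized as uncached
 * stores, clear it with memset_io(), then unmap it.  The address and size
 * are placeholders; on non-PAT systems this silently degrades to an uncached
 * mapping, as the fallback above shows.
 */
static void __maybe_unused example_clear_framebuffer(void)
{
        void __iomem *fb;

        fb = ioremap_wc(0xd0000000UL, 8 * 1024 * 1024);
        if (!fb)
                return;

        memset_io(fb, 0, 8 * 1024 * 1024);
        iounmap(fb);
}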

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
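
/*
 * Illustrative sketch of ioremap_cache() use: map a (hypothetical) firmware
 * table that lives in reserved, non-RAM memory with write-back caching,
 * since it is only read and has no MMIO side effects, and copy it out with
 * memcpy_fromio().  The physical address and the destination buffer are
 * placeholders.
 */
static void __maybe_unused example_copy_firmware_table(void *dst, size_t len)
{
        void __iomem *tbl;

        tbl = ioremap_cache(0xfeedc000UL, len);
        if (!tbl)
                return;

        memcpy_fromio(dst, tbl, len);
        iounmap(tbl);
}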

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        p = find_vm_area((void __force *)addr);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}
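
/*
 * Illustrative sketch of the xlate_dev_mem_ptr()/unxlate_dev_mem_ptr()
 * pairing, in the style of a /dev/mem read: translate the physical address
 * to a kernel pointer, copy the bytes out, then drop the temporary mapping
 * (a no-op when the page is RAM).  Because only a single page is mapped for
 * non-RAM addresses, the copy must not cross a page boundary.  The buffer
 * and address are placeholders.
 */
static int __maybe_unused example_read_phys(unsigned long phys, void *buf,
                                            size_t count)
{
        void *ptr;

        ptr = xlate_dev_mem_ptr(phys);
        if (!ptr)
                return -EFAULT;

        memcpy(buf, ptr, count);
        unxlate_dev_mem_ptr(phys, ptr);

        return 0;
}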

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

#ifdef CONFIG_X86_64
        BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
        WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

        early_ioremap_setup();

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init __early_set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}
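
/*
 * Illustrative sketch of how early boot code consumes the machinery above:
 * before the normal ioremap() path works, early_ioremap()/early_iounmap()
 * (from the generic early_ioremap support that early_ioremap_setup() wires
 * up to __early_set_fixmap()) provide a handful of temporary boot-time
 * mappings.  The probed physical address is a placeholder.
 */
static void __init __maybe_unused example_early_peek(void)
{
        void __iomem *p;

        p = early_ioremap(0x000f0000UL, 64);    /* hypothetical BIOS-area probe */
        if (!p)
                return;

        printk(KERN_INFO "first bytes: %02x %02x\n", readb(p), readb(p + 1));
        early_iounmap(p, 64);
}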