/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        enum page_cache_mode pcm)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (pcm) {
        case _PAGE_CACHE_MODE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

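/*
 * walk_system_ram_range() callback, used below in __ioremap_caller():
 * returning 1 aborts the walk as soon as a valid, non-reserved RAM page
 * is found in the range, i.e. memory that must not be ioremap()ed.
 */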
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; ++i)
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;

        WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

        return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by a non-WB type of MTRR.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, enum page_cache_mode pcm, void *caller)
{
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        enum page_cache_mode new_pcm;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;
        int ram_region;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped.
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         * First check whether the whole region can be identified as RAM.
         */
        ram_region = region_is_ram(phys_addr, size);
        if (ram_region > 0) {
                WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
                                (unsigned long int)phys_addr,
                                (unsigned long int)last_addr);
                return NULL;
        }

        /* If the region could not be identified (-1), check page by page */
        if (ram_region < 0) {
                pfn      = phys_addr >> PAGE_SHIFT;
                last_pfn = last_addr >> PAGE_SHIFT;
                if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                          __ioremap_check_ram) == 1)
                        return NULL;
        }
        /*
         * Mappings have to be page-aligned.
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                pcm, &new_pcm);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (pcm != new_pcm) {
                if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                pcm, new_pcm);
                        goto err_free_memtype;
                }
                pcm = new_pcm;
        }

        prot = PAGE_KERNEL_IO;
        switch (pcm) {
        case _PAGE_CACHE_MODE_UC:
        default:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_UC));
                break;
        case _PAGE_CACHE_MODE_UC_MINUS:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
                break;
        case _PAGE_CACHE_MODE_WC:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WC));
                break;
        case _PAGE_CACHE_MODE_WB:
                break;
        }

        /*
         * Ok, go for it.
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, pcm))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        free_memtype(phys_addr, phys_addr + size);
        return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
         *
         * Until we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, pcm,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
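
/*
 * A minimal usage sketch of the ioremap_nocache()/iounmap() pairing
 * documented above. The PCI device, choice of BAR 0, and the register
 * offset are hypothetical, for illustration only:
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + 0x10);       // 0x10: made-up register offset
 *      iounmap(regs);
 */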

/**
 * ioremap_wc   -       map memory into CPU space, write-combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

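/**
 * ioremap_cache - map bus memory into CPU space, write-back cached
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap maps the memory write-back cacheable
 * (_PAGE_CACHE_MODE_WB); the caller must know that caching the range
 * has no side effects on the device.
 *
 * Must be freed with iounmap.
 */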
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

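/**
 * ioremap_prot - map bus memory with caller-supplied protection bits
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 * @prot_val:   raw page protection value; only its cache-mode bits are
 *              used here, via pgprot2cachemode()
 *
 * Must be freed with iounmap.
 */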
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size,
                                pgprot2cachemode(__pgprot(prot_val)),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        p = find_vm_area((void __force *)addr);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

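/*
 * These report whether the huge I/O mappings described in the note above
 * __ioremap_caller() are usable: 1GB (pud-level) pages require a 64-bit
 * kernel with the gbpages CPU feature, 2MB (pmd-level) pages require PSE.
 */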
int arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
        return cpu_has_gbpages;
#else
        return 0;
#endif
}

int arch_ioremap_pmd_supported(void)
{
        return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
        unsigned long start  = phys &  PAGE_MASK;
        unsigned long offset = phys & ~PAGE_MASK;
        unsigned long vaddr;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
        /* Only add the offset on success; return NULL if the ioremap() failed. */
        if (vaddr)
                vaddr += offset;

        return (void *)vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
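
/*
 * Sketch of the intended pairing (a hypothetical /dev/mem-style read
 * path), assuming every successful xlate_dev_mem_ptr() is matched by an
 * unxlate_dev_mem_ptr() for the same physical address:
 *
 *      void *ptr = xlate_dev_mem_ptr(p);
 *      if (!ptr)
 *              return -EFAULT;
 *      // ... copy data out of ptr ...
 *      unxlate_dev_mem_ptr(p, ptr);
 */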
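
/*
 * Single statically-allocated pte page backing the early ioremap fixmap
 * slots: early_ioremap_init() hooks it into the pmd covering the
 * FIX_BTMAP_BEGIN..FIX_BTMAP_END range before the page allocator is up.
 */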
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

#ifdef CONFIG_X86_64
        BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
        WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

        early_ioremap_setup();

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

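/*
 * Install a single early fixmap pte (or clear it when @flags is empty)
 * and flush the one affected TLB entry.
 */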
void __init __early_set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}