arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

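/*
 * Convert a kernel virtual address to a physical address.  Handles both
 * the kernel text mapping (above __START_KERNEL_map) and the direct
 * mapping (above PAGE_OFFSET).
 */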
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

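/*
 * Check that a physical address is within the range the CPU can
 * actually address (boot_cpu_data.x86_phys_bits).
 */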
static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

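/*
 * page_is_ram - check whether a page frame is usable RAM according to
 * the E820 map, excluding the first page and the legacy BIOS area.
 */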
int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory: this is a
         * BIOS-owned area, not kernel RAM, but it is generally not
         * listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSen report the PC BIOS
         * area (640K->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

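/*
 * pagerange_is_ram - check whether the physical range [start, end) is
 * uniformly RAM or non-RAM.  Returns 1 if every page is RAM, 0 if none
 * is, and -1 if the range is mixed.
 */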
int pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                if (page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area; it's always mapped.
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                                pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

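        /*
         * Track the requested cache attribute for this physical range in
         * the PAT memtype tree; reserve_memtype() may hand back a
         * different, compatible attribute in new_prot_val if the range
         * already has an established type.
         */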
        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types for certain
                 * requested types:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Until we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * ioremap_wc - map memory into CPU space as write-combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

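/**
 * ioremap_cache - map bus memory into CPU space as cacheable (write-back)
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * Must be freed with iounmap.
 */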
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
                                        unsigned long size)
{
        unsigned long flags;
        void *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = (void *) __ioremap_caller(phys_addr, size, flags,
                                        __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return (void __iomem *)ret;
}

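/*
 * ioremap_prot - map bus memory with a caller-supplied protection value;
 * only the cache attribute bits (_PAGE_CACHE_MASK) of prot_val are used
 * here.
 */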
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

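/*
 * Undo xlate_dev_mem_ptr(): nothing to do for RAM pages (they were
 * mapped via __va), otherwise tear down the temporary ioremap mapping.
 */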
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

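/* Set via the "early_ioremap_debug" boot parameter to trace early mappings. */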
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

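/*
 * bm_pte backs the boot-time fixmap slots (FIX_BTMAP_*).  Once
 * after_paging_init is set, the regular set_fixmap()/clear_fixmap()
 * interfaces are used instead of poking bm_pte directly.
 */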
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

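/*
 * Hook bm_pte into the page tables so that the FIX_BTMAP fixmap slots
 * can be used before the normal ioremap machinery is available.
 */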
void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

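/*
 * Tear down the boot-time page table (bm_pte) that backed the
 * FIX_BTMAP slots.
 */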
void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

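/*
 * Re-establish any still-present boot-time fixmap mappings with
 * set_fixmap() and switch early_set_fixmap() over to the
 * after-paging-init path.
 */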
void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

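/*
 * Install (or, with empty flags, clear) a single pte in the boot-time
 * fixmap page table and flush the corresponding TLB entry.
 */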
static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

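/*
 * Number of early_ioremap() mappings currently live; also selects which
 * group of NR_FIX_BTMAPS fixmap slots the next mapping uses.
 */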
static int __initdata early_ioremap_nested;

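/* Late initcall: warn if any early_ioremap() was never early_iounmap()ed. */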
static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                early_ioremap_nested);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

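/*
 * early_ioremap - boot-time ioremap using the FIX_BTMAP fixmap slots.
 * Intended for use while system_state == SYSTEM_BOOTING (it warns
 * otherwise); limited to NR_FIX_BTMAPS pages per mapping and
 * FIX_BTMAPS_NESTING simultaneous mappings.  Undo with early_iounmap().
 */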
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

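/*
 * Tear down a mapping created by early_ioremap().  Mappings must be
 * released in reverse order of creation, since the nesting counter
 * selects which group of fixmap slots to clear.
 */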
void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        if (WARN_ON(nesting < 0))
                return;

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

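/*
 * Out-of-line landing pad for the fixmap index range check (see
 * fix_to_virt()); reaching it at run time means an invalid fixmap index
 * was used.
 */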
void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}