arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	/*
	 * walk_system_ram_range() only calls us for ranges that intersect
	 * system RAM, so reaching this point means the range covers only
	 * invalid or reserved RAM pages; allow the mapping, but warn once.
	 */
	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
				(unsigned long int)phys_addr,
				(unsigned long int)last_addr);
		return NULL;
	}

	/* If the region could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 cachemode2protval(pcm), &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	new_pcm = pgprot2cachemode(__pgprot(new_prot_val));

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
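
/*
 * Illustrative sketch, not part of the original file: how a driver would
 * typically pair ioremap_nocache() with iounmap() for a register window.
 * The function name, register offsets, and parameters are hypothetical.
 */
static void __maybe_unused example_toggle_device_reg(resource_size_t bar_phys,
						     unsigned long bar_len)
{
	void __iomem *regs;

	regs = ioremap_nocache(bar_phys, bar_len);
	if (!regs)
		return;

	writel(0x1, regs + 0x04);	/* hypothetical control register */
	(void)readl(regs + 0x00);	/* read back to post the write   */

	iounmap(regs);
}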

/**
 * ioremap_wc - map memory into CPU space, write-combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
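
/*
 * Illustrative sketch, not part of the original file: write-combined
 * mappings suit streaming writes such as filling a framebuffer. The
 * function name and parameters below are assumptions for the example.
 */
static void __maybe_unused example_clear_framebuffer(resource_size_t fb_phys,
						     unsigned long fb_len)
{
	u32 __iomem *fb;
	unsigned long i;

	fb = (u32 __iomem *)ioremap_wc(fb_phys, fb_len);
	if (!fb)
		return;

	/* WC lets the CPU combine these stores into bursts */
	for (i = 0; i < fb_len / sizeof(u32); i++)
		iowrite32(0, fb + i);

	iounmap(fb);
}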

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
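
/*
 * Illustrative sketch, not part of the original file: a /dev/mem style
 * reader pairs xlate_dev_mem_ptr() with unxlate_dev_mem_ptr() so that
 * non-RAM pages get a temporary ioremap_cache() mapping. The function
 * name is hypothetical.
 */
static u8 __maybe_unused example_peek_phys_byte(unsigned long phys)
{
	void *p = xlate_dev_mem_ptr(phys);
	u8 val = 0;

	if (p) {
		val = *(u8 *)p;
		unxlate_dev_mem_ptr(phys, p);
	}
	return val;
}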

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}