/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

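/*
 * page_is_ram() reports whether a page frame is backed by system RAM.
 * The 32-bit variant simply bounds-checks against the end of the
 * kernel's linear mapping (hence the XXX: highmem pages are not
 * accounted for); the 64-bit variant walks the LMB regions, since RAM
 * may be discontiguous there.
 */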
int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64    /* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;

        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                    (paddr < (base + lmb.memory.region[i].size)))
                        return 1;
        }

        return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
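
/*
 * Illustrative use of the hook above (a sketch, not code from this
 * file): a driver's mmap() method would typically filter its mapping
 * protection like
 *
 *      vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *                                      vma->vm_end - vma->vm_start,
 *                                      vma->vm_page_prot);
 *
 * which is what /dev/mem does, so mappings of non-RAM come out
 * guarded and cache-inhibited.
 */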

#ifdef CONFIG_MEMORY_HOTPLUG

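/*
 * Called by the generic hotplug code for each page of a section being
 * brought online: hand the page to the buddy allocator and bump the
 * global page counters.
 */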
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

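/*
 * Arch-specific part of memory hot-add: extend the kernel linear
 * mapping over the new range, then let the generic code create the
 * struct pages.  Picking node_zones[0] below assumes the node's first
 * zone is the right home for the new pages, which holds for the
 * non-highmem platforms that support hotplug.
 */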
int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

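/*
 * Debugging dump of memory state (the SysRq-M / out-of-memory path):
 * classify every valid pfn of every online node, holding the node's
 * resize lock so memory hotplug cannot change the spanned range
 * underneath us.
 */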
void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        unsigned long highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
        printk("%ld pages of HIGHMEM\n", highmem);
#endif
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
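        /*
         * Worked example, assuming 4K pages: 256MB of RAM is 65536 page
         * frames, so the bitmap needs 65536 bits = 8KB, which rounds up
         * to two pages -- that is what bootmem_bootmap_pages() returns.
         */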
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < total_lowmem)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i));
                else if (lmb.reserved.region[i].base < total_lowmem) {
                        unsigned long adjusted_size = total_lowmem -
                                      lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i));

#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/*
 * Mark the holes between LMB memory regions as nosave, so the
 * hibernation core doesn't try to save or restore page frames that
 * don't exist.
 */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
        map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
        kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
                         KMAP_FIX_BEGIN);
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

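/*
 * Final accounting pass, run once the core VM is ready to take over
 * from bootmem: release the bootmem pages to the buddy allocator,
 * free highmem frames directly, count what remains reserved, and
 * print the traditional one-line memory summary.
 */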
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = total_lowmem >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
                initsize >> 10);

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

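/*
 * Three strategies for the writeback below: Book E kmaps the page
 * first, since it may live in highmem; 8xx and 64-bit can use the
 * page's kernel virtual address directly, as they have no highmem;
 * everything else flushes by physical address.
 */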
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* No need to kmap: neither 8xx nor 64-bit supports highmem,
         * so the page always has a kernel virtual address */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero-filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

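/*
 * Make instruction modifications in another context's page coherent;
 * the classic caller is ptrace (via access_process_vm) writing
 * breakpoints into a child's text, hence the kmap of the target page.
 */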
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as a write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question. To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                if (!PageReserved(page)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm)
                                __flush_dcache_icache((void *) address);
                        else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}