/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995 Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 */
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>          /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>     /* for node_online_map */
#include <linux/pagemap.h>      /* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>

extern int  data_start;
extern void parisc_kernel_start(void);  /* Kernel entry point in head.S */
#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
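/* To make the "pmd adjacent to and below the pgd" requirement above concrete:
 * the ordering has to come from the architecture linker script, not from C.
 * A hedged sketch of what the corresponding output-section rules could look
 * like (the section names are taken from the attributes above; the exact
 * surrounding script in arch/parisc/kernel/vmlinux.lds.S is an assumption):
 *
 *      . = ALIGN(PAGE_SIZE);
 *      .data..vm0.pmd : { *(.data..vm0.pmd) }
 *      .data..vm0.pgd : { *(.data..vm0.pgd) }
 *      .data..vm0.pte : { *(.data..vm0.pte) }
 *
 * With pmd0 emitted immediately before swapper_pg_dir, the first pmd ends up
 * directly below the pgd no matter how gcc orders the C declarations.
 */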
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif
static struct resource data_resource = {
    .name   = "Kernel data",
    .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
    .name   = "Kernel code",
    .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
    .name   = "PDC data (Page Zero)",
    .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};
static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
/*
 * get_memblock() allocates pages via memblock.
 * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
 * doesn't allocate from bottom to top, which is needed because we only created
 * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
 */
static void * __init get_memblock(unsigned long size)
{
    static phys_addr_t search_addr __initdata;
    phys_addr_t phys;

    search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
    search_addr = ALIGN(search_addr, size);
    while (!memblock_is_region_memory(search_addr, size) ||
           memblock_is_region_reserved(search_addr, size))
        search_addr += size;

    phys = search_addr;
    if (!phys)
        panic("get_memblock() failed.\n");
    memblock_reserve(phys, size);
    memset(__va(phys), 0, size);

    return __va(phys);
}
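/*
 * Illustrative only (hypothetical numbers, not from the source): with a
 * 4 KiB PAGE_SIZE and _end at physical 0x00801234, a get_memblock(PAGE_SIZE)
 * call starts its scan at PAGE_ALIGN(0x00801234) = 0x00802000 and walks
 * upward one 'size' step at a time until it finds a region that is real
 * memory and not already reserved.  Scanning bottom-up keeps every early
 * allocation inside the KERNEL_INITIAL_SIZE mapping set up in head.S, which
 * is the whole reason a generic top-down memblock search can't be used here.
 */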
#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;
static void __init mem_limit_func(void)
    /* We need this before __setup() functions are called */
    for (cp = boot_command_line; *cp; ) {
        if (memcmp(cp, "mem=", 4) == 0) {
            limit = memparse(cp, &end);
            while (*cp != ' ' && *cp)
    if (limit < mem_limit)

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
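/*
 * Two worked numbers for the code above (illustrative, not from the source):
 * booting with "mem=512M" makes memparse() return 512 << 20, so mem_limit
 * drops from MAX_MEM to 0x20000000.  MAX_GAP is expressed in pages, so with
 * the usual 4 KiB PAGE_SIZE it is 0x40000000 >> 12 = 0x40000 pages, i.e.
 * physical ranges separated by more than 1 GB count as "too far apart" in
 * the check below.
 */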
static void __init setup_bootmem(void)
{
    unsigned long mem_max;
#ifndef CONFIG_DISCONTIGMEM
    physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
    int i, sysram_resource_count;

    disable_sr_hashing(); /* Turn off space register hashing */

    /*
     * Sort the ranges. Since the number of ranges is typically
     * small, and performance is not an issue here, just do
     * a simple insertion sort.
     */
    for (i = 1; i < npmem_ranges; i++) {
        for (j = i; j > 0; j--) {
            if (pmem_ranges[j-1].start_pfn <
                pmem_ranges[j].start_pfn) {
                break;
            }
            tmp = pmem_ranges[j-1].start_pfn;
            pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
            pmem_ranges[j].start_pfn = tmp;
            tmp = pmem_ranges[j-1].pages;
            pmem_ranges[j-1].pages = pmem_ranges[j].pages;
            pmem_ranges[j].pages = tmp;
#ifndef CONFIG_DISCONTIGMEM
    /*
     * Throw out ranges that are too far apart (controlled by
     * MAX_GAP).
     */
    for (i = 1; i < npmem_ranges; i++) {
        if (pmem_ranges[i].start_pfn -
            (pmem_ranges[i-1].start_pfn +
             pmem_ranges[i-1].pages) > MAX_GAP) {
            printk("Large gap in memory detected (%ld pages). "
                   "Consider turning on CONFIG_DISCONTIGMEM\n",
                   pmem_ranges[i].start_pfn -
                   (pmem_ranges[i-1].start_pfn +
                    pmem_ranges[i-1].pages));
    /* Print the memory ranges */
    pr_info("Memory Ranges:\n");

    for (i = 0; i < npmem_ranges; i++) {
        struct resource *res = &sysram_resources[i];

        size = (pmem_ranges[i].pages << PAGE_SHIFT);
        start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
        pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
                i, start, start + (size - 1), size >> 20);
        /* request memory resource */
        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        request_resource(&iomem_resource, res);
    }

    sysram_resource_count = npmem_ranges;
    /*
     * For 32-bit kernels we limit the amount of memory we can
     * support, in order to preserve enough kernel address space
     * for other purposes. For 64-bit kernels we don't normally
     * limit the memory, but this mechanism can be used to
     * artificially limit the amount of memory (and it is written
     * to work with multiple memory ranges).
     */
    mem_limit_func();       /* check for "mem=" argument */
    for (i = 0; i < npmem_ranges; i++) {
        rsize = pmem_ranges[i].pages << PAGE_SHIFT;
        if ((mem_max + rsize) > mem_limit) {
            printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
            if (mem_max == mem_limit)
            pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
                                   - (mem_max >> PAGE_SHIFT);
            npmem_ranges = i + 1;

    printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);
#ifndef CONFIG_DISCONTIGMEM
    /* Merge the ranges, keeping track of the holes */
        unsigned long end_pfn;
        unsigned long hole_pages;

        end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
        for (i = 1; i < npmem_ranges; i++) {
            hole_pages = pmem_ranges[i].start_pfn - end_pfn;
                pmem_holes[npmem_holes].start_pfn = end_pfn;
                pmem_holes[npmem_holes++].pages = hole_pages;
                end_pfn += hole_pages;
            end_pfn += pmem_ranges[i].pages;

        pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
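/*
 * Worked example for the merge above (hypothetical firmware ranges, not from
 * the source): with range 0 = { start_pfn 0x00000, pages 0x10000 } and
 * range 1 = { start_pfn 0x18000, pages 0x08000 }, end_pfn starts at 0x10000,
 * the 0x8000-page hole [0x10000, 0x18000) is recorded in pmem_holes[], and
 * range 0 is widened to 0x20000 pages so a single contiguous range covers
 * everything; the recorded holes are handed to memblock_reserve() further
 * down so nothing ever allocates from them.
 */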
#ifdef CONFIG_DISCONTIGMEM
    for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
        memset(NODE_DATA(i), 0, sizeof(pg_data_t));
    }
    memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

    for (i = 0; i < npmem_ranges; i++) {
        node_set_state(i, N_NORMAL_MEMORY);

    /*
     * Initialize and free the full range of memory in each range.
     */
    for (i = 0; i < npmem_ranges; i++) {
        unsigned long start_pfn;
        unsigned long npages;

        start_pfn = pmem_ranges[i].start_pfn;
        npages = pmem_ranges[i].pages;

        start = start_pfn << PAGE_SHIFT;
        size = npages << PAGE_SHIFT;

        /* add system RAM memblock */
        memblock_add(start, size);

        if ((start_pfn + npages) > max_pfn)
            max_pfn = start_pfn + npages;
    /* IOMMU is always used to access "high mem" on those boxes
     * that can hold enough memory that a PCI device cannot DMA
     * directly to every physical address.
     * ISA DMA support will need to revisit this.
     */
    max_low_pfn = max_pfn;
    /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

    memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
                          PDC_CONSOLE_IO_IODC_SIZE));
    memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
                     (unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_DISCONTIGMEM
    /* reserve the holes */
    for (i = 0; i < npmem_holes; i++) {
        memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
                         (pmem_holes[i].pages << PAGE_SHIFT));
    }
#endif

#ifdef CONFIG_BLK_DEV_INITRD
    printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
    if (__pa(initrd_start) < mem_max) {
        unsigned long initrd_reserve;

        if (__pa(initrd_end) > mem_max) {
            initrd_reserve = mem_max - __pa(initrd_start);
        } else {
            initrd_reserve = initrd_end - initrd_start;
        }
        initrd_below_start_ok = 1;
        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
               __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

        memblock_reserve(__pa(initrd_start), initrd_reserve);
    data_resource.start = virt_to_phys(&data_start);
    data_resource.end = virt_to_phys(_end) - 1;
    code_resource.start = virt_to_phys(_text);
    code_resource.end = virt_to_phys(&data_start) - 1;

    /* We don't know which region the kernel will be in, so try
     * all of them.
     */
    for (i = 0; i < sysram_resource_count; i++) {
        struct resource *res = &sysram_resources[i];
        request_resource(res, &code_resource);
        request_resource(res, &data_resource);
    }
    request_resource(&sysram_resources[0], &pdcdata_resource);
}
static int __init parisc_text_address(unsigned long vaddr)
{
    static unsigned long head_ptr __initdata;

    if (!head_ptr)
        head_ptr = PAGE_MASK & (unsigned long)
            dereference_function_descriptor(&parisc_kernel_start);

    return core_kernel_text(vaddr) || vaddr == head_ptr;
}
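/*
 * Why dereference_function_descriptor() above: on 64-bit parisc (as on ia64
 * and ppc64), the address of a function is the address of a descriptor
 * ("plabel"), not of the code itself, so &parisc_kernel_start has to be
 * resolved through the descriptor before it can be compared with text
 * addresses.  Rough usage sketch (illustrative, not from the source):
 *
 *      unsigned long entry = (unsigned long)
 *              dereference_function_descriptor(&parisc_kernel_start);
 *      // 'entry' now points at the first instruction in head.S, which is
 *      // what gets compared against 'vaddr' page by page.
 */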
static void __init map_pages(unsigned long start_vaddr,
                             unsigned long start_paddr, unsigned long size,
                             pgprot_t pgprot, int force)
{
    unsigned long end_paddr;
    unsigned long start_pmd;
    unsigned long start_pte;
    unsigned long address;
    unsigned long ro_start;
    unsigned long ro_end;
    unsigned long kernel_end;

    ro_start = __pa((unsigned long)_text);
    ro_end   = __pa((unsigned long)&data_start);
    kernel_end = __pa((unsigned long)&_end);

    end_paddr = start_paddr + size;

    pg_dir = pgd_offset_k(start_vaddr);
#if PTRS_PER_PMD == 1
    start_pmd = 0;
#else
    start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
    start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

    address = start_paddr;

    while (address < end_paddr) {
#if PTRS_PER_PMD == 1
        pmd = (pmd_t *)__pa(pg_dir);
#else
        pmd = (pmd_t *)pgd_address(*pg_dir);
        /*
         * pmd is physical at this point
         */
        if (!pmd) {
            pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
            pmd = (pmd_t *) __pa(pmd);
        }
        pgd_populate(NULL, pg_dir, __va(pmd));
#endif
        /* now change pmd to kernel virtual addresses */
        pmd = (pmd_t *)__va(pmd) + start_pmd;
        for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

            /*
             * pg_table is physical at this point
             */
            pg_table = (pte_t *)pmd_address(*pmd);
            if (!pg_table) {
                pg_table = (pte_t *) get_memblock(PAGE_SIZE);
                pg_table = (pte_t *) __pa(pg_table);
            }

            pmd_populate_kernel(NULL, pmd, __va(pg_table));

            /* now change pg_table to kernel virtual addresses */
            pg_table = (pte_t *) __va(pg_table) + start_pte;
            for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                if (force)
                    pte = __mk_pte(address, pgprot);
                else if (parisc_text_address(vaddr)) {
                    pte = __mk_pte(address, PAGE_KERNEL_EXEC);
                    if (address >= ro_start && address < kernel_end)
                        pte = pte_mkhuge(pte);
                }
                else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
                if (address >= ro_start && address < ro_end) {
                    pte = __mk_pte(address, PAGE_KERNEL_EXEC);
                    pte = pte_mkhuge(pte);
                } else
#endif
                {
                    pte = __mk_pte(address, pgprot);
                    if (address >= ro_start && address < kernel_end)
                        pte = pte_mkhuge(pte);
                }

                if (address >= end_paddr) {

                set_pte(pg_table, pte);

                address += PAGE_SIZE;

            if (address >= end_paddr)
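/*
 * Summary of the protection choices made in the loop above (descriptive
 * comment added for clarity; the ranges are the ones computed from _text,
 * data_start and _end at the top of map_pages):
 *   - force            -> caller-supplied pgprot, used verbatim
 *   - kernel text      -> PAGE_KERNEL_EXEC, huge-mapped while inside
 *                         [ro_start, kernel_end)
 *   - read-only data   -> PAGE_KERNEL_EXEC + huge, but only when the kernel
 *                         uses 4 KiB base pages (CONFIG_PARISC_PAGE_SIZE_4KB)
 *   - everything else  -> caller-supplied pgprot, huge-mapped while inside
 *                         [ro_start, kernel_end)
 */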
void free_initmem(void)
{
    unsigned long init_begin = (unsigned long)__init_begin;
    unsigned long init_end = (unsigned long)__init_end;

    /* The init text pages are marked R-X.  We have to
     * flush the icache and mark them RW-
     *
     * This is tricky, because map_pages is in the init section.
     * Do a dummy remap of the data section first (the data
     * section is already PAGE_KERNEL) to pull in the TLB entries
     * for map_pages(). */
    map_pages(init_begin, __pa(init_begin), init_end - init_begin,

    /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
     * map_pages() */
    map_pages(init_begin, __pa(init_begin), init_end - init_begin,

    /* force the kernel to see the new TLB entries */
    __flush_tlb_range(0, init_begin, init_end);

    /* finally dump all the instructions which were cached, since the
     * pages are no longer executable */
    flush_icache_range(init_begin, init_end);

    free_initmem_default(POISON_FREE_INITMEM);

    /* set up a new LED state on systems shipped with an LED State panel */
    pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
    /* rodata memory was already mapped with KERNEL_RO access rights by
       pagetable_init() and map_pages(). No need to do anything else here. */
    printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
           (unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif
/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                    & ~(VM_MAP_OFFSET-1)))
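/*
 * Worked example for SET_MAP_OFFSET() (hypothetical address, not from the
 * source): with VM_MAP_OFFSET = 32K = 0x8000, SET_MAP_OFFSET(0x40012345)
 * = (0x40012345 + 0x8000) & ~0x7fff = 0x40018000, i.e. the argument is
 * rounded up to the next 32K boundary, leaving a gap of up to 32K.  When the
 * pcxl DMA code is in use (see mem_init() below), the resulting layout is:
 *
 *      MAP_START .. pcxl_dma_start              32K-aligned gap in between
 *      pcxl_dma_start .. + PCXL_DMA_MAP_SIZE    pcxl DMA mapping area
 *      parisc_vmalloc_start ..                  vmalloc area, after another gap
 */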
void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

unsigned long pcxl_dma_start __read_mostly;

void __init mem_init(void)
{
    /* Do sanity checks on IPC (compat) structures */
    BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
    BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
    BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
    BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);

    BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
    BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
    BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
    BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
    /* Do sanity checks on page table constants */
    BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
    BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
    BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
    BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
                 > BITS_PER_LONG);

    high_memory = __va((max_pfn << PAGE_SHIFT));
    set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
#ifdef CONFIG_PA11
    if (hppa_dma_ops == &pcxl_dma_ops) {
        pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
        parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
                                              + PCXL_DMA_MAP_SIZE);
    } else {
        parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
    }
#else
    parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
    mem_init_print_info(NULL);

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
    printk("virtual kernel memory layout:\n"
           "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
           "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
           "      .init : 0x%p - 0x%p   (%4ld kB)\n"
           "      .data : 0x%p - 0x%p   (%4ld kB)\n"
           "      .text : 0x%p - 0x%p   (%4ld kB)\n",

           (void*)VMALLOC_START, (void*)VMALLOC_END,
           (VMALLOC_END - VMALLOC_START) >> 20,

           __va(0), high_memory,
           ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

           __init_begin, __init_end,
           ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

           ((unsigned long)_edata - (unsigned long)_etext) >> 10,

           ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);
void show_mem(unsigned int filter)
{
    int total = 0, reserved = 0;
    pg_data_t *pgdat;

    printk(KERN_INFO "Mem-info:\n");
    show_free_areas(filter);

    for_each_online_pgdat(pgdat) {
        unsigned long flags;
        int zoneid;

        pgdat_resize_lock(pgdat, &flags);
        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
            struct zone *zone = &pgdat->node_zones[zoneid];
            if (!populated_zone(zone))
                continue;

            total += zone->present_pages;
            reserved += zone->present_pages - zone->managed_pages;
        }
        pgdat_resize_unlock(pgdat, &flags);
    }

    printk(KERN_INFO "%d pages of RAM\n", total);
    printk(KERN_INFO "%d reserved pages\n", reserved);
#ifdef CONFIG_DISCONTIGMEM
    for (i = 0; i < npmem_ranges; i++) {
        zl = node_zonelist(i, 0);
        for (j = 0; j < MAX_NR_ZONES; j++) {
            printk("Zone list for zone %d on node %d: ", j, i);
            for_each_zone_zonelist(zone, z, zl, j)
                printk("[%d/%s] ", zone_to_nid(zone),
/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced, this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
    /* Map each physical memory range to its kernel vaddr */
    for (range = 0; range < npmem_ranges; range++) {
        unsigned long start_paddr;
        unsigned long end_paddr;

        start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
        size = pmem_ranges[range].pages << PAGE_SHIFT;
        end_paddr = start_paddr + size;

        map_pages((unsigned long)__va(start_paddr), start_paddr,
                  size, PAGE_KERNEL, 0);
    }

#ifdef CONFIG_BLK_DEV_INITRD
    if (initrd_end && initrd_end > mem_limit) {
        printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
        map_pages(initrd_start, __pa(initrd_start),
                  initrd_end - initrd_start, PAGE_KERNEL, 0);
    }
#endif

    empty_zero_page = get_memblock(PAGE_SIZE);
static void __init gateway_init(void)
{
    unsigned long linux_gateway_page_addr;
    /* FIXME: This is 'const' in order to trick the compiler
       into not treating it as DP-relative data. */
    extern void * const linux_gateway_page;

    linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

    /*
     * Set up the Linux gateway page.
     *
     * The Linux gateway page will reside in kernel space (on virtual
     * page 0), so it doesn't need to be aliased into user space.
     */
    map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
              PAGE_SIZE, PAGE_GATEWAY, 1);
}
void __init paging_init(void)
{
    flush_cache_all_local(); /* start with known state */
    flush_tlb_all_local(NULL);

    for (i = 0; i < npmem_ranges; i++) {
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };

        zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
#ifdef CONFIG_DISCONTIGMEM
        /* Need to initialize the pfnnid_map before we can initialize
           the zone */
        for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
             j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
             j++)
            pfnnid_map[j] = i;
#endif

        free_area_init_node(i, zones_size,
                            pmem_ranges[i].start_pfn, NULL);
    }
}
#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */
#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
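/*
 * Worked numbers (illustrative): on a 64-bit kernel with CONFIG_PA20,
 * NR_SPACE_IDS is 262144, so SID_ARRAY_SIZE is 262144 / (8 * 8) = 4096 longs
 * for the bitmaps below, and RECYCLE_THRESHOLD is 131072, i.e. TLB-wide
 * recycling kicks in once half of all space IDs are sitting in the dirty set.
 */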
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);
unsigned long alloc_sid(void)
{
    spin_lock(&sid_lock);

    if (free_space_ids == 0) {
        if (dirty_space_ids != 0) {
            spin_unlock(&sid_lock);
            flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
            spin_lock(&sid_lock);
        }
        BUG_ON(free_space_ids == 0);
    }

    index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
    space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
    space_id_index = index;

    spin_unlock(&sid_lock);

    return index << SPACEID_SHIFT;
}
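/*
 * Usage sketch for the pair above/below (illustrative): a space ID as handed
 * out by alloc_sid() is the bitmap index shifted left by SPACEID_SHIFT, so
 * free_sid() recovers the index with the matching right shift:
 *
 *      unsigned long sid = alloc_sid();   // e.g. index 42 << SPACEID_SHIFT
 *      ...
 *      free_sid(sid);                     // index 42 goes into dirty_space_id[]
 *
 * Freed IDs are only marked dirty here; they become allocatable again once
 * recycle_sids() runs under flush_tlb_all(), when no stale TLB entries can
 * still reference them.
 */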
void free_sid(unsigned long spaceid)
{
    unsigned long index = spaceid >> SPACEID_SHIFT;
    unsigned long *dirty_space_offset;

    dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
    index &= (BITS_PER_LONG - 1);

    spin_lock(&sid_lock);

    BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

    *dirty_space_offset |= (1L << index);

    spin_unlock(&sid_lock);
}
#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
    /* NOTE: sid_lock must be held upon entry */

    *ndirtyptr = dirty_space_ids;
    if (dirty_space_ids != 0) {
        for (i = 0; i < SID_ARRAY_SIZE; i++) {
            dirty_array[i] = dirty_space_id[i];
            dirty_space_id[i] = 0;
        }
    }
}
static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
    /* NOTE: sid_lock must be held upon entry */

    for (i = 0; i < SID_ARRAY_SIZE; i++) {
        space_id[i] ^= dirty_array[i];
    }

    free_space_ids += ndirty;
}
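/*
 * Why the XOR above works (worked bit example, illustrative): every bit set
 * in dirty_array[] is, by construction, also set in space_id[] (the ID was
 * allocated before it was freed), so XOR clears exactly the dirty bits and
 * leaves all other allocations untouched.  E.g. space_id[i] = 0b1011 with
 * dirty_array[i] = 0b0010 yields 0b1001: ID bit 1 is free again, bits 0 and
 * 3 stay allocated.
 */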
#else /* CONFIG_SMP */

static void recycle_sids(void)
{
    /* NOTE: sid_lock must be held upon entry */

    if (dirty_space_ids != 0) {
        for (i = 0; i < SID_ARRAY_SIZE; i++) {
            space_id[i] ^= dirty_space_id[i];
            dirty_space_id[i] = 0;
        }

        free_space_ids += dirty_space_ids;
#endif /* CONFIG_SMP */

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;
void flush_tlb_all(void)
{
    __inc_irq_stat(irq_tlb_count);

    spin_lock(&sid_lock);
    if (dirty_space_ids > RECYCLE_THRESHOLD) {
        BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
        get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
    }
    spin_unlock(&sid_lock);
    on_each_cpu(flush_tlb_all_local, NULL, 1);

    spin_lock(&sid_lock);
    recycle_sids(recycle_ndirty, recycle_dirty_array);
    spin_unlock(&sid_lock);
}
#else

void flush_tlb_all(void)
{
    __inc_irq_stat(irq_tlb_count);
    spin_lock(&sid_lock);
    flush_tlb_all_local(NULL);
    recycle_sids();
    spin_unlock(&sid_lock);
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
    free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif