/*
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *	      Ulrich Weigand (weigand@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
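
/*
 * For illustration, a minimal sketch of how common code walks this tree
 * with the accessors defined further down; the helper name walk_to_pte()
 * is hypothetical and not part of this header:
 *
 *	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud = pud_offset(pgd, addr);	(folded on 31 bit)
 *		pmd_t *pmd = pmd_offset(pud, addr);	(folded on 31 bit)
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */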
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
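
/*
 * Usage sketch (hypothetical caller): a read fault on an anonymous
 * mapping can be backed by the shared zero page,
 *
 *	struct page *page = ZERO_PAGE(address);
 *
 * zero_page_mask selects among the cache-colored copies of the zero
 * page, so different virtual addresses may map different copies.
 */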

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048
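
/*
 * Worked example, 64 bit, with the shifts defined above: one page table
 * maps PTRS_PER_PTE * PAGE_SIZE = 256 * 4KB = 1MB = PMD_SIZE, one segment
 * table maps 2048 * 1MB = 2GB = PUD_SIZE, and one region third table maps
 * 2048 * 2GB = 4TB = PGDIR_SIZE.
 */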

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__

/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

static inline int is_module_addr(void *addr)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
#endif
	return 1;
}

/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                        PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
 * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
 * is used to distinguish present from not-present ptes. It is changed only
 * with the page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR...wrdytp
 * empty			.10...000000
 * swap				.10...xxxx10
 * file				.11...xxxxx0
 * prot-none, clean, old	.11...000001
 * prot-none, clean, young	.11...000101
 * prot-none, dirty, old	.10...001001
 * prot-none, dirty, young	.10...001101
 * read-only, clean, old	.11...010001
 * read-only, clean, young	.01...010101
 * read-only, dirty, old	.11...011001
 * read-only, dirty, young	.01...011101
 * read-write, clean, old	.11...110001
 * read-write, clean, young	.01...110101
 * read-write, dirty, old	.10...111001
 * read-write, dirty, young	.00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
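
/*
 * Example: a swap pte as built by mk_swap_pte() below has _PAGE_INVALID
 * and _PAGE_TYPE set, while _PAGE_PRESENT and _PAGE_PROTECT are clear:
 *
 *	(pte_val(pte) & 0x603) == (0x400 | 0x002) == 0x402
 *
 * which is exactly the test made by pte_swap().
 */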

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0x7fffffffUL	/* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200		/* page protection bit */
#define _SEGMENT_ENTRY_INVALID	0x20		/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10		/* common segment bit */
#define _SEGMENT_ENTRY_PTL	0x0f		/* page table length */

#define _SEGMENT_ENTRY_DIRTY	0	/* No sw dirty bit for 31-bit */
#define _SEGMENT_ENTRY_YOUNG	0	/* No sw young bit for 31-bit */
#define _SEGMENT_ENTRY_READ	0	/* No sw read bit for 31-bit */
#define _SEGMENT_ENTRY_WRITE	0	/* No sw write bit for 31-bit */
#define _SEGMENT_ENTRY_LARGE	0	/* No large pages for 31-bit */
#define _SEGMENT_ENTRY_BITS_LARGE 0
#define _SEGMENT_ENTRY_ORIGIN_LARGE 0

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/*
 * Segment table entry encoding (I = invalid, R = read-only bit):
 *		..R...I.....
 * prot-none	..1...1.....
 * read-only	..1...0.....
 * read-write	..0...0.....
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf0000000UL
#define PGSTE_FP_BIT	0x08000000UL
#define PGSTE_PCL_BIT	0x00800000UL
#define PGSTE_HR_BIT	0x00400000UL
#define PGSTE_HC_BIT	0x00200000UL
#define PGSTE_GR_BIT	0x00040000UL
#define PGSTE_GC_BIT	0x00020000UL
#define PGSTE_UC_BIT	0x00008000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x00004000UL	/* IPTE notify bit */

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 *
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 */

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

#endif /* CONFIG_64BIT */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE
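
/*
 * Example: mmap(PROT_READ|PROT_WRITE) on a shared mapping selects __S110
 * = PAGE_WRITE, while the same protection on a private mapping selects
 * __P110 = PAGE_READ only, so that the first write access faults and can
 * be used to perform copy-on-write.
 */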

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_large(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_file(pte_t pte)
{
	/* Bit pattern: (pte & 0x601) == 0x600 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_PROTECT);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
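
/*
 * Note on the attach_count protocol above (informal sketch): the low 16
 * bits count CPUs that have the mm attached, the 0x10000 increment marks
 * a flush in progress. A lazy flush may only defer the IPTE, by marking
 * the pte invalid and setting flush_mm, while no other CPU can hold TLB
 * entries for the mm; otherwise the TLB entry is removed immediately.
 */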

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
1188 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1189 unsigned long address, pte_t *ptep)
1194 if (mm_has_pgste(mm)) {
1195 pgste = pgste_get_lock(ptep);
1196 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1200 ptep_flush_lazy(mm, address, ptep);
1201 pte_val(*ptep) = _PAGE_INVALID;
1203 if (mm_has_pgste(mm)) {
1204 pgste = pgste_update_all(&pte, pgste, mm);
1205 pgste_set_unlock(ptep, pgste);

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
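
/*
 * Sketch: in the full==1 case the mmu_gather guarantee makes the call
 * below equivalent to
 *
 *	pte_t pte = *ptep;
 *	pte_clear(mm, address, ptep);
 *
 * with no IPTE and no pgste locking required.
 */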

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_key(ptep, pgste, entry, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}
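
/*
 * Example: massage_pgprot_pmd(PAGE_READ) yields SEGMENT_READ, i.e. the
 * pte-level protection is translated into the corresponding segment-level
 * encoding for large pages; any other (writable) pgprot falls through to
 * SEGMENT_WRITE.
 */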

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
		(pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd;

	pmd = *pmdp;
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp, int full)
{
	pmd_t pmd = *pmdp;

	if (!full)
		pmdp_flush_lazy(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0| offset |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * | offset |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */

#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
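
/*
 * Worked example (64 bit): type 3, offset 5 gives
 *
 *	pte_val = 0x402 | (3 << 2) | ((5 & 1) << 7) | ((5 & ~1UL) << 11)
 *		= 0x402 | 0x00c | 0x080 | 0x2000 = 0x248e
 *
 * and decoding inverts it: __swp_type = (0x248e >> 2) & 0x1f = 3,
 * __swp_offset = (0x248e >> 11) | ((0x248e >> 7) & 1) = 4 | 1 = 5.
 */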

#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_INVALID | _PAGE_PROTECT })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */