x86: Use new cache mode type in asm/pgtable.h
author     Juergen Gross <jgross@suse.com>
           Mon, 3 Nov 2014 13:01:53 +0000 (14:01 +0100)
committer  Thomas Gleixner <tglx@linutronix.de>
           Sun, 16 Nov 2014 10:04:25 +0000 (11:04 +0100)
Instead of directly using the cache mode bits in the pte, switch to
using the cache mode type. This requires changing some callers of
is_new_memtype_allowed() as well.
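
For background on how the new type relates to the pte bits, below is a
minimal, userspace-buildable sketch of the cachemode2protval() /
pgprot2cachemode() round trip that the hunks below rely on. It is an
illustrative assumption, not the kernel's implementation: the real
translation tables and bit assignments are introduced elsewhere in this
series, and only the identifiers that appear in the diff are taken from
the patch.

/*
 * Standalone sketch only.  The table contents, bit positions and the
 * round-trip logic are simplified assumptions for illustration; only
 * the names visible in the diff (enum page_cache_mode,
 * _PAGE_CACHE_MODE_*, cachemode2protval(), pgprot2cachemode()) come
 * from the patch itself.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

enum page_cache_mode {
	_PAGE_CACHE_MODE_WB,
	_PAGE_CACHE_MODE_WC,
	_PAGE_CACHE_MODE_UC_MINUS,
	_PAGE_CACHE_MODE_UC,
	_PAGE_CACHE_MODE_NUM,
};

/* Assumed PWT/PCD bit positions and cache mask, for illustration only. */
#define _PAGE_PWT		(1ULL << 3)
#define _PAGE_PCD		(1ULL << 4)
#define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD)

/* Hypothetical translation table; the real one is set up by the kernel. */
static const uint64_t cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]		= 0,
	[_PAGE_CACHE_MODE_WC]		= _PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS]	= _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]		= _PAGE_PCD | _PAGE_PWT,
};

/* Cache mode type -> pte cache bits. */
static uint64_t cachemode2protval(enum page_cache_mode pcm)
{
	return cachemode2pte_tbl[pcm];
}

/* pte cache bits -> cache mode type (linear search over the table). */
static enum page_cache_mode pgprot2cachemode(pgprot_t prot)
{
	uint64_t bits = pgprot_val(prot) & _PAGE_CACHE_MASK;
	int pcm;

	for (pcm = 0; pcm < _PAGE_CACHE_MODE_NUM; pcm++)
		if (cachemode2pte_tbl[pcm] == bits)
			return pcm;
	return _PAGE_CACHE_MODE_WB;
}

int main(void)
{
	pgprot_t prot = __pgprot(cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	/* The UC- protection bits map back to the UC- cache mode type. */
	printf("UC- round-trip ok: %d\n",
	       pgprot2cachemode(prot) == _PAGE_CACHE_MODE_UC_MINUS);
	return 0;
}

With conversions like these, callers such as is_new_memtype_allowed()
can compare enum values instead of raw pte flag bits, which is exactly
the change made in the hunks below.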

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-8-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/include/asm/pgtable.h
arch/x86/mm/ioremap.c
arch/x86/mm/pat.c

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index aa97a07..c112ea6 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -9,9 +9,10 @@
 /*
  * Macro to mark a page protection value as UC-
  */
-#define pgprot_noncached(prot)                                 \
-       ((boot_cpu_data.x86 > 3)                                \
-        ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))  \
+#define pgprot_noncached(prot)                                         \
+       ((boot_cpu_data.x86 > 3)                                        \
+        ? (__pgprot(pgprot_val(prot) |                                 \
+                    cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))     \
         : (prot))
 
 #ifndef __ASSEMBLY__
@@ -404,8 +405,8 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
-                                        unsigned long flags,
-                                        unsigned long new_flags)
+                                        enum page_cache_mode pcm,
+                                        enum page_cache_mode new_pcm)
 {
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
@@ -419,10 +420,10 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
-       if ((flags == _PAGE_CACHE_UC_MINUS &&
-            new_flags == _PAGE_CACHE_WB) ||
-           (flags == _PAGE_CACHE_WC &&
-            new_flags == _PAGE_CACHE_WB)) {
+       if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
+            new_pcm == _PAGE_CACHE_MODE_WB) ||
+           (pcm == _PAGE_CACHE_MODE_WC &&
+            new_pcm == _PAGE_CACHE_MODE_WB)) {
                return 0;
        }
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af78e50..3a81eb9 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -142,7 +142,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 
        if (prot_val != new_prot_val) {
                if (!is_new_memtype_allowed(phys_addr, size,
-                                           prot_val, new_prot_val)) {
+                               pgprot2cachemode(__pgprot(prot_val)),
+                               pgprot2cachemode(__pgprot(new_prot_val)))) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6574388..47282c2 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -455,7 +455,9 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
        if (ret)
                goto out_err;
 
-       if (!is_new_memtype_allowed(start, size, req_type, new_type))
+       if (!is_new_memtype_allowed(start, size,
+                                   pgprot2cachemode(__pgprot(req_type)),
+                                   pgprot2cachemode(__pgprot(new_type))))
                goto out_free;
 
        if (kernel_map_sync_memtype(start, size, new_type) < 0)
@@ -630,7 +632,9 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 
        if (flags != want_flags) {
                if (strict_prot ||
-                   !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
+                   !is_new_memtype_allowed(paddr, size,
+                               pgprot2cachemode(__pgprot(want_flags)),
+                               pgprot2cachemode(__pgprot(flags)))) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for [mem %#010Lx-%#010Lx], got %s\n",