x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
author: Juergen Gross <jgross@suse.com>
Mon, 3 Nov 2014 13:01:55 +0000 (14:01 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Sun, 16 Nov 2014 10:04:25 +0000 (11:04 +0100)
Instead of directly using the cache mode bits in the pte, switch
track_pfn_remap() and track_pfn_insert() to using the cache mode type.
As these two functions are the main callers of lookup_memtype(), convert
lookup_memtype() to return the cache mode type as well.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-10-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/mm/pat.c

index 6d5a8e3..2f3744f 100644 (file)
@@ -394,12 +394,12 @@ int free_memtype(u64 start, u64 end)
  *
  * Only to be called when PAT is enabled
  *
- * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
- * _PAGE_CACHE_UC
+ * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
+ * or _PAGE_CACHE_MODE_UC
  */
-static unsigned long lookup_memtype(u64 paddr)
+static enum page_cache_mode lookup_memtype(u64 paddr)
 {
-       int rettype = _PAGE_CACHE_WB;
+       enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
        struct memtype *entry;
 
        if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
@@ -408,13 +408,13 @@ static unsigned long lookup_memtype(u64 paddr)
        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;
                page = pfn_to_page(paddr >> PAGE_SHIFT);
-               rettype = get_page_memtype(page);
+               rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
                /*
                 * -1 from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
-                       rettype = _PAGE_CACHE_WB;
+                       rettype = _PAGE_CACHE_MODE_WB;
 
                return rettype;
        }
@@ -423,9 +423,9 @@ static unsigned long lookup_memtype(u64 paddr)
 
        entry = rbt_memtype_lookup(paddr);
        if (entry != NULL)
-               rettype = entry->type;
+               rettype = pgprot2cachemode(__pgprot(entry->type));
        else
-               rettype = _PAGE_CACHE_UC_MINUS;
+               rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
        spin_unlock(&memtype_lock);
        return rettype;
@@ -613,7 +613,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                if (!pat_enabled)
                        return 0;
 
-               flags = lookup_memtype(paddr);
+               flags = cachemode2protval(lookup_memtype(paddr));
                if (want_flags != flags) {
                        printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
@@ -715,7 +715,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                    unsigned long pfn, unsigned long addr, unsigned long size)
 {
        resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
-       unsigned long flags;
+       enum page_cache_mode pcm;
 
        /* reserve the whole chunk starting from paddr */
        if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
@@ -734,18 +734,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
         * For anything smaller than the vma size we set prot based on the
         * lookup.
         */
-       flags = lookup_memtype(paddr);
+       pcm = lookup_memtype(paddr);
 
        /* Check memtype for the remaining pages */
        while (size > PAGE_SIZE) {
                size -= PAGE_SIZE;
                paddr += PAGE_SIZE;
-               if (flags != lookup_memtype(paddr))
+               if (pcm != lookup_memtype(paddr))
                        return -EINVAL;
        }
 
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-                        flags);
+                        cachemode2protval(pcm));
 
        return 0;
 }
@@ -753,15 +753,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                     unsigned long pfn)
 {
-       unsigned long flags;
+       enum page_cache_mode pcm;
 
        if (!pat_enabled)
                return 0;
 
        /* Set prot based on lookup */
-       flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
+       pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-                        flags);
+                        cachemode2protval(pcm));
 
        return 0;
 }