Merge tag 'imx-fixes-4.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo...
[cascardo/linux.git] / kernel / kexec_core.c
index 56b3ed0..5616755 100644 (file)
@@ -95,6 +95,12 @@ int kexec_should_crash(struct task_struct *p)
        return 0;
 }
 
+/*
+ * kexec_crash_loaded - report whether a crash (kdump) kernel is loaded.
+ *
+ * Returns 1 if kexec_crash_image has been set by a prior crash-kernel
+ * load, 0 otherwise.  Exported GPL-only so other kernel code can test
+ * for a loaded crash kernel.
+ */
+int kexec_crash_loaded(void)
+{
+       return !!kexec_crash_image;
+}
+EXPORT_SYMBOL_GPL(kexec_crash_loaded);
+
 /*
  * When kexec transitions to the new kernel there is a one-to-one
  * mapping between physical and virtual addresses.  On processors
@@ -140,6 +146,7 @@ int kexec_should_crash(struct task_struct *p)
  * allocating pages whose destination address we do not care about.
  */
 #define KIMAGE_NO_DEST (-1UL)
+#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
 
 static struct page *kimage_alloc_page(struct kimage *image,
                                       gfp_t gfp_mask,
@@ -147,8 +154,9 @@ static struct page *kimage_alloc_page(struct kimage *image,
 
 int sanity_check_segment_list(struct kimage *image)
 {
-       int result, i;
+       int i;
        unsigned long nr_segments = image->nr_segments;
+       unsigned long total_pages = 0;
 
        /*
         * Verify we have good destination addresses.  The caller is
@@ -163,16 +171,17 @@ int sanity_check_segment_list(struct kimage *image)
         * simply because addresses are changed to page size
         * granularity.
         */
-       result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
 
                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
+               if (mstart > mend)
+                       return -EADDRNOTAVAIL;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
-                       return result;
+                       return -EADDRNOTAVAIL;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
-                       return result;
+                       return -EADDRNOTAVAIL;
        }
 
        /* Verify our destination addresses do not overlap.
@@ -180,7 +189,6 @@ int sanity_check_segment_list(struct kimage *image)
         * through very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
-       result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;
@@ -194,7 +202,7 @@ int sanity_check_segment_list(struct kimage *image)
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
-                               return result;
+                               return -EINVAL;
                }
        }
 
@@ -203,12 +211,26 @@ int sanity_check_segment_list(struct kimage *image)
         * and it is easier to check up front than to be surprised
         * later on.
         */
-       result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
-                       return result;
+                       return -EINVAL;
+       }
+
+       /*
+        * Verify that no more than half of memory will be consumed. If the
+        * request from userspace is too large, a large amount of time will be
+        * wasted allocating pages, which can cause a soft lockup.
+        */
+       for (i = 0; i < nr_segments; i++) {
+               if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
+                       return -EINVAL;
+
+               total_pages += PAGE_COUNT(image->segment[i].memsz);
        }
 
+       if (total_pages > totalram_pages / 2)
+               return -EINVAL;
+
        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
@@ -220,16 +242,15 @@ int sanity_check_segment_list(struct kimage *image)
         */
 
        if (image->type == KEXEC_TYPE_CRASH) {
-               result = -EADDRNOTAVAIL;
                for (i = 0; i < nr_segments; i++) {
                        unsigned long mstart, mend;
 
                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;
                        /* Ensure we are within the crash kernel limits */
-                       if ((mstart < crashk_res.start) ||
-                           (mend > crashk_res.end))
-                               return result;
+                       if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
+                           (mend > phys_to_boot_phys(crashk_res.end)))
+                               return -EADDRNOTAVAIL;
                }
        }
 
@@ -352,7 +373,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
                if (!pages)
                        break;
-               pfn   = page_to_pfn(pages);
+               pfn   = page_to_boot_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
@@ -478,7 +499,7 @@ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
                        return -ENOMEM;
 
                ind_page = page_address(page);
-               *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
+               *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
@@ -533,13 +554,13 @@ void kimage_terminate(struct kimage *image)
+/*
+ * Walk the kimage entry list starting at image->head: an entry with
+ * IND_INDIRECTION redirects the walk to the page it points at (the
+ * stored boot-view physical address is converted back to a virtual
+ * one); the walk stops at IND_DONE.  NOTE(review): conversion changed
+ * from phys_to_virt() to boot_phys_to_virt() so entries are
+ * interpreted in the boot memory view on platforms where the running
+ * kernel's physical view differs — confirm the arch helpers exist.
+ */
 #define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
-                       phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
+                       boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
 
+/*
+ * kimage_free_entry - free the page backing one kimage entry.
+ * @entry: entry value whose upper bits hold a boot-view physical
+ *         address (low bits are IND_* flags, masked off by the shift).
+ */
 static void kimage_free_entry(kimage_entry_t entry)
 {
        struct page *page;
 
-       page = pfn_to_page(entry >> PAGE_SHIFT);
+       page = boot_pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
 }
 
@@ -633,7 +654,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = page_to_boot_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
@@ -648,12 +669,12 @@ static struct page *kimage_alloc_page(struct kimage *image,
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
-               if (page_to_pfn(page) >
+               if (page_to_boot_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unusable_pages);
                        continue;
                }
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 
                /* If it is the destination page we want use it */
                if (addr == destination)
@@ -676,7 +697,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
                        struct page *old_page;
 
                        old_addr = *old & PAGE_MASK;
-                       old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+                       old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);
 
@@ -732,7 +753,7 @@ static int kimage_load_normal_segment(struct kimage *image,
                        result  = -ENOMEM;
                        goto out;
                }
-               result = kimage_add_page(image, page_to_pfn(page)
+               result = kimage_add_page(image, page_to_boot_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;
@@ -793,7 +814,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                char *ptr;
                size_t uchunk, mchunk;
 
-               page = pfn_to_page(maddr >> PAGE_SHIFT);
+               page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result  = -ENOMEM;
                        goto out;
@@ -921,7 +942,7 @@ void __weak crash_free_reserved_phys_range(unsigned long begin,
        unsigned long addr;
 
        for (addr = begin; addr < end; addr += PAGE_SIZE)
-               free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
+               free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
 }
 
 int crash_shrink_memory(unsigned long new_size)
@@ -1374,7 +1395,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
+/*
+ * Weak default: architectures override this to append arch-specific
+ * data to the vmcoreinfo note.  The generic stub does nothing.
+ */
 void __weak arch_crash_save_vmcoreinfo(void)
 {}
 
+/*
+ * paddr_vmcoreinfo_note - physical address of the vmcoreinfo ELF note.
+ *
+ * Return type widened from unsigned long to phys_addr_t so 32-bit
+ * kernels with wider physical addressing (e.g. ARM LPAE) do not
+ * truncate the address.  Weak so architectures can override it.
+ */
-unsigned long __weak paddr_vmcoreinfo_note(void)
+phys_addr_t __weak paddr_vmcoreinfo_note(void)
 {
        return __pa((unsigned long)(char *)&vmcoreinfo_note);
 }