Revert "arm64: hibernate: Refuse to hibernate if the boot cpu is offline"
[cascardo/linux.git] / arch / arm64 / kernel / hibernate.c
index 65d81f9..d55a7b0 100644 (file)
@@ -15,9 +15,9 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 #define pr_fmt(x) "hibernate: " x
+#include <linux/cpu.h>
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
-#include <linux/notifier.h>
 #include <linux/pm.h>
 #include <linux/sched.h>
 #include <linux/suspend.h>
@@ -26,6 +26,7 @@
 
 #include <asm/barrier.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/irqflags.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -34,6 +35,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
 #include <asm/smp.h>
+#include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/sysreg.h>
 #include <asm/virt.h>
@@ -54,18 +56,18 @@ extern int in_suspend;
 /* Do we need to reset el2? */
 #define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
 
-/*
- * Start/end of the hibernate exit code, this must be copied to a 'safe'
- * location in memory, and executed from there.
- */
-extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
-
 /* temporary el2 vectors in the __hibernate_exit_text section. */
 extern char hibernate_el2_vectors[];
 
 /* hyp-stub vectors, used to restore el2 during resume from hibernate. */
 extern char __hyp_stub_vectors[];
 
+/*
+ * The logical cpu number we should resume on, initialised to an invalid
+ * value (-EINVAL) so we can detect that no CPU has been recorded yet.
+ */
+static int sleep_cpu = -EINVAL;
+
 /*
  * Values that may not change over hibernate/resume. We put the build number
  * and date in here so that we guarantee not to resume with a different
@@ -88,6 +90,8 @@ static struct arch_hibernate_hdr {
         * re-configure el2.
         */
        phys_addr_t     __hyp_stub_vectors;
+
+       u64             sleep_cpu_mpidr;
 } resume_hdr;
 
 static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
@@ -130,12 +134,22 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
        else
                hdr->__hyp_stub_vectors = 0;
 
+       /* Save the mpidr of the cpu we called cpu_suspend() on... */
+       if (sleep_cpu < 0) {
+               pr_err("Failing to hibernate on an unkown CPU.\n");
+               return -ENODEV;
+       }
+       hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
+       pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
+               hdr->sleep_cpu_mpidr);
+
        return 0;
 }
 EXPORT_SYMBOL(arch_hibernation_header_save);
 
 int arch_hibernation_header_restore(void *addr)
 {
+       int ret;
        struct arch_hibernate_hdr_invariants invariants;
        struct arch_hibernate_hdr *hdr = addr;
 
@@ -145,6 +159,24 @@ int arch_hibernation_header_restore(void *addr)
                return -EINVAL;
        }
 
+       sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
+       pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
+               hdr->sleep_cpu_mpidr);
+       if (sleep_cpu < 0) {
+               pr_crit("Hibernated on a CPU not known to this kernel!\n");
+               sleep_cpu = -EINVAL;
+               return -EINVAL;
+       }
+       if (!cpu_online(sleep_cpu)) {
+               pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
+               ret = cpu_up(sleep_cpu);
+               if (ret) {
+                       pr_err("Failed to bring hibernate-CPU up!\n");
+                       sleep_cpu = -EINVAL;
+                       return ret;
+               }
+       }
+
        resume_hdr = *hdr;
 
        return 0;
@@ -241,6 +273,7 @@ out:
        return rc;
 }
 
+#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
 
 int swsusp_arch_suspend(void)
 {
@@ -256,10 +289,16 @@ int swsusp_arch_suspend(void)
        local_dbg_save(flags);
 
        if (__cpu_suspend_enter(&state)) {
+               sleep_cpu = smp_processor_id();
                ret = swsusp_save();
        } else {
-               /* Clean kernel to PoC for secondary core startup */
-               __flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);
+               /* Clean kernel core startup/idle code to PoC */
+               dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
+               dcache_clean_range(__idmap_text_start, __idmap_text_end);
+
+               /* Clean kvm setup code to PoC? */
+               if (el2_reset_needed())
+                       dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
 
                /*
                 * Tell the hibernation core that we've just restored
@@ -267,6 +306,7 @@ int swsusp_arch_suspend(void)
                 */
                in_suspend = 0;
 
+               sleep_cpu = -EINVAL;
                __cpu_suspend_exit();
        }
 
@@ -275,6 +315,33 @@ int swsusp_arch_suspend(void)
        return ret;
 }
 
+static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
+{
+       pte_t pte = *src_pte;
+
+       if (pte_valid(pte)) {
+               /*
+                * Resume will overwrite areas that may be marked
+                * read only (code, rodata). Clear the RDONLY bit from
+                * the temporary mappings we use during restore.
+                */
+               set_pte(dst_pte, pte_clear_rdonly(pte));
+       } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+               /*
+                * debug_pagealloc will have removed the PTE_VALID bit if
+                * the page isn't in use by the resume kernel. It may have
+                * been in use by the original kernel, in which case we need
+                * to put it back in our copy to do the restore.
+                *
+                * Before marking this entry valid, check the pfn should
+                * be mapped.
+                */
+               BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+               set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
+       }
+}
+
 static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
                    unsigned long end)
 {
@@ -290,13 +357,7 @@ static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
 
        src_pte = pte_offset_kernel(src_pmd, start);
        do {
-               if (!pte_none(*src_pte))
-                       /*
-                        * Resume will overwrite areas that may be marked
-                        * read only (code, rodata). Clear the RDONLY bit from
-                        * the temporary mappings we use during restore.
-                        */
-                       set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
+               _copy_pte(dst_pte, src_pte, addr);
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
        return 0;
@@ -483,27 +544,12 @@ out:
        return rc;
 }
 
-static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
-                                            unsigned long action, void *ptr)
+int hibernate_resume_nonboot_cpu_disable(void)
 {
-       if (action == PM_HIBERNATION_PREPARE &&
-            cpumask_first(cpu_online_mask) != 0) {
-               pr_warn("CPU0 is offline.\n");
-               return notifier_from_errno(-ENODEV);
+       if (sleep_cpu < 0) {
+               pr_err("Failing to resume from hibernate on an unkown CPU.\n");
+               return -ENODEV;
        }
 
-       return NOTIFY_OK;
-}
-
-static int __init check_boot_cpu_online_init(void)
-{
-       /*
-        * Set this pm_notifier callback with a lower priority than
-        * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
-        * called earlier to disable cpu hotplug before the cpu online check.
-        */
-       pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
-
-       return 0;
+       return freeze_secondary_cpus(sleep_cpu);
 }
-core_initcall(check_boot_cpu_online_init);