powerpc/mm: Convert early cpu/mmu feature check to use the new helpers
author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Sat, 23 Jul 2016 09:12:35 +0000 (14:42 +0530)
committer: Michael Ellerman <mpe@ellerman.id.au>
Mon, 1 Aug 2016 01:15:01 +0000 (11:15 +1000)
This switches early feature checks to use the non static key variant of
the function. In later patches we will be switching cpu_has_feature()
and mmu_has_feature() to use static keys and we can use them only after
static key/jump label is initialized. Any check for feature before jump
label init should be done using this new helper.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/init_64.c

index 70c9958..0cbde6a 100644 (file)
@@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                              phys_addr_t first_memblock_size)
 {
-       if (radix_enabled())
+       if (early_radix_enabled())
                return radix__setup_initial_memory_limit(first_memblock_base,
                                                   first_memblock_size);
        return hash__setup_initial_memory_limit(first_memblock_base,
index 93dae29..fa20060 100644 (file)
@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
         * if we do a GET_PACA() before the feature fixups have been
         * applied
         */
-       if (cpu_has_feature(CPU_FTR_HVMODE))
+       if (early_cpu_has_feature(CPU_FTR_HVMODE))
                mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
        mtspr(SPRN_SPRG_PACA, local_paca);
index 9846961..eafb9a7 100644 (file)
@@ -227,8 +227,8 @@ static void __init configure_exceptions(void)
                        opal_configure_cores();
 
                /* Enable AIL if supported, and we are in hypervisor mode */
-               if (cpu_has_feature(CPU_FTR_HVMODE) &&
-                   cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+                   early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
                        unsigned long lpcr = mfspr(SPRN_LPCR);
                        mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
                }
index 1a96b28..0821556 100644 (file)
@@ -549,7 +549,7 @@ static void __init htab_scan_page_sizes(void)
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
-       if (rc == 0 && mmu_has_feature(MMU_FTR_16M_PAGE)) {
+       if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
                /*
                 * Nothing in the device-tree, but the CPU supports 16M pages,
                 * so let's fallback on a known size list for 16M capable CPUs.
index 6259f5d..16ada1e 100644 (file)
@@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void)
        if (disable_radix)
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
-       if (radix_enabled())
+       if (early_radix_enabled())
                radix__early_init_devtree();
        else
                hash__early_init_devtree();