Merge tag 'powerpc-4.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 14 Oct 2016 18:07:42 +0000 (11:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 14 Oct 2016 18:07:42 +0000 (11:07 -0700)
Pull more powerpc updates from Michael Ellerman:
 "Some more powerpc updates for 4.9:

  Freescale updates from Scott Wood:
   - qbman support (a prerequisite for datapath drivers such as ethernet)
   - a PCI DMA fix+improvement
   - reset handler changes
   - more 8xx optimizations
   - some cleanups and fixes

  Fixes:
   - selftests/powerpc: Add missing binaries to .gitignores (Michael Ellerman)
   - selftests/powerpc: Fix build break caused by EXPORT_SYMBOL changes (Michael Ellerman)
   - powerpc/pseries: Fix stack corruption in htpe code (Laurent Dufour)
   - powerpc/64s: Fix power4_fixup_nap placement (Nicholas Piggin)
   - powerpc/64: Fix incorrect return value from __copy_tofrom_user (Paul Mackerras)
   - powerpc/mm/hash64: Fix might_have_hea() check (Michael Ellerman)

  Other:
   - MAINTAINERS: Remove myself from PA Semi entries (Olof Johansson)
   - MAINTAINERS: Drop separate pseries entry (Michael Ellerman)
   - MAINTAINERS: Update powerpc website & add selftests (Michael Ellerman)"
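
  A minimal usage sketch of the buffer-pool side of the new qbman support,
  assuming the API exported by the new include/soc/fsl/bman.h in this series
  (the example_* names and the buffer handling are illustrative, not part of
  the series):

    #include <linux/dma-mapping.h>
    #include <soc/fsl/bman.h>

    static struct bman_pool *pool;

    /* Seed one buffer into a dynamically allocated BMan pool. */
    static int example_seed_pool(struct device *dev, void *vaddr, size_t sz)
    {
            struct bm_buffer buf;
            dma_addr_t addr;

            pool = bman_new_pool();
            if (!pool)
                    return -ENODEV;

            addr = dma_map_single(dev, vaddr, sz, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, addr))
                    return -EIO;

            bm_buffer_set64(&buf, addr);            /* 48-bit buffer address */
            return bman_release(pool, &buf, 1);     /* hand it to hardware */
    }

    /* Later, pull a buffer back out of the pool. */
    static int example_take_buffer(void)
    {
            struct bm_buffer buf;

            return bman_acquire(pool, &buf, 1);
    }

  A datapath driver (such as the DPAA Ethernet driver this enables) would pair
  this with QMan frame queues from include/soc/fsl/qman.h for enqueue/dequeue.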

* tag 'powerpc-4.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (35 commits)
  powerpc/mm/hash64: Fix might_have_hea() check
  powerpc/64: Fix incorrect return value from __copy_tofrom_user
  powerpc/64s: Fix power4_fixup_nap placement
  powerpc/pseries: Fix stack corruption in htpe code
  selftests/powerpc: Fix build break caused by EXPORT_SYMBOL changes
  MAINTAINERS: Update powerpc website & add selftests
  MAINTAINERS: Drop separate pseries entry
  MAINTAINERS: Remove myself from PA Semi entries
  selftests/powerpc: Add missing binaries to .gitignores
  arch/powerpc: Add CONFIG_FSL_DPAA to corenetXX_smp_defconfig
  soc/qman: Add self-test for QMan driver
  soc/bman: Add self-test for BMan driver
  soc/fsl: Introduce DPAA 1.x QMan device driver
  soc/fsl: Introduce DPAA 1.x BMan device driver
  powerpc/8xx: make user addr DTLB miss the short path
  powerpc/8xx: Move additional DTLBMiss handlers out of exception area
  powerpc/8xx: use r3 to scratch CR in ITLBmiss
  soc/fsl/qe: fix gpio save_regs functions
  powerpc/8xx: add dedicated machine check handler
  powerpc/8xx: add system_reset_exception
  ...

104 files changed:
MAINTAINERS
arch/powerpc/Makefile
arch/powerpc/configs/dpaa.config [new file with mode: 0644]
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/reg_8xx.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_8xx.S
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/traps.c
arch/powerpc/lib/copyuser_64.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/platforms/82xx/Kconfig
arch/powerpc/platforms/82xx/ep8248e.c
arch/powerpc/platforms/83xx/asp834x.c
arch/powerpc/platforms/83xx/km83xx.c
arch/powerpc/platforms/83xx/misc.c
arch/powerpc/platforms/83xx/mpc830x_rdb.c
arch/powerpc/platforms/83xx/mpc831x_rdb.c
arch/powerpc/platforms/83xx/mpc832x_mds.c
arch/powerpc/platforms/83xx/mpc832x_rdb.c
arch/powerpc/platforms/83xx/mpc834x_itx.c
arch/powerpc/platforms/83xx/mpc834x_mds.c
arch/powerpc/platforms/83xx/mpc836x_mds.c
arch/powerpc/platforms/83xx/mpc836x_rdk.c
arch/powerpc/platforms/83xx/mpc837x_mds.c
arch/powerpc/platforms/83xx/mpc837x_rdb.c
arch/powerpc/platforms/83xx/mpc83xx.h
arch/powerpc/platforms/83xx/sbc834x.c
arch/powerpc/platforms/85xx/Kconfig
arch/powerpc/platforms/85xx/bsc913x_qds.c
arch/powerpc/platforms/85xx/bsc913x_rdb.c
arch/powerpc/platforms/85xx/c293pcie.c
arch/powerpc/platforms/85xx/corenet_generic.c
arch/powerpc/platforms/85xx/ge_imp3a.c
arch/powerpc/platforms/85xx/mpc8536_ds.c
arch/powerpc/platforms/85xx/mpc85xx_ads.c
arch/powerpc/platforms/85xx/mpc85xx_cds.c
arch/powerpc/platforms/85xx/mpc85xx_ds.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
arch/powerpc/platforms/85xx/mvme2500.c
arch/powerpc/platforms/85xx/p1010rdb.c
arch/powerpc/platforms/85xx/p1022_ds.c
arch/powerpc/platforms/85xx/p1022_rdk.c
arch/powerpc/platforms/85xx/p1023_rdb.c
arch/powerpc/platforms/85xx/ppa8548.c
arch/powerpc/platforms/85xx/qemu_e500.c
arch/powerpc/platforms/85xx/sbc8548.c
arch/powerpc/platforms/85xx/sgy_cts1000.c
arch/powerpc/platforms/85xx/socrates.c
arch/powerpc/platforms/85xx/stx_gp3.c
arch/powerpc/platforms/85xx/tqm85xx.c
arch/powerpc/platforms/85xx/twr_p102x.c
arch/powerpc/platforms/85xx/xes_mpc85xx.c
arch/powerpc/platforms/86xx/gef_ppc9a.c
arch/powerpc/platforms/86xx/gef_sbc310.c
arch/powerpc/platforms/86xx/gef_sbc610.c
arch/powerpc/platforms/86xx/mpc8610_hpcd.c
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
arch/powerpc/platforms/86xx/mvme7100.c
arch/powerpc/platforms/86xx/sbc8641d.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/sysdev/cpm1.c
arch/powerpc/sysdev/cpm2.c
arch/powerpc/sysdev/cpm_common.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/fsl_soc.c
arch/powerpc/sysdev/fsl_soc.h
arch/powerpc/sysdev/mpic.c
drivers/soc/Kconfig
drivers/soc/fsl/Makefile
drivers/soc/fsl/qbman/Kconfig [new file with mode: 0644]
drivers/soc/fsl/qbman/Makefile [new file with mode: 0644]
drivers/soc/fsl/qbman/bman.c [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_ccsr.c [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_portal.c [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_priv.h [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_test.c [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_test.h [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_test_api.c [new file with mode: 0644]
drivers/soc/fsl/qbman/dpaa_sys.h [new file with mode: 0644]
drivers/soc/fsl/qbman/qman.c [new file with mode: 0644]
drivers/soc/fsl/qbman/qman_ccsr.c [new file with mode: 0644]
drivers/soc/fsl/qbman/qman_portal.c [new file with mode: 0644]
drivers/soc/fsl/qbman/qman_priv.h [new file with mode: 0644]
drivers/soc/fsl/qbman/qman_test.c [new file with mode: 0644]
drivers/soc/fsl/qbman/qman_test.h [new file with mode: 0644]
drivers/soc/fsl/qbman/qman_test_api.c [new file with mode: 0644]
drivers/soc/fsl/qbman/qman_test_stash.c [new file with mode: 0644]
drivers/soc/fsl/qe/gpio.c
drivers/soc/fsl/qe/qe.c
drivers/soc/fsl/qe/qe_common.c
drivers/soc/fsl/qe/qe_tdm.c
include/soc/fsl/bman.h [new file with mode: 0644]
include/soc/fsl/qman.h [new file with mode: 0644]
tools/testing/selftests/powerpc/copyloops/asm/export.h [new file with mode: 0644]
tools/testing/selftests/powerpc/math/.gitignore
tools/testing/selftests/powerpc/signal/.gitignore [new file with mode: 0644]
tools/testing/selftests/powerpc/stringloops/asm/export.h [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/.gitignore
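
  The "reset handler changes" in the Freescale pull replace the ppc_md.restart
  hook (fsl_rstcr_restart) with handlers on the generic restart notifier chain,
  as the fsl_soc.c and mpc85xx_cds.c hunks below show. A hedged sketch of that
  pattern (handler name hypothetical; priority 128 matches the fsl_soc.c hunk):

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int example_restart(struct notifier_block *this,
                               unsigned long mode, void *cmd)
    {
            /* board-specific reset poke goes here */
            return NOTIFY_DONE;     /* let other handlers run if this fails */
    }

    static struct notifier_block example_restart_nb = {
            .notifier_call = example_restart,
            .priority = 128,
    };

    static int __init example_restart_register(void)
    {
            return register_restart_handler(&example_restart_nb);
    }
    arch_initcall(example_restart_register);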

index 1fc66f0..a5ae0f2 100644 (file)
@@ -7201,17 +7201,11 @@ F:      drivers/lightnvm/
 F:     include/linux/lightnvm.h
 F:     include/uapi/linux/lightnvm.h
 
-LINUX FOR IBM pSERIES (RS/6000)
-M:     Paul Mackerras <paulus@au.ibm.com>
-W:     http://www.ibm.com/linux/ltc/projects/ppc
-S:     Supported
-F:     arch/powerpc/boot/rs6000.h
-
 LINUX FOR POWERPC (32-BIT AND 64-BIT)
 M:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
 M:     Paul Mackerras <paulus@samba.org>
 M:     Michael Ellerman <mpe@ellerman.id.au>
-W:     http://www.penguinppc.org/
+W:     https://github.com/linuxppc/linux/wiki
 L:     linuxppc-dev@lists.ozlabs.org
 Q:     http://patchwork.ozlabs.org/project/linuxppc-dev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
@@ -7226,6 +7220,7 @@ F:        drivers/net/ethernet/ibm/ibmvnic.*
 F:     drivers/pci/hotplug/pnv_php.c
 F:     drivers/pci/hotplug/rpa*
 F:     drivers/scsi/ibmvscsi/
+F:     tools/testing/selftests/powerpc
 N:     opal
 N:     /pmac
 N:     powermac
@@ -7282,9 +7277,8 @@ F:        arch/powerpc/platforms/83xx/
 F:     arch/powerpc/platforms/85xx/
 
 LINUX FOR POWERPC PA SEMI PWRFICIENT
-M:     Olof Johansson <olof@lixom.net>
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Maintained
+S:     Orphan
 F:     arch/powerpc/platforms/pasemi/
 F:     drivers/*/*pasemi*
 F:     drivers/*/*/*pasemi*
@@ -9019,15 +9013,13 @@ S:      Maintained
 F:     drivers/net/wireless/intersil/p54/
 
 PA SEMI ETHERNET DRIVER
-M:     Olof Johansson <olof@lixom.net>
 L:     netdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/net/ethernet/pasemi/*
 
 PA SEMI SMBUS DRIVER
-M:     Olof Johansson <olof@lixom.net>
 L:     linux-i2c@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/i2c/busses/i2c-pasemi.c
 
 PADATA PARALLEL EXECUTION MECHANISM
index 50d020a..617dece 100644 (file)
@@ -318,12 +318,12 @@ mpc85xx_smp_defconfig:
 PHONY += corenet32_smp_defconfig
 corenet32_smp_defconfig:
        $(call merge_into_defconfig,corenet_basic_defconfig,\
-               85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+               85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw dpaa)
 
 PHONY += corenet64_smp_defconfig
 corenet64_smp_defconfig:
        $(call merge_into_defconfig,corenet_basic_defconfig,\
-               85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw)
+               85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw dpaa)
 
 PHONY += mpc86xx_defconfig
 mpc86xx_defconfig:
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config
new file mode 100644 (file)
index 0000000..efa99c0
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_FSL_DPAA=y
index f752e6f..ab68d0e 100644 (file)
@@ -43,6 +43,7 @@ extern int machine_check_e500mc(struct pt_regs *regs);
 extern int machine_check_e500(struct pt_regs *regs);
 extern int machine_check_e200(struct pt_regs *regs);
 extern int machine_check_47x(struct pt_regs *regs);
+int machine_check_8xx(struct pt_regs *regs);
 
 extern void cpu_down_flush_e500v2(void);
 extern void cpu_down_flush_e500mc(void);
index c7d82ff..eba6041 100644 (file)
@@ -155,6 +155,8 @@ static inline unsigned long arch_local_irq_save(void)
        unsigned long flags = arch_local_save_flags();
 #ifdef CONFIG_BOOKE
        asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+       wrtspr(SPRN_EID);
 #else
        SET_MSR_EE(flags & ~MSR_EE);
 #endif
@@ -165,6 +167,8 @@ static inline void arch_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
        asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+       wrtspr(SPRN_EID);
 #else
        arch_local_irq_save();
 #endif
@@ -174,6 +178,8 @@ static inline void arch_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
        asm volatile("wrteei 1" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+       wrtspr(SPRN_EIE);
 #else
        unsigned long msr = mfmsr();
        SET_MSR_EE(msr | MSR_EE);
index 54ff8ce..0132831 100644 (file)
 #define PPC_INST_LWSYNC                        0x7c2004ac
 #define PPC_INST_SYNC                  0x7c0004ac
 #define PPC_INST_SYNC_MASK             0xfc0007fe
+#define PPC_INST_ISYNC                 0x4c00012c
 #define PPC_INST_LXVD2X                        0x7c000698
 #define PPC_INST_MCRXR                 0x7c000400
 #define PPC_INST_MCRXR_MASK            0xfc0007fe
index 2a62078..9cd4e8c 100644 (file)
@@ -1250,6 +1250,8 @@ static inline void mtmsr_isync(unsigned long val)
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
 #endif
+#define wrtspr(rn)     asm volatile("mtspr " __stringify(rn) ",0" : \
+                                    : : "memory")
 
 extern unsigned long msr_check_and_set(unsigned long bits);
 extern bool strict_msr_control;
index 94d01f8..0197e12 100644 (file)
 #define SPRN_MD_RAM0   825
 #define SPRN_MD_RAM1   826
 
+/* Special MSR manipulation registers */
+#define SPRN_EIE       80      /* External interrupt enable (EE=1, RI=1) */
+#define SPRN_EID       81      /* External interrupt disable (EE=0, RI=1) */
+
 /* Commands.  Only the first few are available to the instruction cache.
 */
 #define        IDC_ENABLE      0x02000000      /* Cache enable */
index 6c4646a..6a82ef0 100644 (file)
@@ -1248,6 +1248,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .mmu_features           = MMU_FTR_TYPE_8xx,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
+               .machine_check          = machine_check_8xx,
                .platform               = "ppc823",
        },
 #endif /* CONFIG_8xx */
index 08992f8..f129408 100644 (file)
@@ -1377,7 +1377,7 @@ __end_interrupts:
 DEFINE_FIXED_SYMBOL(__end_interrupts)
 
 #ifdef CONFIG_PPC_970_NAP
-TRAMP_REAL_BEGIN(power4_fixup_nap)
+EXC_COMMON_BEGIN(power4_fixup_nap)
        andc    r9,r9,r10
        std     r9,TI_LOCAL_FLAGS(r11)
        ld      r10,_LINK(r1)           /* make idle task do the */
index 3a185c5..033a6b7 100644 (file)
@@ -226,7 +226,7 @@ i##n:                                                               \
                          ret_from_except)
 
 /* System reset */
-       EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+       EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
 
 /* Machine check */
        . = 0x200
@@ -321,7 +321,7 @@ SystemCall:
 #endif
 
 InstructionTLBMiss:
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
        mtspr   SPRN_SPRG_SCRATCH2, r3
 #endif
        EXCEPTION_PROLOG_0
@@ -329,23 +329,20 @@ InstructionTLBMiss:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
+       mfspr   r10, SPRN_SRR0  /* Get effective address of fault */
+       INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
 #if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
        /* Only modules will cause ITLB Misses as we always
         * pin the first 8MB of kernel memory */
-       mfspr   r11, SPRN_SRR0  /* Get effective address of fault */
-       INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
-       mfcr    r10
-       IS_KERNEL(r11, r11)
+       mfcr    r3
+       IS_KERNEL(r11, r10)
+#endif
        mfspr   r11, SPRN_M_TW  /* Get level 1 table */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
        BRANCH_UNLESS_KERNEL(3f)
        lis     r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 3:
-       mtcr    r10
-       mfspr   r10, SPRN_SRR0  /* Get effective address of fault */
-#else
-       mfspr   r10, SPRN_SRR0  /* Get effective address of fault */
-       INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
-       mfspr   r11, SPRN_M_TW  /* Get level 1 table base address */
+       mtcr    r3
 #endif
        /* Insert level 1 index */
        rlwimi  r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -377,58 +374,39 @@ InstructionTLBMiss:
        MTSPR_CPU6(SPRN_MI_RPN, r10, r3)        /* Update TLB entry */
 
        /* Restore registers */
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
        mfspr   r3, SPRN_SPRG_SCRATCH2
 #endif
        EXCEPTION_EPILOG_0
        rfi
 
-/*
- * Bottom part of DataStoreTLBMiss handler for IMMR area
- * not enough space in the DataStoreTLBMiss area
- */
-DTLBMissIMMR:
-       mtcr    r10
-       /* Set 512k byte guarded page and mark it valid */
-       li      r10, MD_PS512K | MD_GUARDED | MD_SVALID
-       MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
-       mfspr   r10, SPRN_IMMR                  /* Get current IMMR */
-       rlwinm  r10, r10, 0, 0xfff80000         /* Get 512 kbytes boundary */
-       ori     r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
-                         _PAGE_PRESENT | _PAGE_NO_CACHE
-       MTSPR_CPU6(SPRN_MD_RPN, r10, r11)       /* Update TLB entry */
-
-       li      r11, RPN_PATTERN
-       mtspr   SPRN_DAR, r11   /* Tag DAR */
-       EXCEPTION_EPILOG_0
-       rfi
-
        . = 0x1200
 DataStoreTLBMiss:
+       mtspr   SPRN_SPRG_SCRATCH2, r3
        EXCEPTION_PROLOG_0
-       mfcr    r10
+       mfcr    r3
 
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       mfspr   r11, SPRN_MD_EPN
-       rlwinm  r11, r11, 16, 0xfff8
+       mfspr   r10, SPRN_MD_EPN
+       rlwinm  r10, r10, 16, 0xfff8
+       cmpli   cr0, r10, PAGE_OFFSET@h
+       mfspr   r11, SPRN_M_TW  /* Get level 1 table */
+       blt+    3f
 #ifndef CONFIG_PIN_TLB_IMMR
-       cmpli   cr0, r11, VIRT_IMMR_BASE@h
+       cmpli   cr0, r10, VIRT_IMMR_BASE@h
 #endif
-       cmpli   cr7, r11, PAGE_OFFSET@h
+_ENTRY(DTLBMiss_cmp)
+       cmpli   cr7, r10, (PAGE_OFFSET + 0x1800000)@h
+       lis     r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 #ifndef CONFIG_PIN_TLB_IMMR
 _ENTRY(DTLBMiss_jmp)
        beq-    DTLBMissIMMR
 #endif
-       bge-    cr7, 4f
-
-       mfspr   r11, SPRN_M_TW  /* Get level 1 table */
+       blt     cr7, DTLBMissLinear
 3:
-       mtcr    r10
-#ifdef CONFIG_8xx_CPU6
-       mtspr   SPRN_SPRG_SCRATCH2, r3
-#endif
+       mtcr    r3
        mfspr   r10, SPRN_MD_EPN
 
        /* Insert level 1 index */
@@ -481,30 +459,7 @@ _ENTRY(DTLBMiss_jmp)
        MTSPR_CPU6(SPRN_MD_RPN, r10, r3)        /* Update TLB entry */
 
        /* Restore registers */
-#ifdef CONFIG_8xx_CPU6
        mfspr   r3, SPRN_SPRG_SCRATCH2
-#endif
-       mtspr   SPRN_DAR, r11   /* Tag DAR */
-       EXCEPTION_EPILOG_0
-       rfi
-
-4:
-_ENTRY(DTLBMiss_cmp)
-       cmpli   cr0, r11, (PAGE_OFFSET + 0x1800000)@h
-       lis     r11, (swapper_pg_dir-PAGE_OFFSET)@ha
-       bge-    3b
-
-       mtcr    r10
-       /* Set 8M byte page and mark it valid */
-       li      r10, MD_PS8MEG | MD_SVALID
-       MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
-       mfspr   r10, SPRN_MD_EPN
-       rlwinm  r10, r10, 0, 0x0f800000         /* 8xx supports max 256Mb RAM */
-       ori     r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
-                         _PAGE_PRESENT
-       MTSPR_CPU6(SPRN_MD_RPN, r10, r11)       /* Update TLB entry */
-
-       li      r11, RPN_PATTERN
        mtspr   SPRN_DAR, r11   /* Tag DAR */
        EXCEPTION_EPILOG_0
        rfi
@@ -570,6 +525,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */
 
        . = 0x2000
 
+/*
+ * Bottom part of DataStoreTLBMiss handlers for IMMR area and linear RAM.
+ * not enough space in the DataStoreTLBMiss area.
+ */
+DTLBMissIMMR:
+       mtcr    r3
+       /* Set 512k byte guarded page and mark it valid */
+       li      r10, MD_PS512K | MD_GUARDED | MD_SVALID
+       MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+       mfspr   r10, SPRN_IMMR                  /* Get current IMMR */
+       rlwinm  r10, r10, 0, 0xfff80000         /* Get 512 kbytes boundary */
+       ori     r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+                         _PAGE_PRESENT | _PAGE_NO_CACHE
+       MTSPR_CPU6(SPRN_MD_RPN, r10, r11)       /* Update TLB entry */
+
+       li      r11, RPN_PATTERN
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r3, SPRN_SPRG_SCRATCH2
+       EXCEPTION_EPILOG_0
+       rfi
+
+DTLBMissLinear:
+       mtcr    r3
+       /* Set 8M byte page and mark it valid */
+       li      r11, MD_PS8MEG | MD_SVALID
+       MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+       rlwinm  r10, r10, 16, 0x0f800000        /* 8xx supports max 256Mb RAM */
+       ori     r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+                         _PAGE_PRESENT
+       MTSPR_CPU6(SPRN_MD_RPN, r10, r11)       /* Update TLB entry */
+
+       li      r11, RPN_PATTERN
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r3, SPRN_SPRG_SCRATCH2
+       EXCEPTION_EPILOG_0
+       rfi
+
 /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
  * by decoding the registers used by the dcbx instruction and adding them.
  * DAR is set to the calculated address.
@@ -586,7 +578,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
        rlwinm  r11, r10, 16, 0xfff8
 _ENTRY(FixupDAR_cmp)
        cmpli   cr7, r11, (PAGE_OFFSET + 0x1800000)@h
-       blt-    cr7, 200f
+       /* create physical page address from effective address */
+       tophys(r11, r10)
+       blt-    cr7, 201f
        lis     r11, (swapper_pg_dir-PAGE_OFFSET)@ha
        /* Insert level 1 index */
 3:     rlwimi  r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -616,10 +610,6 @@ _ENTRY(FixupDAR_cmp)
 141:   mfspr   r10,SPRN_SPRG_SCRATCH2
        b       DARFixed        /* Nope, go back to normal TLB processing */
 
-       /* create physical page address from effective address */
-200:   tophys(r11, r10)
-       b       201b
-
 144:   mfspr   r10, SPRN_DSISR
        rlwinm  r10, r10,0,7,5  /* Clear store bit for buggy dcbst insn */
        mtspr   SPRN_DSISR, r10
index dba265c..270ee30 100644 (file)
@@ -131,15 +131,26 @@ void machine_shutdown(void)
                ppc_md.machine_shutdown();
 }
 
+static void machine_hang(void)
+{
+       pr_emerg("System Halted, OK to turn off power\n");
+       local_irq_disable();
+       while (1)
+               ;
+}
+
 void machine_restart(char *cmd)
 {
        machine_shutdown();
        if (ppc_md.restart)
                ppc_md.restart(cmd);
+
        smp_send_stop();
-       printk(KERN_EMERG "System Halted, OK to turn off power\n");
-       local_irq_disable();
-       while (1) ;
+
+       do_kernel_restart(cmd);
+       mdelay(1000);
+
+       machine_hang();
 }
 
 void machine_power_off(void)
@@ -147,10 +158,9 @@ void machine_power_off(void)
        machine_shutdown();
        if (pm_power_off)
                pm_power_off();
+
        smp_send_stop();
-       printk(KERN_EMERG "System Halted, OK to turn off power\n");
-       local_irq_disable();
-       while (1) ;
+       machine_hang();
 }
 /* Used by the G5 thermal driver */
 EXPORT_SYMBOL_GPL(machine_power_off);
@@ -163,10 +173,9 @@ void machine_halt(void)
        machine_shutdown();
        if (ppc_md.halt)
                ppc_md.halt();
+
        smp_send_stop();
-       printk(KERN_EMERG "System Halted, OK to turn off power\n");
-       local_irq_disable();
-       while (1) ;
+       machine_hang();
 }
 
 
index a1f8f56..023a462 100644 (file)
@@ -273,7 +273,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
        force_sig_info(signr, &info, current);
 }
 
-#ifdef CONFIG_PPC64
 void system_reset_exception(struct pt_regs *regs)
 {
        /* See if any machine dependent calls */
@@ -291,6 +290,7 @@ void system_reset_exception(struct pt_regs *regs)
        /* What should we do here? We could issue a shutdown or hard reset. */
 }
 
+#ifdef CONFIG_PPC64
 /*
  * This function is called in real mode. Strictly no printk's please.
  *
@@ -352,12 +352,11 @@ static inline int check_io_access(struct pt_regs *regs)
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
-               if (*nip == 0x60000000)         /* nop */
+               if (*nip == PPC_INST_NOP)
                        nip -= 2;
-               else if (*nip == 0x4c00012c)    /* isync */
+               else if (*nip == PPC_INST_ISYNC)
                        --nip;
-               if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
-                       /* sync or twi */
+               if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
                        unsigned int rb;
 
                        --nip;
@@ -668,6 +667,31 @@ int machine_check_e200(struct pt_regs *regs)
 
        return 0;
 }
+#elif defined(CONFIG_PPC_8xx)
+int machine_check_8xx(struct pt_regs *regs)
+{
+       unsigned long reason = get_mc_reason(regs);
+
+       pr_err("Machine check in kernel mode.\n");
+       pr_err("Caused by (from SRR1=%lx): ", reason);
+       if (reason & 0x40000000)
+               pr_err("Fetch error at address %lx\n", regs->nip);
+       else
+               pr_err("Data access error at address %lx\n", regs->dar);
+
+#ifdef CONFIG_PCI
+       /* the qspan pci read routines can cause machine checks -- Cort
+        *
+        * yuck !!! that totally needs to go away ! There are better ways
+        * to deal with that than having a wart in the mcheck handler.
+        * -- BenH
+        */
+       bad_page_fault(regs, regs->dar, SIGBUS);
+       return 1;
+#else
+       return 0;
+#endif
+}
 #else
 int machine_check_generic(struct pt_regs *regs)
 {
@@ -727,17 +751,6 @@ void machine_check_exception(struct pt_regs *regs)
        if (recover > 0)
                goto bail;
 
-#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
-       /* the qspan pci read routines can cause machine checks -- Cort
-        *
-        * yuck !!! that totally needs to go away ! There are better ways
-        * to deal with that than having a wart in the mcheck handler.
-        * -- BenH
-        */
-       bad_page_fault(regs, regs->dar, SIGBUS);
-       goto bail;
-#endif
-
        if (debugger_fault_handler(regs))
                goto bail;
 
index f09899e..7b22624 100644 (file)
@@ -359,6 +359,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
        addi    r3,r3,8
 171:
 177:
+179:
        addi    r3,r3,8
 370:
 372:
@@ -373,7 +374,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 173:
 174:
 175:
-179:
 181:
 184:
 186:
index 90480e2..44d3c3a 100644 (file)
@@ -529,7 +529,7 @@ static bool might_have_hea(void)
         */
 #ifdef CONFIG_IBMEBUS
        return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
-               !firmware_has_feature(FW_FEATURE_SPLPAR);
+               firmware_has_feature(FW_FEATURE_SPLPAR);
 #else
        return false;
 #endif
index 7c7df40..994d1a9 100644 (file)
@@ -30,8 +30,8 @@ config EP8248E
        select 8272
        select 8260
        select FSL_SOC
-       select PHYLIB
-       select MDIO_BITBANG
+       select PHYLIB if NETDEVICES
+       select MDIO_BITBANG if PHYLIB
        help
          This enables support for the Embedded Planet EP8248E board.
 
index cdab847..8fec050 100644 (file)
@@ -298,7 +298,9 @@ static const struct of_device_id of_bus_ids[] __initconst = {
 static int __init declare_of_platform_devices(void)
 {
        of_platform_bus_probe(NULL, of_bus_ids, NULL);
-       platform_driver_register(&ep8248e_mdio_driver);
+
+       if (IS_ENABLED(CONFIG_MDIO_BITBANG))
+               platform_driver_register(&ep8248e_mdio_driver);
 
        return 0;
 }
index 17e5433..575afd6 100644 (file)
@@ -30,9 +30,7 @@
  */
 static void __init asp834x_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("asp834x_setup_arch()", 0);
-
+       mpc83xx_setup_arch();
        mpc834x_usb_cfg();
 }
 
index e7fbd63..d8642a4 100644 (file)
@@ -130,10 +130,7 @@ static void __init mpc83xx_km_setup_arch(void)
        struct device_node *np;
 #endif
 
-       if (ppc_md.progress)
-               ppc_md.progress("kmpbec83xx_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
 
 #ifdef CONFIG_QUICC_ENGINE
        np = of_find_node_by_name(NULL, "par_io");
index 8899aa9..d75c981 100644 (file)
@@ -142,3 +142,11 @@ void __init mpc83xx_setup_pci(void)
                mpc83xx_add_bridge(np);
 }
 #endif
+
+void __init mpc83xx_setup_arch(void)
+{
+       if (ppc_md.progress)
+               ppc_md.progress("mpc83xx_setup_arch()", 0);
+
+       mpc83xx_setup_pci();
+}
index 040d5d0..272c41c 100644 (file)
  */
 static void __init mpc830x_rdb_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("mpc830x_rdb_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
        mpc831x_usb_cfg();
 }
 
index 40e0d83..fd80fd5 100644 (file)
  */
 static void __init mpc831x_rdb_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("mpc831x_rdb_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
        mpc831x_usb_cfg();
 }
 
index cdfa47c..bb7b25a 100644 (file)
@@ -58,8 +58,7 @@ static void __init mpc832x_sys_setup_arch(void)
        struct device_node *np;
        u8 __iomem *bcsr_regs = NULL;
 
-       if (ppc_md.progress)
-               ppc_md.progress("mpc832x_sys_setup_arch()", 0);
+       mpc83xx_setup_arch();
 
        /* Map BCSR area */
        np = of_find_node_by_name(NULL, "bcsr");
@@ -71,8 +70,6 @@ static void __init mpc832x_sys_setup_arch(void)
                of_node_put(np);
        }
 
-       mpc83xx_setup_pci();
-
 #ifdef CONFIG_QUICC_ENGINE
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
index 0d6a62f..d7c9b18 100644 (file)
@@ -197,10 +197,7 @@ static void __init mpc832x_rdb_setup_arch(void)
        struct device_node *np;
 #endif
 
-       if (ppc_md.progress)
-               ppc_md.progress("mpc832x_rdb_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
 
 #ifdef CONFIG_QUICC_ENGINE
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
index 8fd0c1e..73a5267 100644 (file)
@@ -57,10 +57,7 @@ machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices);
  */
 static void __init mpc834x_itx_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("mpc834x_itx_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
 
        mpc834x_usb_cfg();
 }
index eeaee61..009cfc1 100644 (file)
@@ -76,10 +76,7 @@ static int mpc834xemds_usb_cfg(void)
  */
 static void __init mpc834x_mds_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("mpc834x_mds_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
 
        mpc834xemds_usb_cfg();
 }
index dacf4c2..4fc3051 100644 (file)
@@ -66,8 +66,7 @@ static void __init mpc836x_mds_setup_arch(void)
        struct device_node *np;
        u8 __iomem *bcsr_regs = NULL;
 
-       if (ppc_md.progress)
-               ppc_md.progress("mpc836x_mds_setup_arch()", 0);
+       mpc83xx_setup_arch();
 
        /* Map BCSR area */
        np = of_find_node_by_name(NULL, "bcsr");
@@ -79,8 +78,6 @@ static void __init mpc836x_mds_setup_arch(void)
                of_node_put(np);
        }
 
-       mpc83xx_setup_pci();
-
 #ifdef CONFIG_QUICC_ENGINE
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
index cf67ac9..93f024f 100644 (file)
@@ -31,10 +31,7 @@ machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices);
 
 static void __init mpc836x_rdk_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
 }
 
 /*
index 652b97d..3b34cc1 100644 (file)
@@ -79,10 +79,7 @@ out:
  */
 static void __init mpc837x_mds_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("mpc837x_mds_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
        mpc837xmds_usb_cfg();
 }
 
index 667731d..0c55fa6 100644 (file)
@@ -50,10 +50,7 @@ static void mpc837x_rdb_sd_cfg(void)
  */
 static void __init mpc837x_rdb_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("mpc837x_rdb_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
        mpc837x_usb_cfg();
        mpc837x_rdb_sd_cfg();
 }
index ad48419..636eb9d 100644 (file)
@@ -86,5 +86,6 @@ extern void mpc83xx_setup_pci(void);
 #endif
 
 extern int mpc83xx_declare_of_platform_devices(void);
+extern void mpc83xx_setup_arch(void);
 
 #endif                         /* __MPC83XX_H__ */
index b867e88..cb4bdab 100644 (file)
  */
 static void __init sbc834x_setup_arch(void)
 {
-       if (ppc_md.progress)
-               ppc_md.progress("sbc834x_setup_arch()", 0);
-
-       mpc83xx_setup_pci();
+       mpc83xx_setup_arch();
 }
 
 machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
index df25a3e..9dc1d28 100644 (file)
@@ -72,7 +72,7 @@ config MPC85xx_CDS
 config MPC85xx_MDS
        bool "Freescale MPC85xx MDS"
        select DEFAULT_UIMAGE
-       select PHYLIB
+       select PHYLIB if NETDEVICES
        select HAS_RAPIDIO
        select SWIOTLB
        help
index 07dd6ae..d2f4556 100644 (file)
@@ -72,7 +72,6 @@ define_machine(bsc9132_qds) {
        .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index e48f671..0ffdb4a 100644 (file)
@@ -59,7 +59,6 @@ define_machine(bsc9131_rdb) {
        .setup_arch             = bsc913x_rdb_setup_arch,
        .init_IRQ               = bsc913x_rdb_pic_init,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 3b9e3f0..4df1b40 100644 (file)
@@ -65,7 +65,6 @@ define_machine(c293_pcie) {
        .setup_arch             = c293_pcie_setup_arch,
        .init_IRQ               = c293_pcie_pic_init,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 3a6a84f..1179115 100644 (file)
@@ -225,7 +225,6 @@ define_machine(corenet_generic) {
 #else
        .get_irq                = mpic_get_coreint_irq,
 #endif
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 #ifdef CONFIG_PPC64
index 14af36a..f29c6f0 100644 (file)
@@ -215,7 +215,6 @@ define_machine(ge_imp3a) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 6ba687f..94a7f92 100644 (file)
@@ -77,7 +77,6 @@ define_machine(mpc8536_ds) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 8756715..f3e055f 100644 (file)
@@ -170,7 +170,6 @@ define_machine(mpc85xx_ads) {
        .init_IRQ               = mpc85xx_ads_pic_init,
        .show_cpuinfo           = mpc85xx_ads_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 86f2015..224db30 100644 (file)
@@ -83,7 +83,8 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
                return PCIBIOS_SUCCESSFUL;
 }
 
-static void __noreturn mpc85xx_cds_restart(char *cmd)
+static int mpc85xx_cds_restart(struct notifier_block *this,
+                              unsigned long mode, void *cmd)
 {
        struct pci_dev *dev;
        u_char tmp;
@@ -108,12 +109,25 @@ static void __noreturn mpc85xx_cds_restart(char *cmd)
        }
 
        /*
-        *  If we can't find the VIA chip (maybe the P2P bridge is disabled)
-        *  or the VIA chip reset didn't work, just use the default reset.
+        *  If we can't find the VIA chip (maybe the P2P bridge is
+        *  disabled) or the VIA chip reset didn't work, just return
+        *  and let default reset sequence happen.
         */
-       fsl_rstcr_restart(NULL);
+       return NOTIFY_DONE;
 }
 
+static int mpc85xx_cds_restart_register(void)
+{
+       static struct notifier_block restart_handler;
+
+       restart_handler.notifier_call = mpc85xx_cds_restart;
+       restart_handler.priority = 192;
+
+       return register_restart_handler(&restart_handler);
+}
+machine_arch_initcall(mpc85xx_cds, mpc85xx_cds_restart_register);
+
+
 static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
 {
        u_char c;
@@ -380,11 +394,8 @@ define_machine(mpc85xx_cds) {
        .show_cpuinfo   = mpc85xx_cds_show_cpuinfo,
        .get_irq        = mpic_get_irq,
 #ifdef CONFIG_PCI
-       .restart        = mpc85xx_cds_restart,
        .pcibios_fixup_bus      = mpc85xx_cds_fixup_bus,
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
-#else
-       .restart        = fsl_rstcr_restart,
 #endif
        .calibrate_decr = generic_calibrate_decr,
        .progress       = udbg_progress,
index ed69c7e..dc9e035 100644 (file)
@@ -204,7 +204,6 @@ define_machine(mpc8544_ds) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -219,7 +218,6 @@ define_machine(mpc8572_ds) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -234,7 +232,6 @@ define_machine(p2020_ds) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index fa9cd71..d7e440e 100644 (file)
@@ -63,6 +63,8 @@
 #define DBG(fmt...)
 #endif
 
+#if IS_BUILTIN(CONFIG_PHYLIB)
+
 #define MV88E1111_SCR  0x10
 #define MV88E1111_SCR_125CLK   0x0010
 static int mpc8568_fixup_125_clock(struct phy_device *phydev)
@@ -152,6 +154,8 @@ static int mpc8568_mds_phy_fixups(struct phy_device *phydev)
        return err;
 }
 
+#endif
+
 /* ************************************************************************
  *
  * Setup the architecture
@@ -313,6 +317,7 @@ static void __init mpc85xx_mds_setup_arch(void)
        swiotlb_detect_4g();
 }
 
+#if IS_BUILTIN(CONFIG_PHYLIB)
 
 static int __init board_fixups(void)
 {
@@ -342,9 +347,12 @@ static int __init board_fixups(void)
 
        return 0;
 }
+
 machine_arch_initcall(mpc8568_mds, board_fixups);
 machine_arch_initcall(mpc8569_mds, board_fixups);
 
+#endif
+
 static int __init mpc85xx_publish_devices(void)
 {
        if (machine_is(mpc8568_mds))
@@ -385,7 +393,6 @@ define_machine(mpc8568_mds) {
        .setup_arch     = mpc85xx_mds_setup_arch,
        .init_IRQ       = mpc85xx_mds_pic_init,
        .get_irq        = mpic_get_irq,
-       .restart        = fsl_rstcr_restart,
        .calibrate_decr = generic_calibrate_decr,
        .progress       = udbg_progress,
 #ifdef CONFIG_PCI
@@ -405,7 +412,6 @@ define_machine(mpc8569_mds) {
        .setup_arch     = mpc85xx_mds_setup_arch,
        .init_IRQ       = mpc85xx_mds_pic_init,
        .get_irq        = mpic_get_irq,
-       .restart        = fsl_rstcr_restart,
        .calibrate_decr = generic_calibrate_decr,
        .progress       = udbg_progress,
 #ifdef CONFIG_PCI
@@ -426,7 +432,6 @@ define_machine(p1021_mds) {
        .setup_arch     = mpc85xx_mds_setup_arch,
        .init_IRQ       = mpc85xx_mds_pic_init,
        .get_irq        = mpic_get_irq,
-       .restart        = fsl_rstcr_restart,
        .calibrate_decr = generic_calibrate_decr,
        .progress       = udbg_progress,
 #ifdef CONFIG_PCI
@@ -434,4 +439,3 @@ define_machine(p1021_mds) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 };
-
index c1499cb..1006950 100644 (file)
@@ -213,7 +213,6 @@ define_machine(p2020_rdb) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -228,7 +227,6 @@ define_machine(p1020_rdb) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -243,7 +241,6 @@ define_machine(p1021_rdb_pc) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -258,7 +255,6 @@ define_machine(p2020_rdb_pc) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -273,7 +269,6 @@ define_machine(p1025_rdb) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -288,7 +283,6 @@ define_machine(p1020_mbg_pc) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -303,7 +297,6 @@ define_machine(p1020_utm_pc) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -318,7 +311,6 @@ define_machine(p1020_rdb_pc) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -333,7 +325,6 @@ define_machine(p1020_rdb_pd) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -348,7 +339,6 @@ define_machine(p1024_rdb) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index acc3d0d..d5af072 100644 (file)
@@ -66,7 +66,6 @@ define_machine(mvme2500) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 661d7b5..78d13b3 100644 (file)
@@ -79,7 +79,6 @@ define_machine(p1010_rdb) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 63568d6..0908abd 100644 (file)
@@ -568,7 +568,6 @@ define_machine(p1022_ds) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 2f29436..276e00a 100644 (file)
@@ -148,7 +148,6 @@ define_machine(p1022_rdk) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 40d8de5..3e8cd03 100644 (file)
@@ -110,7 +110,6 @@ define_machine(p1023_rdb) {
        .setup_arch             = mpc85xx_rdb_setup_arch,
        .init_IRQ               = mpc85xx_rdb_pic_init,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 #ifdef CONFIG_PCI
index 2410167..33c5ba6 100644 (file)
@@ -91,7 +91,6 @@ define_machine(ppa8548) {
        .init_IRQ       = ppa8548_pic_init,
        .show_cpuinfo   = ppa8548_show_cpuinfo,
        .get_irq        = mpic_get_irq,
-       .restart        = fsl_rstcr_restart,
        .calibrate_decr = generic_calibrate_decr,
        .progress       = udbg_progress,
 };
index 50d7458..b63a854 100644 (file)
@@ -77,7 +77,6 @@ define_machine(qemu_e500) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_coreint_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 62b6c45..2c67084 100644 (file)
@@ -130,7 +130,6 @@ define_machine(sbc8548) {
        .init_IRQ       = sbc8548_pic_init,
        .show_cpuinfo   = sbc8548_show_cpuinfo,
        .get_irq        = mpic_get_irq,
-       .restart        = fsl_rstcr_restart,
 #ifdef CONFIG_PCI
        .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
index 79fd0df..21d6aaa 100644 (file)
@@ -38,18 +38,18 @@ static void gpio_halt_wfn(struct work_struct *work)
 }
 static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn);
 
-static void gpio_halt_cb(void)
+static void __noreturn gpio_halt_cb(void)
 {
        enum of_gpio_flags flags;
        int trigger, gpio;
 
        if (!halt_node)
-               return;
+               panic("No reset GPIO information was provided in DT\n");
 
        gpio = of_get_gpio_flags(halt_node, 0, &flags);
 
        if (!gpio_is_valid(gpio))
-               return;
+               panic("Provided GPIO is invalid\n");
 
        trigger = (flags == OF_GPIO_ACTIVE_LOW);
 
@@ -57,6 +57,8 @@ static void gpio_halt_cb(void)
 
        /* Probably wont return */
        gpio_set_value(gpio, trigger);
+
+       panic("Halt failed\n");
 }
 
 /* This IRQ means someone pressed the power button and it is waiting for us
index cd255ac..8da4ed9 100644 (file)
@@ -91,7 +91,6 @@ define_machine(socrates) {
        .setup_arch             = socrates_setup_arch,
        .init_IRQ               = socrates_pic_init,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 91b824c..1a1d44e 100644 (file)
@@ -103,7 +103,6 @@ define_machine(stx_gp3) {
        .init_IRQ               = stx_gp3_pic_init,
        .show_cpuinfo           = stx_gp3_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index b7c5445..9fc20a3 100644 (file)
@@ -132,7 +132,6 @@ define_machine(tqm85xx) {
        .init_IRQ               = tqm85xx_pic_init,
        .show_cpuinfo           = tqm85xx_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index 1bc02a8..360f625 100644 (file)
@@ -140,7 +140,6 @@ define_machine(twr_p1025) {
        .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index cf0c70f..cd6ce84 100644 (file)
@@ -167,7 +167,6 @@ define_machine(xes_mpc8572) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -182,7 +181,6 @@ define_machine(xes_mpc8548) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
@@ -197,7 +195,6 @@ define_machine(xes_mpc8540) {
        .pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
 };
index ef684af..6b99300 100644 (file)
@@ -204,7 +204,6 @@ define_machine(gef_ppc9a) {
        .init_IRQ               = gef_ppc9a_init_irq,
        .show_cpuinfo           = gef_ppc9a_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
index 67dd0c2..8cdeca0 100644 (file)
@@ -191,7 +191,6 @@ define_machine(gef_sbc310) {
        .init_IRQ               = gef_sbc310_init_irq,
        .show_cpuinfo           = gef_sbc310_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
index 8050269..da8723a 100644 (file)
@@ -181,7 +181,6 @@ define_machine(gef_sbc610) {
        .init_IRQ               = gef_sbc610_init_irq,
        .show_cpuinfo           = gef_sbc610_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
index fef0582..a5d73fa 100644 (file)
@@ -331,7 +331,6 @@ define_machine(mpc86xx_hpcd) {
        .setup_arch             = mpc86xx_hpcd_setup_arch,
        .init_IRQ               = mpc86xx_init_irq,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
index 5ae42a0..a0e989e 100644 (file)
@@ -130,7 +130,6 @@ define_machine(mpc86xx_hpcn) {
        .init_IRQ               = mpc86xx_init_irq,
        .show_cpuinfo           = mpc86xx_hpcn_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
index addb41e..835352e 100644 (file)
@@ -111,7 +111,6 @@ define_machine(mvme7100) {
        .setup_arch             = mvme7100_setup_arch,
        .init_IRQ               = mpc86xx_init_irq,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
index 52af573..93db35d 100644 (file)
@@ -82,7 +82,6 @@ define_machine(sbc8641) {
        .init_IRQ               = mpc86xx_init_irq,
        .show_cpuinfo           = sbc8641_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .restart                = fsl_rstcr_restart,
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
index 86707e6..aa35245 100644 (file)
@@ -393,7 +393,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
                                             unsigned long *vpn, int count,
                                             int psize, int ssize)
 {
-       unsigned long param[8];
+       unsigned long param[PLPAR_HCALL9_BUFSIZE];
        int i = 0, pix = 0, rc;
        unsigned long flags = 0;
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -522,7 +522,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
        unsigned long flags = 0;
        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
-       unsigned long param[9];
+       unsigned long param[PLPAR_HCALL9_BUFSIZE];
        unsigned long hash, index, shift, hidx, slot;
        real_pte_t pte;
        int psize, ssize;
index 3c0eb9b..986cd11 100644 (file)
@@ -233,8 +233,6 @@ void __init cpm_reset(void)
        else
                out_be32(&siu_conf->sc_sdcr, 1);
        immr_unmap(siu_conf);
-
-       cpm_muram_init();
 }
 
 static DEFINE_SPINLOCK(cmd_lock);
index 8dc1e24..f78ff84 100644 (file)
@@ -66,10 +66,6 @@ void __init cpm2_reset(void)
        cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
 #endif
 
-       /* Reclaim the DP memory for our use.
-        */
-       cpm_muram_init();
-
        /* Tell everyone where the comm processor resides.
         */
        cpmp = &cpm2_immr->im_cpm;
index 947f420..51bf749 100644 (file)
 #include <linux/of_gpio.h>
 #endif
 
+static int __init cpm_init(void)
+{
+       struct device_node *np;
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
+       if (!np)
+               np = of_find_compatible_node(NULL, NULL, "fsl,cpm2");
+       if (!np)
+               return -ENODEV;
+       cpm_muram_init();
+       of_node_put(np);
+       return 0;
+}
+subsys_initcall(cpm_init);
+
 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
 static u32 __iomem *cpm_udbg_txdesc;
 static u8 __iomem *cpm_udbg_txbuf;
index 0ef9df4..d3a5974 100644 (file)
@@ -111,8 +111,7 @@ static struct pci_ops fsl_indirect_pcie_ops =
        .write = indirect_write_config,
 };
 
-#define MAX_PHYS_ADDR_BITS     40
-static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
+static u64 pci64_dma_offset;
 
 #ifdef CONFIG_SWIOTLB
 static void setup_swiotlb_ops(struct pci_controller *hose)
@@ -132,12 +131,10 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
                return -EIO;
 
        /*
-        * Fixup PCI devices that are able to DMA to above the physical
-        * address width of the SoC such that we can address any internal
-        * SoC address from across PCI if needed
+        * Fix up PCI devices that are able to DMA to the large inbound
+        * mapping that allows addressing any RAM address from across PCI.
         */
-       if ((dev_is_pci(dev)) &&
-           dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
+       if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
                set_dma_ops(dev, &dma_direct_ops);
                set_dma_offset(dev, pci64_dma_offset);
        }
@@ -387,6 +384,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
                                mem_log++;
 
                        piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
+                       pci64_dma_offset = 1ULL << mem_log;
 
                        if (setup_inbound) {
                                /* Setup inbound memory window */
index a09ca70..d93056e 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
+#include <linux/reboot.h>
 
 #include <linux/atomic.h>
 #include <asm/io.h>
@@ -180,23 +181,38 @@ EXPORT_SYMBOL(get_baudrate);
 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 static __be32 __iomem *rstcr;
 
+static int fsl_rstcr_restart(struct notifier_block *this,
+                            unsigned long mode, void *cmd)
+{
+       local_irq_disable();
+       /* set reset control register */
+       out_be32(rstcr, 0x2);   /* HRESET_REQ */
+
+       return NOTIFY_DONE;
+}
+
 static int __init setup_rstcr(void)
 {
        struct device_node *np;
 
+       static struct notifier_block restart_handler = {
+               .notifier_call = fsl_rstcr_restart,
+               .priority = 128,
+       };
+
        for_each_node_by_name(np, "global-utilities") {
                if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
                        rstcr = of_iomap(np, 0) + 0xb0;
-                       if (!rstcr)
+                       if (!rstcr) {
                                printk (KERN_ERR "Error: reset control "
                                                "register not mapped!\n");
+                       } else {
+                               register_restart_handler(&restart_handler);
+                       }
                        break;
                }
        }
 
-       if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
-               printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
-
        of_node_put(np);
 
        return 0;
@@ -204,15 +220,6 @@ static int __init setup_rstcr(void)
 
 arch_initcall(setup_rstcr);
 
-void __noreturn fsl_rstcr_restart(char *cmd)
-{
-       local_irq_disable();
-       if (rstcr)
-               /* set reset control register */
-               out_be32(rstcr, 0x2);   /* HRESET_REQ */
-
-       while (1) ;
-}
 #endif
 
 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
index 433566a..d73daa4 100644 (file)
@@ -19,8 +19,6 @@ extern u32 fsl_get_sys_freq(void);
 struct spi_board_info;
 struct device_node;
 
-extern void __noreturn fsl_rstcr_restart(char *cmd);
-
 /* The different ports that the DIU can be connected to */
 enum fsl_diu_monitor_port {
        FSL_DIU_PORT_DVI,       /* DVI */
index 4d48cec..b9aac95 100644 (file)
@@ -1249,7 +1249,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
        /* Pick the physical address from the device tree if unspecified */
        if (!phys_addr) {
                /* Check if it is DCR-based */
-               if (of_get_property(node, "dcr-reg", NULL)) {
+               if (of_property_read_bool(node, "dcr-reg")) {
                        flags |= MPIC_USES_DCR;
                } else {
                        struct resource r;
index fe42a2f..e6e90e8 100644 (file)
@@ -1,6 +1,7 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/fsl/qbman/Kconfig"
 source "drivers/soc/fsl/qe/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
index 203307f..75e1f53 100644 (file)
@@ -2,5 +2,6 @@
 # Makefile for the Linux Kernel SOC fsl specific device drivers
 #
 
+obj-$(CONFIG_FSL_DPAA)                 += qbman/
 obj-$(CONFIG_QUICC_ENGINE)             += qe/
 obj-$(CONFIG_CPM)                      += qe/
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644 (file)
index 0000000..757033c
--- /dev/null
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+       bool "Freescale DPAA 1.x support"
+       depends on FSL_SOC_BOOKE
+       select GENERIC_ALLOCATOR
+       help
+         The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+         hardware components on specific QorIQ multicore processors.
+         This architecture provides the infrastructure to support simplified
+         sharing of networking interfaces and accelerators by multiple CPUs.
+         The major h/w blocks composing DPAA are BMan and QMan.
+
+         The Buffer Manager (BMan) is a hardware buffer pool management block
+         that allows software and accelerators on the datapath to acquire and
+         release buffers in order to build frames.
+
+         The Queue Manager (QMan) is a hardware queue management block
+         that allows software and accelerators on the datapath to enqueue and
+         dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+       bool "Additional driver checking"
+       help
+         Compiles in additional checks, to sanity-check the drivers and
+         any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+       tristate "BMan self-tests"
+       help
+         Compile the BMan self-test code. These tests will
+         exercise the BMan APIs to confirm functionality
+         of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+       bool "High-level API self-test"
+       depends on FSL_BMAN_TEST
+       default y
+       help
+         This requires the presence of cpu-affine portals, and performs
+         high-level API testing with them (whichever portal(s) are affine
+         to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+       tristate "QMan self-tests"
+       help
+         Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+       bool "QMan high-level self-test"
+       depends on FSL_QMAN_TEST
+       default y
+       help
+         This requires the presence of cpu-affine portals, and performs
+         high-level API testing with them (whichever portal(s) are affine to
+         the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+       bool "QMan 'hot potato' data-stashing self-test"
+       depends on FSL_QMAN_TEST
+       default y
+       help
+         This performs a "hot potato" style test enqueuing/dequeuing a frame
+         across a series of FQs scheduled to different portals (and cpus), with
+         DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644 (file)
index 0000000..7ae199f
--- /dev/null
@@ -0,0 +1,12 @@
+obj-$(CONFIG_FSL_DPAA)                          += bman_ccsr.o qman_ccsr.o \
+                                                  bman_portal.o qman_portal.o \
+                                                  bman.o qman.o
+
+obj-$(CONFIG_FSL_BMAN_TEST)                     += bman-test.o
+bman-test-y                                      = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API)           += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST)                    += qman-test.o
+qman-test-y                                     = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API)          += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH)                += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644 (file)
index 0000000..ffa48fd
--- /dev/null
@@ -0,0 +1,797 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME                "BMan portal %d"
+#define MAX_IRQNAME    16      /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH     0x0000
+#define BM_REG_RCR_CI_CINH     0x0004
+#define BM_REG_RCR_ITR         0x0008
+#define BM_REG_CFG             0x0100
+#define BM_REG_SCN(n)          (0x0200 + ((n) << 2))
+#define BM_REG_ISR             0x0e00
+#define BM_REG_IER             0x0e04
+#define BM_REG_ISDR            0x0e08
+#define BM_REG_IIR             0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR               0x0000
+#define BM_CL_RR0              0x0100
+#define BM_CL_RR1              0x0140
+#define BM_CL_RCR              0x1000
+#define BM_CL_RCR_PI_CENA      0x3000
+#define BM_CL_RCR_CI_CENA      0x3100
+
+/*
+ * Portal modes.
+ *   Enum types:
+ *     pmode == production mode
+ *     cmode == consumption mode
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate:
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode {            /* matches BCSP_CFG::RPM */
+       bm_rcr_pci = 0,         /* PI index, cache-inhibited */
+       bm_rcr_pce = 1,         /* PI index, cache-enabled */
+       bm_rcr_pvb = 2          /* valid-bit */
+};
+enum bm_rcr_cmode {            /* s/w-only */
+       bm_rcr_cci,             /* CI index, cache-inhibited */
+       bm_rcr_cce              /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE            8
+
+/* Release Command */
+struct bm_rcr_entry {
+       union {
+               struct {
+                       u8 _ncw_verb; /* writes to this are non-coherent */
+                       u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+                       u8 __reserved1[62];
+               };
+               struct bm_buffer bufs[8];
+       };
+};
+#define BM_RCR_VERB_VBIT               0x80
+#define BM_RCR_VERB_CMD_MASK           0x70    /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE    0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI     0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK      0x0f    /* values 1..8 */
+
+struct bm_rcr {
+       struct bm_rcr_entry *ring, *cursor;
+       u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       u32 busy;
+       enum bm_rcr_pmode pmode;
+       enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+       u8 _ncw_verb; /* writes to this are non-coherent */
+       u8 bpid; /* used by acquire command */
+       u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT               0x80
+#define BM_MCC_VERB_CMD_MASK           0x70    /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE                0x10
+#define BM_MCC_VERB_CMD_QUERY          0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT   0x0f    /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+       struct {
+               u8 verb;
+               u8 bpid;
+               u8 __reserved[62];
+       };
+       struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT               0x80
+#define BM_MCR_VERB_CMD_MASK           BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE                BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY          BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID    0x60
+#define BM_MCR_VERB_CMD_ERR_ECC                0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT   BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT                 10000 /* us */
+
+struct bm_mc {
+       struct bm_mc_command *cr;
+       union bm_mc_result *rr;
+       u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       enum {
+               /* Can only be _mc_start()ed */
+               mc_idle,
+               /* Can only be _mc_commit()ed or _mc_abort()ed */
+               mc_user,
+               /* Can only be _mc_retry()ed */
+               mc_hw
+       } state;
+#endif
+};
+
+struct bm_addr {
+       void __iomem *ce;       /* cache-enabled */
+       void __iomem *ci;       /* cache-inhibited */
+};
+
+struct bm_portal {
+       struct bm_addr addr;
+       struct bm_rcr rcr;
+       struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+       return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+       __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+       dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+       dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+       return __raw_readl(p->addr.ce + offset);
+}
+
+struct bman_portal {
+       struct bm_portal p;
+       /* interrupt sources processed by portal_isr(), configurable */
+       unsigned long irq_sources;
+       /* probing time config params for cpu-affine portals */
+       const struct bm_portal_config *config;
+       char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+       return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+       put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, e.g. if different users of the
+ * pool are operating via different portals.
+ */
+struct bman_pool {
+       /* index of the buffer pool to encapsulate (0-63) */
+       u32 bpid;
+       /* Used for hash-table admin when using depletion notifications. */
+       struct bman_portal *portal;
+       struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+       struct bman_portal *p = ptr;
+       struct bm_portal *portal = &p->p;
+       u32 clear = p->irq_sources;
+       u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+       if (unlikely(!is))
+               return IRQ_NONE;
+
+       clear |= poll_portal_slow(p, is);
+       bm_out(portal, BM_REG_ISR, clear);
+       return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT      ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY      (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+       uintptr_t addr = (uintptr_t)p;
+
+       addr &= ~RCR_CARRY;
+
+       return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+       return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+       /* increment to the next RCR pointer and handle overflow and 'vbit' */
+       struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+       rcr->cursor = rcr_carryclear(partial);
+       if (partial != rcr->cursor)
+               rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       rcr->ithresh = ithresh;
+       bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+       __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+       DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+       bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+       u8 diff, old_ci = rcr->ci;
+
+       DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+       rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+       bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+       diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+       rcr->available += diff;
+       return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       DPAA_ASSERT(!rcr->busy);
+       if (!rcr->available)
+               return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       rcr->busy = 1;
+#endif
+       dpaa_zero(rcr->cursor);
+       return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+       struct bm_rcr_entry *rcursor;
+
+       DPAA_ASSERT(rcr->busy);
+       DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+       DPAA_ASSERT(rcr->available >= 1);
+       dma_wmb();
+       rcursor = rcr->cursor;
+       rcursor->_ncw_verb = myverb | rcr->vbit;
+       dpaa_flush(rcursor);
+       rcr_inc(rcr);
+       rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+                      enum bm_rcr_cmode cmode)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+       u32 cfg;
+       u8 pi;
+
+       rcr->ring = portal->addr.ce + BM_CL_RCR;
+       rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+       pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+       rcr->cursor = rcr->ring + pi;
+       rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+               BM_RCR_VERB_VBIT : 0;
+       rcr->available = BM_RCR_SIZE - 1
+               - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+       rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       rcr->busy = 0;
+       rcr->pmode = pmode;
+       rcr->cmode = cmode;
+#endif
+       cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+               | (pmode & 0x3); /* BCSP_CFG::RPM */
+       bm_out(portal, BM_REG_CFG, cfg);
+       return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       struct bm_rcr *rcr = &portal->rcr;
+       int i;
+
+       DPAA_ASSERT(!rcr->busy);
+
+       i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+       if (i != rcr_ptr2idx(rcr->cursor))
+               pr_crit("losing uncommitted RCR entries\n");
+
+       i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+       if (i != rcr->ci)
+               pr_crit("missing existing RCR completions\n");
+       if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+               pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+       struct bm_mc *mc = &portal->mc;
+
+       mc->cr = portal->addr.ce + BM_CL_CR;
+       mc->rr = portal->addr.ce + BM_CL_RR0;
+       mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+                   0 : 1;
+       mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_idle;
+#endif
+       return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       struct bm_mc *mc = &portal->mc;
+
+       DPAA_ASSERT(mc->state == mc_idle);
+       if (mc->state != mc_idle)
+               pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+       struct bm_mc *mc = &portal->mc;
+
+       DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_user;
+#endif
+       dpaa_zero(mc->cr);
+       return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+       struct bm_mc *mc = &portal->mc;
+       union bm_mc_result *rr = mc->rr + mc->rridx;
+
+       DPAA_ASSERT(mc->state == mc_user);
+       dma_wmb();
+       mc->cr->_ncw_verb = myverb | mc->vbit;
+       dpaa_flush(mc->cr);
+       dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+       struct bm_mc *mc = &portal->mc;
+       union bm_mc_result *rr = mc->rr + mc->rridx;
+
+       DPAA_ASSERT(mc->state == mc_hw);
+       /*
+        * The inactive response register's verb byte always returns zero until
+        * its command is submitted and completed. This includes the valid-bit,
+        * in case you were wondering...
+        */
+       if (!__raw_readb(&rr->verb)) {
+               dpaa_invalidate_touch_ro(rr);
+               return NULL;
+       }
+       mc->rridx ^= 1;
+       mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_idle;
+#endif
+       return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+                                      union bm_mc_result **mcr)
+{
+       int timeout = BM_MCR_TIMEOUT;
+
+       do {
+               *mcr = bm_mc_result(portal);
+               if (*mcr)
+                       break;
+               udelay(1);
+       } while (--timeout);
+
+       return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+       bm_out(portal, BM_REG_SCN(0), 0);
+       bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+                             const struct bm_portal_config *c)
+{
+       struct bm_portal *p;
+       int ret;
+
+       p = &portal->p;
+       /*
+        * prep the low-level portal struct with the mapped addresses from the
+        * config, everything that follows depends on it and "config" is more
+        * for (de)reference...
+        */
+       p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+       p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+       if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+               dev_err(c->dev, "RCR initialisation failed\n");
+               goto fail_rcr;
+       }
+       if (bm_mc_init(p)) {
+               dev_err(c->dev, "MC initialisation failed\n");
+               goto fail_mc;
+       }
+       /*
+        * Default to all BPIDs disabled, we enable as required at
+        * run-time.
+        */
+       bm_isr_bscn_disable(p);
+
+       /* Write-to-clear any stale interrupt status bits */
+       bm_out(p, BM_REG_ISDR, 0xffffffff);
+       portal->irq_sources = 0;
+       bm_out(p, BM_REG_IER, 0);
+       bm_out(p, BM_REG_ISR, 0xffffffff);
+       snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+       if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+               dev_err(c->dev, "request_irq() failed\n");
+               goto fail_irq;
+       }
+       if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+           irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+               dev_err(c->dev, "irq_set_affinity() failed\n");
+               goto fail_affinity;
+       }
+
+       /* Need RCR to be empty before continuing */
+       ret = bm_rcr_get_fill(p);
+       if (ret) {
+               dev_err(c->dev, "RCR unclean\n");
+               goto fail_rcr_empty;
+       }
+       /* Success */
+       portal->config = c;
+
+       bm_out(p, BM_REG_ISDR, 0);
+       bm_out(p, BM_REG_IIR, 0);
+
+       return 0;
+
+fail_rcr_empty:
+fail_affinity:
+       free_irq(c->irq, portal);
+fail_irq:
+       bm_mc_finish(p);
+fail_mc:
+       bm_rcr_finish(p);
+fail_rcr:
+       return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+       struct bman_portal *portal;
+       int err;
+
+       portal = &per_cpu(bman_affine_portal, c->cpu);
+       err = bman_create_portal(portal, c);
+       if (err)
+               return NULL;
+
+       spin_lock(&affine_mask_lock);
+       cpumask_set_cpu(c->cpu, &affine_mask);
+       spin_unlock(&affine_mask_lock);
+
+       return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+       u32 ret = is;
+
+       if (is & BM_PIRQ_RCRI) {
+               bm_rcr_cce_update(&p->p);
+               bm_rcr_set_ithresh(&p->p, 0);
+               bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+               is &= ~BM_PIRQ_RCRI;
+       }
+
+       /* There should be no status register bits left undefined */
+       DPAA_ASSERT(!is);
+       return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+       unsigned long irqflags;
+
+       local_irq_save(irqflags);
+       set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+       bm_out(&p->p, BM_REG_IER, p->irq_sources);
+       local_irq_restore(irqflags);
+       return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+       struct bm_mc_command *bm_cmd;
+       union bm_mc_result *bm_res;
+
+       while (1) {
+               struct bman_portal *p = get_affine_portal();
+               /* Acquire buffers until empty */
+               bm_cmd = bm_mc_start(&p->p);
+               bm_cmd->bpid = bpid;
+               bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+               if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+                       put_affine_portal();
+                       pr_crit("BMan Acquire Command timed out\n");
+                       return -ETIMEDOUT;
+               }
+               if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+                       put_affine_portal();
+                       /* Pool is empty */
+                       return 0;
+               }
+               put_affine_portal();
+       }
+
+       return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+       unsigned long addr;
+
+       addr = gen_pool_alloc(bm_bpalloc, count);
+       if (!addr)
+               return -ENOMEM;
+
+       *result = addr & ~DPAA_GENALLOC_OFF;
+
+       return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+       int ret;
+
+       ret = bm_shutdown_pool(bpid);
+       if (ret) {
+               pr_debug("BPID %d leaked\n", bpid);
+               return ret;
+       }
+
+       gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+       return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+       struct bman_pool *pool = NULL;
+       u32 bpid;
+
+       if (bm_alloc_bpid_range(&bpid, 1))
+               return NULL;
+
+       pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               goto err;
+
+       pool->bpid = bpid;
+
+       return pool;
+err:
+       bm_release_bpid(bpid);
+       kfree(pool);
+       return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+       bm_release_bpid(pool->bpid);
+
+       kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+       return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+       if (avail)
+               bm_rcr_cce_prefetch(&p->p);
+       else
+               bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+       struct bman_portal *p;
+       struct bm_rcr_entry *r;
+       unsigned long irqflags;
+       int avail, timeout = 1000; /* 1ms */
+       int i = num - 1;
+
+       DPAA_ASSERT(num > 0 && num <= 8);
+
+       do {
+               p = get_affine_portal();
+               local_irq_save(irqflags);
+               avail = bm_rcr_get_avail(&p->p);
+               if (avail < 2)
+                       update_rcr_ci(p, avail);
+               r = bm_rcr_start(&p->p);
+               local_irq_restore(irqflags);
+               put_affine_portal();
+               if (likely(r))
+                       break;
+
+               udelay(1);
+       } while (--timeout);
+
+       if (unlikely(!timeout))
+               return -ETIMEDOUT;
+
+       p = get_affine_portal();
+       local_irq_save(irqflags);
+       /*
+        * We can copy all but the first entry, as copying the first one
+        * could trigger badness with the valid-bit
+        */
+       bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+       bm_buffer_set_bpid(r->bufs, pool->bpid);
+       if (i)
+               memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+       bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+                         (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+       local_irq_restore(irqflags);
+       put_affine_portal();
+       return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+       struct bman_portal *p = get_affine_portal();
+       struct bm_mc_command *mcc;
+       union bm_mc_result *mcr;
+       int ret;
+
+       DPAA_ASSERT(num > 0 && num <= 8);
+
+       mcc = bm_mc_start(&p->p);
+       mcc->bpid = pool->bpid;
+       bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+                    (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+       if (!bm_mc_result_timeout(&p->p, &mcr)) {
+               put_affine_portal();
+               pr_crit("BMan Acquire Timeout\n");
+               return -ETIMEDOUT;
+       }
+       ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+       if (bufs)
+               memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+       put_affine_portal();
+       if (ret != num)
+               ret = -ENOMEM;
+       return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+       return portal->config;
+}
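For orientation only (not part of the patch), a minimal sketch of how a datapath user might drive the pool API exported above; it loosely mirrors what bman_test_api.c further down does, the header path matches the #include in bman_priv.h, and the buffer addresses are placeholder values:

	#include <soc/fsl/bman.h>

	static int bman_pool_smoke_test(void)
	{
		struct bm_buffer bufs[8];
		struct bman_pool *pool;
		int i, ret;

		pool = bman_new_pool();			/* allocates a free BPID */
		if (!pool)
			return -ENODEV;

		/* placeholder addresses; a real driver releases DMA-able buffers */
		for (i = 0; i < 8; i++)
			bm_buffer_set64(&bufs[i], 0x10000 + 0x100 * i);

		ret = bman_release(pool, bufs, 8);	/* hand buffers to the pool */
		if (!ret)
			ret = bman_acquire(pool, bufs, 8); /* and take them back */

		bman_free_pool(pool);			/* drains the pool, frees the BPID */
		return ret < 0 ? ret : 0;
	}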
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644 (file)
index 0000000..9deb052
--- /dev/null
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC           0x0800
+#define REG_ECSR               0x0a00
+#define REG_ECIR               0x0a04
+#define REG_EADR               0x0a08
+#define REG_EDATA(n)           (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)            (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1           0x0bf8
+#define REG_IP_REV_2           0x0bfc
+#define REG_FBPR_BARE          0x0c00
+#define REG_FBPR_BAR           0x0c04
+#define REG_FBPR_AR            0x0c10
+#define REG_SRCIDR             0x0d04
+#define REG_LIODNR             0x0d08
+#define REG_ERR_ISR            0x0e00
+#define REG_ERR_IER            0x0e04
+#define REG_ERR_ISDR           0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI   0x00000010      /* Invalid Command Verb */
+#define BM_EIRQ_FLWI   0x00000008      /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI   0x00000004      /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI   0x00000002      /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN   0x00000001      /* pool State Change Notification */
+
+struct bman_hwerr_txt {
+       u32 mask;
+       const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+       { BM_EIRQ_IVCI, "Invalid Command Verb" },
+       { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+       { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+       { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+       { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low watermark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+       return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+       iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+       u32 v = bm_ccsr_in(REG_IP_REV_1);
+       *id = (v >> 16);
+       *major = (v >> 8) & 0xff;
+       *minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+       u32 exp = ilog2(size);
+       /* choke if size isn't within range */
+       DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+                  is_power_of_2(size));
+       /* choke if '[e]ba' has lower-alignment than 'size' */
+       DPAA_ASSERT(!(ba & (size - 1)));
+       bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+       bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+       bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved).  Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+       fbpr_a = rmem->base;
+       fbpr_sz = rmem->size;
+
+       WARN_ON(!(fbpr_a && fbpr_sz));
+
+       return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+       u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+       struct device *dev = ptr;
+
+       ier_val = bm_ccsr_in(REG_ERR_IER);
+       isr_val = bm_ccsr_in(REG_ERR_ISR);
+       ecsr_val = bm_ccsr_in(REG_ECSR);
+       isr_mask = isr_val & ier_val;
+
+       if (!isr_mask)
+               return IRQ_NONE;
+
+       for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+               if (bman_hwerr_txts[i].mask & isr_mask) {
+                       dev_err_ratelimited(dev, "ErrInt: %s\n",
+                                           bman_hwerr_txts[i].txt);
+                       if (bman_hwerr_txts[i].mask & ecsr_val) {
+                               /* Re-arm error capture registers */
+                               bm_ccsr_out(REG_ECSR, ecsr_val);
+                       }
+                       if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+                               dev_dbg(dev, "Disabling error 0x%x\n",
+                                       bman_hwerr_txts[i].mask);
+                               ier_val &= ~bman_hwerr_txts[i].mask;
+                               bm_ccsr_out(REG_ERR_IER, ier_val);
+                       }
+               }
+       }
+       bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+       return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+       int ret, err_irq;
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct resource *res;
+       u16 id, bm_pool_cnt;
+       u8 major, minor;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+       bm_ccsr_start = devm_ioremap(dev, res->start,
+                                    res->end - res->start + 1);
+       if (!bm_ccsr_start)
+               return -ENXIO;
+
+       bm_get_version(&id, &major, &minor);
+       if (major == 1 && minor == 0) {
+               bman_ip_rev = BMAN_REV10;
+               bm_pool_cnt = BM_POOL_MAX;
+       } else if (major == 2 && minor == 0) {
+               bman_ip_rev = BMAN_REV20;
+               bm_pool_cnt = 8;
+       } else if (major == 2 && minor == 1) {
+               bman_ip_rev = BMAN_REV21;
+               bm_pool_cnt = BM_POOL_MAX;
+       } else {
+               dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+                       id, major, minor);
+               return -ENODEV;
+       }
+
+       bm_set_memory(fbpr_a, fbpr_sz);
+
+       err_irq = platform_get_irq(pdev, 0);
+       if (err_irq <= 0) {
+               dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+               return -ENODEV;
+       }
+       ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+                              dev);
+       if (ret)  {
+               dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+                       ret, node->full_name);
+               return ret;
+       }
+       /* Disable Buffer Pool State Change */
+       bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+       /*
+        * Write-to-clear any stale bits, (eg. starvation being asserted prior
+        * to resource allocation during driver init).
+        */
+       bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+       /* Enable Error Interrupts */
+       bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+       bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+       if (IS_ERR(bm_bpalloc)) {
+               ret = PTR_ERR(bm_bpalloc);
+               dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* seed BMan resource pool */
+       ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+       if (ret) {
+               dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+                       0, bm_pool_cnt - 1, ret);
+               return ret;
+       }
+
+       return 0;
+};
+
+static const struct of_device_id fsl_bman_ids[] = {
+       {
+               .compatible = "fsl,bman",
+       },
+       {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = fsl_bman_ids,
+               .suppress_bind_attrs = true,
+       },
+       .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644 (file)
index 0000000..6579cc1
--- /dev/null
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+       struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+       if (!p) {
+               dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+                        __func__, pcfg->cpu);
+               return NULL;
+       }
+
+       bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+       affine_bportals[pcfg->cpu] = p;
+
+       dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+       return p;
+}
+
+static void bman_offline_cpu(unsigned int cpu)
+{
+       struct bman_portal *p = affine_bportals[cpu];
+       const struct bm_portal_config *pcfg;
+
+       if (!p)
+               return;
+
+       pcfg = bman_get_bm_portal_config(p);
+       if (!pcfg)
+               return;
+
+       irq_set_affinity(pcfg->irq, cpumask_of(0));
+}
+
+static void bman_online_cpu(unsigned int cpu)
+{
+       struct bman_portal *p = affine_bportals[cpu];
+       const struct bm_portal_config *pcfg;
+
+       if (!p)
+               return;
+
+       pcfg = bman_get_bm_portal_config(p);
+       if (!pcfg)
+               return;
+
+       irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+}
+
+static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
+                                    unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               bman_online_cpu(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               bman_offline_cpu(cpu);
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+       .notifier_call = bman_hotplug_cpu_callback,
+};
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct bm_portal_config *pcfg;
+       struct resource *addr_phys[2];
+       void __iomem *va;
+       int irq, cpu;
+
+       pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+       if (!pcfg)
+               return -ENOMEM;
+
+       pcfg->dev = dev;
+
+       addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            DPAA_PORTAL_CE);
+       if (!addr_phys[0]) {
+               dev_err(dev, "Can't get %s property 'reg::CE'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            DPAA_PORTAL_CI);
+       if (!addr_phys[1]) {
+               dev_err(dev, "Can't get %s property 'reg::CI'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       pcfg->cpu = -1;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+               return -ENXIO;
+       }
+       pcfg->irq = irq;
+
+       va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+       if (!va)
+               goto err_ioremap1;
+
+       pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+       va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+                         _PAGE_GUARDED | _PAGE_NO_CACHE);
+       if (!va)
+               goto err_ioremap2;
+
+       pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+       spin_lock(&bman_lock);
+       cpu = cpumask_next_zero(-1, &portal_cpus);
+       if (cpu >= nr_cpu_ids) {
+               /* unassigned portal, skip init */
+               spin_unlock(&bman_lock);
+               return 0;
+       }
+
+       cpumask_set_cpu(cpu, &portal_cpus);
+       spin_unlock(&bman_lock);
+       pcfg->cpu = cpu;
+
+       if (!init_pcfg(pcfg))
+               goto err_ioremap2;
+
+       /* clear irq affinity if assigned cpu is offline */
+       if (!cpu_online(cpu))
+               bman_offline_cpu(cpu);
+
+       return 0;
+
+err_ioremap2:
+       iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+       dev_err(dev, "ioremap failed\n");
+       return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+       {
+               .compatible = "fsl,bman-portal",
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = bman_portal_ids,
+       },
+       .probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+       int ret;
+
+       ret = platform_driver_register(drv);
+       if (ret < 0)
+               return ret;
+
+       register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+
+       return 0;
+}
+
+module_driver(bman_portal_driver,
+             bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644 (file)
index 0000000..f6896a2
--- /dev/null
@@ -0,0 +1,80 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI   0x00000002      /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev;        /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+       /*
+        * Corenet portal addresses;
+        * [0]==cache-enabled, [1]==cache-inhibited.
+        */
+       void __iomem *addr_virt[2];
+       /* Allow these to be joined in lists */
+       struct list_head list;
+       struct device *dev;
+       /* User-visible portal configuration settings */
+       /* portal is affined to this cpu */
+       int cpu;
+       /* portal interrupt line */
+       int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+                       const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called even when the cpu to which the
+ * portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE        BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644 (file)
index 0000000..09b1c96
--- /dev/null
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+       int loop = 1;
+
+       while (loop--)
+               bman_test_api();
+#endif
+       return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644 (file)
index 0000000..037ed34
--- /dev/null
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644 (file)
index 0000000..6f6bdd1
--- /dev/null
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS       93
+#define LOOPS          3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+       int i;
+
+       for (i = 0; i < NUM_BUFS; i++)
+               bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+       bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+       if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+
+               /*
+                * On SoCs with BMan revision 2.0, BMan only respects the 40
+                * LS-bits of buffer addresses, masking off the upper 8-bits on
+                * release commands. The API provides for 48-bit addresses
+                * because some SoCs support all 48-bits. When generating
+                * garbage addresses for testing, we either need to zero the
+                * upper 8-bits when releasing to BMan (otherwise we'll be
+                * disappointed when the buffers we acquire back from BMan
+                * don't match), or we need to mask the upper 8-bits off when
+                * comparing. We do the latter.
+                */
+               if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+                   (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+                       return -1;
+               if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+                   (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+                       return 1;
+       } else {
+               if (bm_buffer_get64(a) < bm_buffer_get64(b))
+                       return -1;
+               if (bm_buffer_get64(a) > bm_buffer_get64(b))
+                       return 1;
+       }
+
+       return 0;
+}
+
+static void bufs_confirm(void)
+{
+       int i, j;
+
+       for (i = 0; i < NUM_BUFS; i++) {
+               int matches = 0;
+
+               for (j = 0; j < NUM_BUFS; j++)
+                       if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+                               matches++;
+               WARN_ON(matches != 1);
+       }
+}
+
+/* test */
+void bman_test_api(void)
+{
+       int i, loops = LOOPS;
+
+       bufs_init();
+
+       pr_info("%s(): Starting\n", __func__);
+
+       pool = bman_new_pool();
+       if (!pool) {
+               pr_crit("bman_new_pool() failed\n");
+               goto failed;
+       }
+
+       /* Release buffers */
+do_loop:
+       i = 0;
+       while (i < NUM_BUFS) {
+               int num = 8;
+
+               if (i + num > NUM_BUFS)
+                       num = NUM_BUFS - i;
+               if (bman_release(pool, bufs_in + i, num)) {
+                       pr_crit("bman_release() failed\n");
+                       goto failed;
+               }
+               i += num;
+       }
+
+       /* Acquire buffers */
+       while (i > 0) {
+               int tmp, num = 8;
+
+               if (num > i)
+                       num = i;
+               tmp = bman_acquire(pool, bufs_out + i - num, num);
+               WARN_ON(tmp != num);
+               i -= num;
+       }
+       i = bman_acquire(pool, NULL, 1);
+       WARN_ON(i > 0);
+
+       bufs_confirm();
+
+       if (--loops)
+               goto do_loop;
+
+       /* Clean up */
+       bman_free_pool(pool);
+       pr_info("%s(): Finished\n", __func__);
+       return;
+
+failed:
+       WARN_ON(1);
+}
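
The comment in bufs_cmp() above explains why comparisons are done under BMAN_TOKEN_MASK on BMan rev 2.0/2.1 parts. Below is an illustrative, standalone userspace sketch (plain C, not part of this commit) of that masking: two 48-bit buffer addresses that differ only in their upper 8 bits are treated as the same buffer once the mask is applied, mirroring what the hardware does on release.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOKEN_MASK 0x00FFFFFFFFFFULL	/* keep only the 40 LS-bits, as BMAN_TOKEN_MASK does */

/* Compare two buffer addresses the way bufs_cmp() does on rev 2.x parts */
static int cmp_masked(uint64_t a, uint64_t b)
{
	a &= TOKEN_MASK;
	b &= TOKEN_MASK;
	return (a < b) ? -1 : (a > b) ? 1 : 0;
}

int main(void)
{
	uint64_t released = 0xfedc01234567ULL;		/* 48-bit test pattern */
	uint64_t acquired = released & TOKEN_MASK;	/* upper 8 bits dropped by h/w */

	assert(released != acquired);			/* differ as raw values ... */
	assert(cmp_masked(released, acquired) == 0);	/* ... but match under the mask */
	printf("0x%llx and 0x%llx refer to the same buffer\n",
	       (unsigned long long)released, (unsigned long long)acquired);
	return 0;
}
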
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644 (file)
index 0000000..b63fd72
--- /dev/null
@@ -0,0 +1,103 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
+
+static inline void dpaa_flush(void *p)
+{
+#ifdef CONFIG_PPC
+       flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM32)
+       __cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+       __flush_dcache_area(p, 64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+       prefetch(p+32);
+#endif
+       prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+       dpaa_invalidate(p);
+       dpaa_touch_ro(p);
+}
+
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+       /* 'first' is included, 'last' is excluded */
+       if (first <= last)
+               return last - first;
+       return ringsize + last - first;
+}
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF      0x80000000
+
+#endif /* __DPAA_SYS_H */
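
dpaa_cyc_diff() above is the building block the portal code (in qman.c below) uses to turn raw producer/consumer ring indices into fill and availability counts, e.g. DQRR fill = dpaa_cyc_diff(size, ci, pi) and EQCR available = size - 1 - dpaa_cyc_diff(size, ci, pi). A small standalone check of that arithmetic (userspace C, illustrative only; the ring sizes are the ones used later):

#include <assert.h>
#include <stdint.h>

/* Same semantics as dpaa_cyc_diff(): 'first' is included, 'last' is excluded */
static uint8_t cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
{
	if (first <= last)
		return last - first;
	return ringsize + last - first;
}

int main(void)
{
	/* No wrap: consumer at 2, producer at 5 -> 3 outstanding DQRR entries */
	assert(cyc_diff(16, 2, 5) == 3);
	/* Wrap-around: consumer at 14, producer at 3 on a 16-entry ring -> 5 */
	assert(cyc_diff(16, 14, 3) == 5);
	/* EQCR keeps one slot unused: with pi == ci, 7 of 8 entries are available */
	assert(8 - 1 - cyc_diff(8, 6, 6) == 7);
	return 0;
}
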
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644 (file)
index 0000000..119054b
--- /dev/null
@@ -0,0 +1,2881 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL   15
+#define EQCR_ITHRESH   4       /* if EQCR congests, interrupt threshold */
+#define IRQNAME                "QMan portal %d"
+#define MAX_IRQNAME    16      /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH    0x0000
+#define QM_REG_EQCR_CI_CINH    0x0004
+#define QM_REG_EQCR_ITR                0x0008
+#define QM_REG_DQRR_PI_CINH    0x0040
+#define QM_REG_DQRR_CI_CINH    0x0044
+#define QM_REG_DQRR_ITR                0x0048
+#define QM_REG_DQRR_DCAP       0x0050
+#define QM_REG_DQRR_SDQCR      0x0054
+#define QM_REG_DQRR_VDQCR      0x0058
+#define QM_REG_DQRR_PDQCR      0x005c
+#define QM_REG_MR_PI_CINH      0x0080
+#define QM_REG_MR_CI_CINH      0x0084
+#define QM_REG_MR_ITR          0x0088
+#define QM_REG_CFG             0x0100
+#define QM_REG_ISR             0x0e00
+#define QM_REG_IER             0x0e04
+#define QM_REG_ISDR            0x0e08
+#define QM_REG_IIR             0x0e0c
+#define QM_REG_ITPR            0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR             0x0000
+#define QM_CL_DQRR             0x1000
+#define QM_CL_MR               0x2000
+#define QM_CL_EQCR_PI_CENA     0x3000
+#define QM_CL_EQCR_CI_CENA     0x3100
+#define QM_CL_DQRR_PI_CENA     0x3200
+#define QM_CL_DQRR_CI_CENA     0x3300
+#define QM_CL_MR_PI_CENA       0x3400
+#define QM_CL_MR_CI_CENA       0x3500
+#define QM_CL_CR               0x3800
+#define QM_CL_RR0              0x3900
+#define QM_CL_RR1              0x3940
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrades performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile.
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx)       ((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ *   Enum types:
+ *     pmode == production mode
+ *     cmode == consumption mode
+ *     dmode == h/w dequeue mode.
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate:
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode {           /* matches QCSP_CFG::EPM */
+       qm_eqcr_pci = 0,        /* PI index, cache-inhibited */
+       qm_eqcr_pce = 1,        /* PI index, cache-enabled */
+       qm_eqcr_pvb = 2         /* valid-bit */
+};
+enum qm_dqrr_dmode {           /* matches QCSP_CFG::DP */
+       qm_dqrr_dpush = 0,      /* SDQCR  + VDQCR */
+       qm_dqrr_dpull = 1       /* PDQCR */
+};
+enum qm_dqrr_pmode {           /* s/w-only */
+       qm_dqrr_pci,            /* reads DQRR_PI_CINH */
+       qm_dqrr_pce,            /* reads DQRR_PI_CENA */
+       qm_dqrr_pvb             /* reads valid-bit */
+};
+enum qm_dqrr_cmode {           /* matches QCSP_CFG::DCM */
+       qm_dqrr_cci = 0,        /* CI index, cache-inhibited */
+       qm_dqrr_cce = 1,        /* CI index, cache-enabled */
+       qm_dqrr_cdc = 2         /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode {             /* s/w-only */
+       qm_mr_pci,              /* reads MR_PI_CINH */
+       qm_mr_pce,              /* reads MR_PI_CENA */
+       qm_mr_pvb               /* reads valid-bit */
+};
+enum qm_mr_cmode {             /* matches QCSP_CFG::MM */
+       qm_mr_cci = 0,          /* CI index, cache-inhibited */
+       qm_mr_cce = 1           /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE           8
+#define QM_DQRR_SIZE           16
+#define QM_MR_SIZE             8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+       u8 _ncw_verb; /* writes to this are non-coherent */
+       u8 dca;
+       u16 seqnum;
+       u32 orp;        /* 24-bit */
+       u32 fqid;       /* 24-bit */
+       u32 tag;
+       struct qm_fd fd;
+       u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT              0x80
+#define QM_EQCR_VERB_CMD_MASK          0x61    /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE       0x01
+#define QM_EQCR_SEQNUM_NESN            0x8000  /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS            0x4000  /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK         0x3fff  /* sequence number goes here */
+
+struct qm_eqcr {
+       struct qm_eqcr_entry *ring, *cursor;
+       u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       u32 busy;
+       enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+       const struct qm_dqrr_entry *ring, *cursor;
+       u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       enum qm_dqrr_dmode dmode;
+       enum qm_dqrr_pmode pmode;
+       enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+       union qm_mr_entry *ring, *cursor;
+       u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       enum qm_mr_pmode pmode;
+       enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "Query FQ" */
+struct qm_mcc_queryfq {
+       u8 _ncw_verb;
+       u8 __reserved1[3];
+       u32 fqid;       /* 24-bit */
+       u8 __reserved2[56];
+} __packed;
+/* "Alter FQ State Commands" */
+struct qm_mcc_alterfq {
+       u8 _ncw_verb;
+       u8 __reserved1[3];
+       u32 fqid;       /* 24-bit */
+       u8 __reserved2;
+       u8 count;       /* number of consecutive FQID */
+       u8 __reserved3[10];
+       u32 context_b;  /* frame queue context b */
+       u8 __reserved4[40];
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcc_querycgr {
+       u8 _ncw_verb;
+       u8 __reserved1[30];
+       u8 cgid;
+       u8 __reserved2[32];
+};
+
+struct qm_mcc_querywq {
+       u8 _ncw_verb;
+       u8 __reserved;
+       /* select channel if verb != QUERYWQ_DEDICATED */
+       u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+       u8 __reserved2[60];
+} __packed;
+
+#define QM_MCC_VERB_VBIT               0x80
+#define QM_MCC_VERB_MASK               0x7f    /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED      0x40
+#define QM_MCC_VERB_INITFQ_SCHED       0x41
+#define QM_MCC_VERB_QUERYFQ            0x44
+#define QM_MCC_VERB_QUERYFQ_NP         0x45    /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ            0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED  0x47
+#define QM_MCC_VERB_ALTER_SCHED                0x48    /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE           0x49    /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE       0x4a    /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS          0x4b    /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON                0x4d    /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF       0x4e    /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR            0x50
+#define QM_MCC_VERB_MODIFYCGR          0x51
+#define QM_MCC_VERB_CGRTESTWRITE       0x52
+#define QM_MCC_VERB_QUERYCGR           0x58
+#define QM_MCC_VERB_QUERYCONGESTION    0x59
+union qm_mc_command {
+       struct {
+               u8 _ncw_verb; /* writes to this are non-coherent */
+               u8 __reserved[63];
+       };
+       struct qm_mcc_initfq initfq;
+       struct qm_mcc_queryfq queryfq;
+       struct qm_mcc_alterfq alterfq;
+       struct qm_mcc_initcgr initcgr;
+       struct qm_mcc_querycgr querycgr;
+       struct qm_mcc_querywq querywq;
+       struct qm_mcc_queryfq_np queryfq_np;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+       u8 verb;
+       u8 result;
+       u8 __reserved1[8];
+       struct qm_fqd fqd;      /* the FQD fields are here */
+       u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+       u8 verb;
+       u8 result;
+       u8 fqs;         /* Frame Queue Status */
+       u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID               0x80
+#define QM_MCR_VERB_MASK               QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED      QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED       QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ            QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP         QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ            QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED  QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED                QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE           QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE       QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS          QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL             0x00
+#define QM_MCR_RESULT_OK               0xf0
+#define QM_MCR_RESULT_ERR_FQID         0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE      0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY     0xf3    /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL   0xf4
+#define QM_MCR_RESULT_PENDING          0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND   0xff
+#define QM_MCR_FQS_ORLPRESENT          0x02    /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY            0x01    /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT                 10000   /* us */
+union qm_mc_result {
+       struct {
+               u8 verb;
+               u8 result;
+               u8 __reserved1[62];
+       };
+       struct qm_mcr_queryfq queryfq;
+       struct qm_mcr_alterfq alterfq;
+       struct qm_mcr_querycgr querycgr;
+       struct qm_mcr_querycongestion querycongestion;
+       struct qm_mcr_querywq querywq;
+       struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+       union qm_mc_command *cr;
+       union qm_mc_result *rr;
+       u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       enum {
+               /* Can be _mc_start()ed */
+               qman_mc_idle,
+               /* Can be _mc_commit()ed or _mc_abort()ed */
+               qman_mc_user,
+               /* Can only be _mc_retry()ed */
+               qman_mc_hw
+       } state;
+#endif
+};
+
+struct qm_addr {
+       void __iomem *ce;       /* cache-enabled */
+       void __iomem *ci;       /* cache-inhibited */
+};
+
+struct qm_portal {
+       /*
+        * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+        * and including 'mc' fits within a cacheline (yay!). The 'config' part
+        * is setup-only, so isn't a cause for concern. In other words, don't
+        * rearrange this structure on a whim, there be dragons ...
+        */
+       struct qm_addr addr;
+       struct qm_eqcr eqcr;
+       struct qm_dqrr dqrr;
+       struct qm_mr mr;
+       struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+       return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+       __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+       dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+       dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+       return __raw_readl(p->addr.ce + offset);
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT     ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY     (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+       uintptr_t addr = (uintptr_t)p;
+
+       addr &= ~EQCR_CARRY;
+
+       return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+       return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+       /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+       struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+       eqcr->cursor = eqcr_carryclear(partial);
+       if (partial != eqcr->cursor)
+               eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
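
eqcr_carryclear(), eqcr_ptr2idx() and eqcr_inc() above rely on the EQCR ring living at an address where the bit just above the ring (the "carry bit", EQCR_CARRY) starts out clear; stepping the cursor past the last 64-byte entry then sets exactly that bit, and clearing it wraps the pointer back to entry 0, which is also the condition eqcr_inc() uses to toggle the valid bit. A standalone model of that trick (userspace C, illustrative only; the portal mapping in the real driver guarantees the alignment that the _Alignas below fakes):

#include <assert.h>
#include <stdint.h>

#define ENTRY_SHIFT	6				/* 64-byte entries, like struct qm_eqcr_entry */
#define RING_SIZE	8				/* QM_EQCR_SIZE */
#define RING_CARRY	((uintptr_t)(RING_SIZE << ENTRY_SHIFT))

struct entry { uint8_t bytes[1 << ENTRY_SHIFT]; };

/* Align so the carry bit of the ring's base address is known to be clear */
static _Alignas(2 * (RING_SIZE << ENTRY_SHIFT)) struct entry ring[RING_SIZE];

static struct entry *carryclear(struct entry *p)
{
	return (struct entry *)((uintptr_t)p & ~RING_CARRY);
}

static int ptr2idx(const struct entry *p)
{
	return ((uintptr_t)p >> ENTRY_SHIFT) & (RING_SIZE - 1);
}

int main(void)
{
	struct entry *cursor = &ring[RING_SIZE - 1];
	struct entry *partial = cursor + 1;	/* one past the end: carry bit set */

	cursor = carryclear(partial);		/* ... cleared: back to entry 0 */
	assert(cursor == &ring[0] && ptr2idx(cursor) == 0);
	assert(partial != cursor);		/* the wrap that eqcr_inc() detects */
	return 0;
}
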
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+                               enum qm_eqcr_pmode pmode,
+                               unsigned int eq_stash_thresh,
+                               int eq_stash_prio)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+       u32 cfg;
+       u8 pi;
+
+       eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+       eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+       qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+       pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+       eqcr->cursor = eqcr->ring + pi;
+       eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+                    QM_EQCR_VERB_VBIT : 0;
+       eqcr->available = QM_EQCR_SIZE - 1 -
+                         dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+       eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       eqcr->busy = 0;
+       eqcr->pmode = pmode;
+#endif
+       cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+             (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+             (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+             ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+       qm_out(portal, QM_REG_CFG, cfg);
+       return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+       return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+       u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+       u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+       DPAA_ASSERT(!eqcr->busy);
+       if (pi != eqcr_ptr2idx(eqcr->cursor))
+               pr_crit("losing uncommitted EQCR entries\n");
+       if (ci != eqcr->ci)
+               pr_crit("missing existing EQCR completions\n");
+       if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+               pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+                                                                *portal)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+
+       DPAA_ASSERT(!eqcr->busy);
+       if (!eqcr->available)
+               return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       eqcr->busy = 1;
+#endif
+       dpaa_zero(eqcr->cursor);
+       return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+                                                               *portal)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+       u8 diff, old_ci;
+
+       DPAA_ASSERT(!eqcr->busy);
+       if (!eqcr->available) {
+               old_ci = eqcr->ci;
+               eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+                          (QM_EQCR_SIZE - 1);
+               diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+               eqcr->available += diff;
+               if (!diff)
+                       return NULL;
+       }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       eqcr->busy = 1;
+#endif
+       dpaa_zero(eqcr->cursor);
+       return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+       DPAA_ASSERT(eqcr->busy);
+       DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
+       DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+       DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+       struct qm_eqcr_entry *eqcursor;
+
+       eqcr_commit_checks(eqcr);
+       DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+       dma_wmb();
+       eqcursor = eqcr->cursor;
+       eqcursor->_ncw_verb = myverb | eqcr->vbit;
+       dpaa_flush(eqcursor);
+       eqcr_inc(eqcr);
+       eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+       qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+       u8 diff, old_ci = eqcr->ci;
+
+       eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+       qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+       diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+       eqcr->available += diff;
+       return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+
+       eqcr->ithresh = ithresh;
+       qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+
+       return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+       struct qm_eqcr *eqcr = &portal->eqcr;
+
+       return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT     ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY     (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+                                       const struct qm_dqrr_entry *p)
+{
+       uintptr_t addr = (uintptr_t)p;
+
+       addr &= ~DQRR_CARRY;
+
+       return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+       return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+       return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+       qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+                                  ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+                              const struct qm_portal_config *config,
+                              enum qm_dqrr_dmode dmode,
+                              enum qm_dqrr_pmode pmode,
+                              enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+       struct qm_dqrr *dqrr = &portal->dqrr;
+       u32 cfg;
+
+       /* Make sure the DQRR will be idle when we enable */
+       qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+       qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+       qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+       dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+       dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+       dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+       dqrr->cursor = dqrr->ring + dqrr->ci;
+       dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+       dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+                       QM_DQRR_VERB_VBIT : 0;
+       dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       dqrr->dmode = dmode;
+       dqrr->pmode = pmode;
+       dqrr->cmode = cmode;
+#endif
+       /* Invalidate every ring entry before beginning */
+       for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+               dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+       cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+               ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+               ((dmode & 1) << 18) |                   /* DP */
+               ((cmode & 3) << 16) |                   /* DCM */
+               0xa0 |                                  /* RE+SE */
+               (0 ? 0x40 : 0) |                        /* Ignore RP */
+               (0 ? 0x10 : 0);                         /* Ignore SP */
+       qm_out(portal, QM_REG_CFG, cfg);
+       qm_dqrr_set_maxfill(portal, max_fill);
+       return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       struct qm_dqrr *dqrr = &portal->dqrr;
+
+       if (dqrr->cmode != qm_dqrr_cdc &&
+           dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+               pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+                                               struct qm_portal *portal)
+{
+       struct qm_dqrr *dqrr = &portal->dqrr;
+
+       if (!dqrr->fill)
+               return NULL;
+       return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+       struct qm_dqrr *dqrr = &portal->dqrr;
+
+       DPAA_ASSERT(dqrr->fill);
+       dqrr->cursor = dqrr_inc(dqrr->cursor);
+       return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+       struct qm_dqrr *dqrr = &portal->dqrr;
+       struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+       DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+       /*
+        * If PAMU is not available we need to invalidate the cache.
+        * When PAMU is available the cache is updated by stashing.
+        */
+       dpaa_invalidate_touch_ro(res);
+#endif
+       /*
+        * When accessing 'verb', use __raw_readb() to ensure that compiler
+        * inlining doesn't try to optimise out "excess reads".
+        */
+       if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+               dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+               if (!dqrr->pi)
+                       dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+               dqrr->fill++;
+       }
+}
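
qm_dqrr_pvb_update() is one instance of the in-band valid-bit ("pvb") convention these rings use: the producer toggles a bit in each entry's verb on every lap around the ring, so the consumer can tell a freshly written entry from a stale one without reading a hardware producer index. A minimal userspace model of that convention (illustrative only; ring size and verb layout simplified, single-threaded so no barriers are shown):

#include <assert.h>
#include <stdint.h>

#define RING_SIZE	8
#define VERB_VBIT	0x80

static uint8_t verbs[RING_SIZE];	/* just the verb byte of each ring entry */

struct consumer {
	uint8_t pi;			/* next index we expect to be filled */
	uint8_t vbit;			/* valid-bit value that means "new" this lap */
	unsigned int fill;
};

/* Producer: publish entry 'idx' on lap 'lap' (even laps set the bit, odd laps clear it) */
static void produce(unsigned int idx, unsigned int lap)
{
	verbs[idx] = 0x01 | ((lap & 1) ? 0 : VERB_VBIT);
}

/* Consumer: the same check qm_dqrr_pvb_update() performs */
static void pvb_update(struct consumer *c)
{
	if ((verbs[c->pi] & VERB_VBIT) == c->vbit) {
		c->pi = (c->pi + 1) & (RING_SIZE - 1);
		if (!c->pi)
			c->vbit ^= VERB_VBIT;	/* flip expectation on wrap */
		c->fill++;
	}
}

int main(void)
{
	struct consumer c = { .pi = 0, .vbit = VERB_VBIT, .fill = 0 };
	unsigned int lap, idx;

	for (lap = 0; lap < 2; lap++)
		for (idx = 0; idx < RING_SIZE; idx++) {
			unsigned int before = c.fill;

			pvb_update(&c);			/* stale entry: nothing consumed */
			assert(c.fill == before);
			produce(idx, lap);
			pvb_update(&c);			/* fresh entry: consumed */
			assert(c.fill == before + 1);
		}
	assert(c.fill == 2 * RING_SIZE);		/* every entry seen exactly once */
	return 0;
}
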
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+                                       const struct qm_dqrr_entry *dq,
+                                       int park)
+{
+       __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+       int idx = dqrr_ptr2idx(dq);
+
+       DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+       DPAA_ASSERT((dqrr->ring + idx) == dq);
+       DPAA_ASSERT(idx < QM_DQRR_SIZE);
+       qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+              ((park ? 1 : 0) << 6) |              /* DQRR_DCAP::PK */
+              idx);                                /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+       __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+       DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+       qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+              (bitmask << 16));                    /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+       qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+       qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+       qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT       ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY       (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+       uintptr_t addr = (uintptr_t)p;
+
+       addr &= ~MR_CARRY;
+
+       return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+       return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+       return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+                            enum qm_mr_cmode cmode)
+{
+       struct qm_mr *mr = &portal->mr;
+       u32 cfg;
+
+       mr->ring = portal->addr.ce + QM_CL_MR;
+       mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+       mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+       mr->cursor = mr->ring + mr->ci;
+       mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+       mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+               ? QM_MR_VERB_VBIT : 0;
+       mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mr->pmode = pmode;
+       mr->cmode = cmode;
+#endif
+       cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+             ((cmode & 1) << 8);       /* QCSP_CFG:MM */
+       qm_out(portal, QM_REG_CFG, cfg);
+       return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+       struct qm_mr *mr = &portal->mr;
+
+       if (mr->ci != mr_ptr2idx(mr->cursor))
+               pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+       struct qm_mr *mr = &portal->mr;
+
+       if (!mr->fill)
+               return NULL;
+       return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+       struct qm_mr *mr = &portal->mr;
+
+       DPAA_ASSERT(mr->fill);
+       mr->cursor = mr_inc(mr->cursor);
+       return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+       struct qm_mr *mr = &portal->mr;
+       union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+       DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+       /*
+        * When accessing 'verb', use __raw_readb() to ensure that compiler
+        * inlining doesn't try to optimise out "excess reads".
+        */
+       if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+               mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+               if (!mr->pi)
+                       mr->vbit ^= QM_MR_VERB_VBIT;
+               mr->fill++;
+               res = mr_inc(res);
+       }
+       dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+       struct qm_mr *mr = &portal->mr;
+
+       DPAA_ASSERT(mr->cmode == qm_mr_cci);
+       mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+       qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+       struct qm_mr *mr = &portal->mr;
+
+       DPAA_ASSERT(mr->cmode == qm_mr_cci);
+       mr->ci = mr_ptr2idx(mr->cursor);
+       qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+       qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+       struct qm_mc *mc = &portal->mc;
+
+       mc->cr = portal->addr.ce + QM_CL_CR;
+       mc->rr = portal->addr.ce + QM_CL_RR0;
+       mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+                   ? 0 : 1;
+       mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = qman_mc_idle;
+#endif
+       return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       struct qm_mc *mc = &portal->mc;
+
+       DPAA_ASSERT(mc->state == qman_mc_idle);
+       if (mc->state != qman_mc_idle)
+               pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+       struct qm_mc *mc = &portal->mc;
+
+       DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = qman_mc_user;
+#endif
+       dpaa_zero(mc->cr);
+       return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+       struct qm_mc *mc = &portal->mc;
+       union qm_mc_result *rr = mc->rr + mc->rridx;
+
+       DPAA_ASSERT(mc->state == qman_mc_user);
+       dma_wmb();
+       mc->cr->_ncw_verb = myverb | mc->vbit;
+       dpaa_flush(mc->cr);
+       dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+       struct qm_mc *mc = &portal->mc;
+       union qm_mc_result *rr = mc->rr + mc->rridx;
+
+       DPAA_ASSERT(mc->state == qman_mc_hw);
+       /*
+        * The inactive response register's verb byte always returns zero until
+        * its command is submitted and completed. This includes the valid-bit,
+        * in case you were wondering...
+        */
+       if (!__raw_readb(&rr->verb)) {
+               dpaa_invalidate_touch_ro(rr);
+               return NULL;
+       }
+       mc->rridx ^= 1;
+       mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = qman_mc_idle;
+#endif
+       return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+                                      union qm_mc_result **mcr)
+{
+       int timeout = QM_MCR_TIMEOUT;
+
+       do {
+               *mcr = qm_mc_result(portal);
+               if (*mcr)
+                       break;
+               udelay(1);
+       } while (--timeout);
+
+       return timeout;
+}
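
Taken together, qm_mc_start(), qm_mc_commit() and qm_mc_result_timeout() form the management-command handshake: claim and zero the command register, fill it in, publish the verb (with the valid bit) so hardware starts executing, then poll the response register until its verb goes non-zero or the timeout expires. A hedged sketch of a caller, modelled on the QUERYCONGESTION use in qm_congestion_task() further down in this file (not part of the commit, error handling trimmed):

static int query_congestion_sketch(struct qman_portal *p,
				   union qm_mc_result **mcr)
{
	qm_mc_start(&p->p);			/* claim + zero the command register */
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); /* publish verb, h/w runs it */
	if (!qm_mc_result_timeout(&p->p, mcr))	/* poll up to QM_MCR_TIMEOUT us */
		return -ETIMEDOUT;
	return 0;				/* *mcr now points at the result */
}
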
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+       set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+       clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+       return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+       return !(fq->flags & mask);
+}
+
+struct qman_portal {
+       struct qm_portal p;
+       /* PORTAL_BITS_*** - dynamic, strictly internal */
+       unsigned long bits;
+       /* interrupt sources processed by portal_isr(), configurable */
+       unsigned long irq_sources;
+       u32 use_eqcr_ci_stashing;
+       /* only 1 volatile dequeue at a time */
+       struct qman_fq *vdqcr_owned;
+       u32 sdqcr;
+       /* probing time config params for cpu-affine portals */
+       const struct qm_portal_config *config;
+       /* needed for providing a non-NULL device to dma_map_***() */
+       struct platform_device *pdev;
+       /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+       struct qman_cgrs *cgrs;
+       /* linked-list of CSCN handlers. */
+       struct list_head cgr_cbs;
+       /* list lock */
+       spinlock_t cgr_lock;
+       struct work_struct congestion_work;
+       struct work_struct mr_work;
+       char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+       return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+       put_cpu_var(qman_affine_portal);
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+       qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+       if (!qm_portal_wq)
+               return -ENOMEM;
+       return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+       num_fqids = _num_fqids;
+
+       fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
+       if (!fq_table)
+               return -ENOMEM;
+
+       pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+                fq_table, num_fqids * 2);
+       return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+       struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       if (WARN_ON(idx >= num_fqids * 2))
+               return NULL;
+#endif
+       fq = fq_table[idx];
+       DPAA_ASSERT(!fq || idx == fq->idx);
+
+       return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+       return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+       return idx_to_fq(tag);
+#else
+       return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+       return fq->idx;
+#else
+       return (u32)fq;
+#endif
+}
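
tag_to_fq() and fq_to_tag() above encode the driver's per-FQ bookkeeping into the 32-bit tag/contextB value that hardware later echoes back in dequeue and message-ring entries: on 64-bit kernels a pointer no longer fits, so the tag is an index into fq_table; on 32-bit kernels the pointer itself is the tag. A standalone illustration of the 64-bit scheme (userspace C; the table size and FQ type are stand-ins, not the driver's own):

#include <assert.h>
#include <stdint.h>

struct fake_fq { uint32_t idx; };		/* stand-in for struct qman_fq */

#define NUM_SLOTS 64
static struct fake_fq *table[NUM_SLOTS];	/* stand-in for fq_table */

static uint32_t to_tag(const struct fake_fq *fq)
{
	return fq->idx;				/* 64-bit case: the tag is an index */
}

static struct fake_fq *from_tag(uint32_t tag)
{
	return (tag < NUM_SLOTS) ? table[tag] : NULL;
}

int main(void)
{
	static struct fake_fq fq = { .idx = 17 };

	table[fq.idx] = &fq;
	/* The 32-bit tag handed to hardware round-trips back to the object */
	assert(from_tag(to_tag(&fq)) == &fq);
	return 0;
}
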
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+                                       unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+       struct qman_portal *p = ptr;
+
+       u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+       u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+       if (unlikely(!is))
+               return IRQ_NONE;
+
+       /* DQRR-handling if it's interrupt-driven */
+       if (is & QM_PIRQ_DQRI)
+               __poll_portal_fast(p, QMAN_POLL_LIMIT);
+       /* Handling of anything else that's interrupt-driven */
+       clear |= __poll_portal_slow(p, is);
+       qm_out(&p->p, QM_REG_ISR, clear);
+       return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+       const union qm_mr_entry *msg;
+loop:
+       msg = qm_mr_current(p);
+       if (!msg) {
+               /*
+                * if MR was full and h/w had other FQRNI entries to produce, we
+                * need to allow it time to produce those entries once the
+                * existing entries are consumed. A worst-case situation
+                * (fully-loaded system) means h/w sequencers may have to do 3-4
+                * other things before servicing the portal's MR pump, each of
+                * which (if slow) may take ~50 qman cycles (which is ~200
+                * processor cycles). So rounding up and then multiplying this
+                * worst-case estimate by a factor of 10, just to be
+                * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+                * one entry at a time, so h/w has an opportunity to produce new
+                * entries well before the ring has been fully consumed, so
+                * we're being *really* paranoid here.
+                */
+               u64 now, then = jiffies;
+
+               do {
+                       now = jiffies;
+               } while ((then + 10000) > now);
+               msg = qm_mr_current(p);
+               if (!msg)
+                       return 0;
+       }
+       if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+               /* We aren't draining anything but FQRNIs */
+               pr_err("Found verb 0x%x in MR\n", msg->verb);
+               return -1;
+       }
+       qm_mr_next(p);
+       qm_mr_cci_consume(p, 1);
+       goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+                             const struct qm_portal_config *c,
+                             const struct qman_cgrs *cgrs)
+{
+       struct qm_portal *p;
+       char buf[16];
+       int ret;
+       u32 isdr;
+
+       p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+       /* PAMU is required for stashing */
+       portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+       portal->use_eqcr_ci_stashing = 0;
+#endif
+       /*
+        * prep the low-level portal struct with the mapped addresses from the
+        * config, everything that follows depends on it and "config" is more
+        * for (de)reference
+        */
+       p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+       p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+       /*
+        * If CI-stashing is used, the current defaults use a threshold of 3,
+        * and stash with higher-than-DQRR priority.
+        */
+       if (qm_eqcr_init(p, qm_eqcr_pvb,
+                       portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+               dev_err(c->dev, "EQCR initialisation failed\n");
+               goto fail_eqcr;
+       }
+       if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+                       qm_dqrr_cdc, DQRR_MAXFILL)) {
+               dev_err(c->dev, "DQRR initialisation failed\n");
+               goto fail_dqrr;
+       }
+       if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+               dev_err(c->dev, "MR initialisation failed\n");
+               goto fail_mr;
+       }
+       if (qm_mc_init(p)) {
+               dev_err(c->dev, "MC initialisation failed\n");
+               goto fail_mc;
+       }
+       /* static interrupt-gating controls */
+       qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+       qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+       qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+       portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+       if (!portal->cgrs)
+               goto fail_cgrs;
+       /* initial snapshot is no-depletion */
+       qman_cgrs_init(&portal->cgrs[1]);
+       if (cgrs)
+               portal->cgrs[0] = *cgrs;
+       else
+               /* if the given mask is NULL, assume all CGRs can be seen */
+               qman_cgrs_fill(&portal->cgrs[0]);
+       INIT_LIST_HEAD(&portal->cgr_cbs);
+       spin_lock_init(&portal->cgr_lock);
+       INIT_WORK(&portal->congestion_work, qm_congestion_task);
+       INIT_WORK(&portal->mr_work, qm_mr_process_task);
+       portal->bits = 0;
+       portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+                       QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+                       QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+       sprintf(buf, "qportal-%d", c->channel);
+       portal->pdev = platform_device_alloc(buf, -1);
+       if (!portal->pdev)
+               goto fail_devalloc;
+       if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+               goto fail_devadd;
+       ret = platform_device_add(portal->pdev);
+       if (ret)
+               goto fail_devadd;
+       isdr = 0xffffffff;
+       qm_out(p, QM_REG_ISDR, isdr);
+       portal->irq_sources = 0;
+       qm_out(p, QM_REG_IER, 0);
+       qm_out(p, QM_REG_ISR, 0xffffffff);
+       snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+       if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+               dev_err(c->dev, "request_irq() failed\n");
+               goto fail_irq;
+       }
+       if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+           irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+               dev_err(c->dev, "irq_set_affinity() failed\n");
+               goto fail_affinity;
+       }
+
+       /* Need EQCR to be empty before continuing */
+       isdr &= ~QM_PIRQ_EQCI;
+       qm_out(p, QM_REG_ISDR, isdr);
+       ret = qm_eqcr_get_fill(p);
+       if (ret) {
+               dev_err(c->dev, "EQCR unclean\n");
+               goto fail_eqcr_empty;
+       }
+       isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+       qm_out(p, QM_REG_ISDR, isdr);
+       if (qm_dqrr_current(p)) {
+               dev_err(c->dev, "DQRR unclean\n");
+               qm_dqrr_cdc_consume_n(p, 0xffff);
+       }
+       if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+               /* special handling, drain just in case it's a few FQRNIs */
+               const union qm_mr_entry *e = qm_mr_current(p);
+
+               dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%x\n",
+                       e->verb, e->ern.rc, e->ern.fd.addr_lo);
+               goto fail_dqrr_mr_empty;
+       }
+       /* Success */
+       portal->config = c;
+       qm_out(p, QM_REG_ISDR, 0);
+       qm_out(p, QM_REG_IIR, 0);
+       /* Write a sane SDQCR */
+       qm_dqrr_sdqcr_set(p, portal->sdqcr);
+       return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+       free_irq(c->irq, portal);
+fail_irq:
+       platform_device_del(portal->pdev);
+fail_devadd:
+       platform_device_put(portal->pdev);
+fail_devalloc:
+       kfree(portal->cgrs);
+fail_cgrs:
+       qm_mc_finish(p);
+fail_mc:
+       qm_mr_finish(p);
+fail_mr:
+       qm_dqrr_finish(p);
+fail_dqrr:
+       qm_eqcr_finish(p);
+fail_eqcr:
+       return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+                                             const struct qman_cgrs *cgrs)
+{
+       struct qman_portal *portal;
+       int err;
+
+       portal = &per_cpu(qman_affine_portal, c->cpu);
+       err = qman_create_portal(portal, c, cgrs);
+       if (err)
+               return NULL;
+
+       spin_lock(&affine_mask_lock);
+       cpumask_set_cpu(c->cpu, &affine_mask);
+       affine_channels[c->cpu] = c->channel;
+       affine_portals[c->cpu] = portal;
+       spin_unlock(&affine_mask_lock);
+
+       return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+       const struct qm_portal_config *pcfg;
+
+       /* Stop dequeues on the portal */
+       qm_dqrr_sdqcr_set(&qm->p, 0);
+
+       /*
+        * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+        * something related to QM_PIRQ_EQCI, this may need fixing.
+        * Also, due to the prefetching model used for CI updates in the enqueue
+        * path, this update will only invalidate the CI cacheline *after*
+        * working on it, so we need to call this twice to ensure a full update
+        * irrespective of where the enqueue processing was at when the teardown
+        * began.
+        */
+       qm_eqcr_cce_update(&qm->p);
+       qm_eqcr_cce_update(&qm->p);
+       pcfg = qm->config;
+
+       free_irq(pcfg->irq, qm);
+
+       kfree(qm->cgrs);
+       qm_mc_finish(&qm->p);
+       qm_mr_finish(&qm->p);
+       qm_dqrr_finish(&qm->p);
+       qm_eqcr_finish(&qm->p);
+
+       platform_device_del(qm->pdev);
+       platform_device_put(qm->pdev);
+
+       qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+       struct qman_portal *qm = get_affine_portal();
+       const struct qm_portal_config *pcfg;
+       int cpu;
+
+       pcfg = qm->config;
+       cpu = pcfg->cpu;
+
+       qman_destroy_portal(qm);
+
+       spin_lock(&affine_mask_lock);
+       cpumask_clear_cpu(cpu, &affine_mask);
+       spin_unlock(&affine_mask_lock);
+       put_affine_portal();
+       return pcfg;
+}
+
+/* Inline helper to reduce nesting in qm_mr_process_task() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+                                  const union qm_mr_entry *msg, u8 verb)
+{
+       switch (verb) {
+       case QM_MR_VERB_FQRL:
+               DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+               fq_clear(fq, QMAN_FQ_STATE_ORL);
+               break;
+       case QM_MR_VERB_FQRN:
+               DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+                           fq->state == qman_fq_state_sched);
+               DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+               fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+               if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+                       fq_set(fq, QMAN_FQ_STATE_NE);
+               if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+                       fq_set(fq, QMAN_FQ_STATE_ORL);
+               fq->state = qman_fq_state_retired;
+               break;
+       case QM_MR_VERB_FQPN:
+               DPAA_ASSERT(fq->state == qman_fq_state_sched);
+               DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+               fq->state = qman_fq_state_parked;
+       }
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+       struct qman_portal *p = container_of(work, struct qman_portal,
+                                            congestion_work);
+       struct qman_cgrs rr, c;
+       union qm_mc_result *mcr;
+       struct qman_cgr *cgr;
+
+       spin_lock(&p->cgr_lock);
+       qm_mc_start(&p->p);
+       qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               spin_unlock(&p->cgr_lock);
+               dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+               return;
+       }
+       /* mask out the ones I'm not interested in */
+       qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+                     &p->cgrs[0]);
+       /* check previous snapshot for delta, enter/exit congestion */
+       qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+       /* update snapshot */
+       qman_cgrs_cp(&p->cgrs[1], &rr);
+       /* Invoke callback */
+       list_for_each_entry(cgr, &p->cgr_cbs, node)
+               if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+                       cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+       spin_unlock(&p->cgr_lock);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+       struct qman_portal *p = container_of(work, struct qman_portal,
+                                            mr_work);
+       const union qm_mr_entry *msg;
+       struct qman_fq *fq;
+       u8 verb, num = 0;
+
+       preempt_disable();
+
+       while (1) {
+               qm_mr_pvb_update(&p->p);
+               msg = qm_mr_current(&p->p);
+               if (!msg)
+                       break;
+
+               verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+               /* The message is a software ERN iff the 0x20 bit is clear */
+               if (verb & 0x20) {
+                       switch (verb) {
+                       case QM_MR_VERB_FQRNI:
+                               /* nada, we drop FQRNIs on the floor */
+                               break;
+                       case QM_MR_VERB_FQRN:
+                       case QM_MR_VERB_FQRL:
+                               /* Lookup in the retirement table */
+                               fq = fqid_to_fq(msg->fq.fqid);
+                               if (WARN_ON(!fq))
+                                       break;
+                               fq_state_change(p, fq, msg, verb);
+                               if (fq->cb.fqs)
+                                       fq->cb.fqs(p, fq, msg);
+                               break;
+                       case QM_MR_VERB_FQPN:
+                               /* Parked */
+                               fq = tag_to_fq(msg->fq.contextB);
+                               fq_state_change(p, fq, msg, verb);
+                               if (fq->cb.fqs)
+                                       fq->cb.fqs(p, fq, msg);
+                               break;
+                       case QM_MR_VERB_DC_ERN:
+                               /* DCP ERN */
+                               pr_crit_once("Leaking DCP ERNs!\n");
+                               break;
+                       default:
+                               pr_crit("Invalid MR verb 0x%02x\n", verb);
+                       }
+               } else {
+                       /* It's a software ERN */
+                       fq = tag_to_fq(msg->ern.tag);
+                       fq->cb.ern(p, fq, msg);
+               }
+               num++;
+               qm_mr_next(&p->p);
+       }
+
+       qm_mr_cci_consume(&p->p, num);
+       preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+       if (is & QM_PIRQ_CSCI) {
+               queue_work_on(smp_processor_id(), qm_portal_wq,
+                             &p->congestion_work);
+       }
+
+       if (is & QM_PIRQ_EQRI) {
+               qm_eqcr_cce_update(&p->p);
+               qm_eqcr_set_ithresh(&p->p, 0);
+               wake_up(&affine_queue);
+       }
+
+       if (is & QM_PIRQ_MRI) {
+               queue_work_on(smp_processor_id(), qm_portal_wq,
+                             &p->mr_work);
+       }
+
+       return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+       p->vdqcr_owned = NULL;
+       fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+       wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ *   (i) setting/clearing vdqcr_owned, and
+ *  (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ *      vdqcr_owned field (which it does before setting VDQCR), and
+ *      qman_volatile_dequeue() blocks interrupts and preemption while this is
+ *      done so that we can't interfere.
+ *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ *      with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+                                       unsigned int poll_limit)
+{
+       const struct qm_dqrr_entry *dq;
+       struct qman_fq *fq;
+       enum qman_cb_dqrr_result res;
+       unsigned int limit = 0;
+
+       do {
+               qm_dqrr_pvb_update(&p->p);
+               dq = qm_dqrr_current(&p->p);
+               if (!dq)
+                       break;
+
+               if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+                       /*
+                        * VDQCR: don't trust contextB as the FQ may have
+                        * been configured for h/w consumption and we're
+                        * draining it post-retirement.
+                        */
+                       fq = p->vdqcr_owned;
+                       /*
+                        * We only set QMAN_FQ_STATE_NE when retiring, so we
+                        * only need to check for clearing it when doing
+                        * volatile dequeues.  It's one less thing to check
+                        * in the critical path (SDQCR).
+                        */
+                       if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+                               fq_clear(fq, QMAN_FQ_STATE_NE);
+                       /*
+                        * This is duplicated from the SDQCR code, but we
+                        * have stuff to do before *and* after this callback,
+                        * and we don't want multiple if()s in the critical
+                        * path (SDQCR).
+                        */
+                       res = fq->cb.dqrr(p, fq, dq);
+                       if (res == qman_cb_dqrr_stop)
+                               break;
+                       /* Check for VDQCR completion */
+                       if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+                               clear_vdqcr(p, fq);
+               } else {
+                       /* SDQCR: contextB points to the FQ */
+                       fq = tag_to_fq(dq->contextB);
+                       /* Now let the callback do its stuff */
+                       res = fq->cb.dqrr(p, fq, dq);
+                       /*
+                        * The callback can request that we exit without
+                        * consuming this entry or advancing.
+                        */
+                       if (res == qman_cb_dqrr_stop)
+                               break;
+               }
+               /* Interpret 'dq' from a driver perspective. */
+               /*
+                * Parking isn't possible unless HELDACTIVE was set. NB,
+                * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+                * check for HELDACTIVE to cover both.
+                */
+               DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+                           (res != qman_cb_dqrr_park));
+               /* just means "skip it, I'll consume it myself later on" */
+               if (res != qman_cb_dqrr_defer)
+                       qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+                                                res == qman_cb_dqrr_park);
+               /* Move forward */
+               qm_dqrr_next(&p->p);
+               /*
+                * Entry processed and consumed, increment our counter.  The
+                * callback can request that we exit after consuming the
+                * entry, and we also exit if we reach our processing limit,
+                * so loop back only if neither of these conditions is met.
+                */
+       } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+       return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+       unsigned long irqflags;
+
+       local_irq_save(irqflags);
+       set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+       qm_out(&p->p, QM_REG_IER, p->irq_sources);
+       local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+       unsigned long irqflags;
+       u32 ier;
+
+       /*
+        * Our interrupt handler only processes+clears status register bits that
+        * are in p->irq_sources. As we're trimming that mask, if one of them
+        * were to assert in the status register just before we remove it from
+        * the enable register, there would be an interrupt-storm when we
+        * release the IRQ lock. So we wait for the enable register update to
+        * take effect in h/w (by reading it back) and then clear all other bits
+        * in the status register. Ie. we clear them from ISR once it's certain
+        * IER won't allow them to reassert.
+        */
+       local_irq_save(irqflags);
+       bits &= QM_PIRQ_VISIBLE;
+       clear_bits(bits, &p->irq_sources);
+       qm_out(&p->p, QM_REG_IER, p->irq_sources);
+       ier = qm_in(&p->p, QM_REG_IER);
+       /*
+        * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+        * data-dependency, ie. to protect against re-ordering.
+        */
+       qm_out(&p->p, QM_REG_ISR, ~ier);
+       local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+       return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+       if (cpu < 0) {
+               struct qman_portal *portal = get_affine_portal();
+
+               cpu = portal->config->cpu;
+               put_affine_portal();
+       }
+       WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+       return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+       return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+       return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
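+
+/*
+ * Usage sketch (illustrative only; "budget" is a caller-chosen value): a
+ * driver polling its affine portal from its own context might do
+ *
+ *	p = qman_get_affine_portal(smp_processor_id());
+ *	done = qman_p_poll_dqrr(p, budget);
+ *
+ * The return value is the number of DQRR entries processed (at most the
+ * supplied limit), which suits a NAPI-style budget loop.
+ */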
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+       unsigned long irqflags;
+
+       local_irq_save(irqflags);
+       pools &= p->config->pools;
+       p->sdqcr |= pools;
+       qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+       local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+       switch (result) {
+       case QM_MCR_RESULT_NULL:
+               return "QM_MCR_RESULT_NULL";
+       case QM_MCR_RESULT_OK:
+               return "QM_MCR_RESULT_OK";
+       case QM_MCR_RESULT_ERR_FQID:
+               return "QM_MCR_RESULT_ERR_FQID";
+       case QM_MCR_RESULT_ERR_FQSTATE:
+               return "QM_MCR_RESULT_ERR_FQSTATE";
+       case QM_MCR_RESULT_ERR_NOTEMPTY:
+               return "QM_MCR_RESULT_ERR_NOTEMPTY";
+       case QM_MCR_RESULT_PENDING:
+               return "QM_MCR_RESULT_PENDING";
+       case QM_MCR_RESULT_ERR_BADCOMMAND:
+               return "QM_MCR_RESULT_ERR_BADCOMMAND";
+       }
+       return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+       if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+               int ret = qman_alloc_fqid(&fqid);
+
+               if (ret)
+                       return ret;
+       }
+       fq->fqid = fqid;
+       fq->flags = flags;
+       fq->state = qman_fq_state_oos;
+       fq->cgr_groupid = 0;
+
+       /* A context_b of 0 is allegedly special, so don't use that fqid */
+       if (fqid == 0 || fqid >= num_fqids) {
+               WARN(1, "bad fqid %d\n", fqid);
+               return -EINVAL;
+       }
+
+       fq->idx = fqid * 2;
+       if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+               fq->idx++;
+
+       WARN_ON(fq_table[fq->idx]);
+       fq_table[fq->idx] = fq;
+
+       return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
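+
+/*
+ * Usage sketch (illustrative; "priv" and the callbacks are hypothetical):
+ *
+ *	fq = &priv->tx_fq;
+ *	fq->cb.dqrr = my_dqrr_cb;
+ *	fq->cb.ern = my_ern_cb;
+ *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ *
+ * With QMAN_FQ_FLAG_DYNAMIC_FQID a free FQID is allocated on the caller's
+ * behalf; otherwise the first argument is the FQID to use.
+ */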
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+       /*
+        * We don't need to lock the FQ as it is a pre-condition that the FQ be
+        * quiesced. Instead, run some checks.
+        */
+       switch (fq->state) {
+       case qman_fq_state_parked:
+       case qman_fq_state_oos:
+               if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+                       qman_release_fqid(fq->fqid);
+
+               DPAA_ASSERT(fq_table[fq->idx]);
+               fq_table[fq->idx] = NULL;
+               return;
+       default:
+               break;
+       }
+       DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+       return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p;
+       u8 res, myverb;
+       int ret = 0;
+
+       myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+               ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+       if (fq->state != qman_fq_state_oos &&
+           fq->state != qman_fq_state_parked)
+               return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+               return -EINVAL;
+#endif
+       if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+               /* OAC can't be set at the same time as TDTHRESH */
+               if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+                       return -EINVAL;
+       }
+       /* Issue an INITFQ_[PARKED|SCHED] management command */
+       p = get_affine_portal();
+       if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+           (fq->state != qman_fq_state_oos &&
+            fq->state != qman_fq_state_parked)) {
+               ret = -EBUSY;
+               goto out;
+       }
+       mcc = qm_mc_start(&p->p);
+       if (opts)
+               mcc->initfq = *opts;
+       mcc->initfq.fqid = fq->fqid;
+       mcc->initfq.count = 0;
+       /*
+        * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+        * demux pointer. Otherwise, the caller-provided value is allowed to
+        * stand, don't overwrite it.
+        */
+       if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+               dma_addr_t phys_fq;
+
+               mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+               mcc->initfq.fqd.context_b = fq_to_tag(fq);
+               /*
+                * ... and the physical address. NB: if the user wasn't trying
+                * to set CONTEXTA, clear the stashing settings.
+                */
+               if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+                       mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+                       memset(&mcc->initfq.fqd.context_a, 0,
+                               sizeof(mcc->initfq.fqd.context_a));
+               } else {
+                       phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+                                                DMA_TO_DEVICE);
+                       qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+               }
+       }
+       if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+               int wq = 0;
+
+               if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+                       mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+                       wq = 4;
+               }
+               qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+       }
+       qm_mc_commit(&p->p, myverb);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               dev_err(p->config->dev, "MCR timeout\n");
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+       res = mcr->result;
+       if (res != QM_MCR_RESULT_OK) {
+               ret = -EIO;
+               goto out;
+       }
+       if (opts) {
+               if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+                       if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+                               fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+                       else
+                               fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+               }
+               if (opts->we_mask & QM_INITFQ_WE_CGID)
+                       fq->cgr_groupid = opts->fqd.cgid;
+       }
+       fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+               qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+       put_affine_portal();
+       return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
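+
+/*
+ * Usage sketch (illustrative; the congestion-group policy shown is just an
+ * example): a typical follow-up to qman_create_fq() is
+ *
+ *	struct qm_mcc_initfq opts = { 0 };
+ *
+ *	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CGID;
+ *	opts.fqd.fq_ctrl = QM_FQCTRL_CGE;
+ *	opts.fqd.cgid = my_cgr.cgrid;
+ *	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ *
+ * QMAN_INITFQ_FLAG_SCHED initialises and schedules the FQ in one management
+ * command; without it the FQ is left parked.
+ */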
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p;
+       int ret = 0;
+
+       if (fq->state != qman_fq_state_parked)
+               return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+               return -EINVAL;
+#endif
+       /* Issue an ALTERFQ_SCHED management command */
+       p = get_affine_portal();
+       if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+           fq->state != qman_fq_state_parked) {
+               ret = -EBUSY;
+               goto out;
+       }
+       mcc = qm_mc_start(&p->p);
+       mcc->alterfq.fqid = fq->fqid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+       if (mcr->result != QM_MCR_RESULT_OK) {
+               ret = -EIO;
+               goto out;
+       }
+       fq->state = qman_fq_state_sched;
+out:
+       put_affine_portal();
+       return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p;
+       int ret;
+       u8 res;
+
+       if (fq->state != qman_fq_state_parked &&
+           fq->state != qman_fq_state_sched)
+               return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+               return -EINVAL;
+#endif
+       p = get_affine_portal();
+       if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+           fq->state == qman_fq_state_retired ||
+           fq->state == qman_fq_state_oos) {
+               ret = -EBUSY;
+               goto out;
+       }
+       mcc = qm_mc_start(&p->p);
+       mcc->alterfq.fqid = fq->fqid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+       res = mcr->result;
+       /*
+        * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+        * and defer the flags until FQRNI or FQRN (respectively) show up. But
+        * "Friendly" is to process OK immediately, and not set CHANGING. We do
+        * friendly, otherwise the caller doesn't necessarily have a fully
+        * "retired" FQ on return even if the retirement was immediate. However
+        * this does mean some code duplication between here and
+        * fq_state_change().
+        */
+       if (res == QM_MCR_RESULT_OK) {
+               ret = 0;
+               /* Process 'fq' right away, we'll ignore FQRNI */
+               if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+                       fq_set(fq, QMAN_FQ_STATE_NE);
+               if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+                       fq_set(fq, QMAN_FQ_STATE_ORL);
+               if (flags)
+                       *flags = fq->flags;
+               fq->state = qman_fq_state_retired;
+               if (fq->cb.fqs) {
+                       /*
+                        * Another issue with supporting "immediate" retirement
+                        * is that we're forced to drop FQRNIs, because by the
+                        * time they're seen it may already be "too late" (the
+                        * fq may have been OOS'd and free()'d already). But if
+                        * the upper layer wants a callback whether it's
+                        * immediate or not, we have to fake a "MR" entry to
+                        * look like an FQRNI...
+                        */
+                       union qm_mr_entry msg;
+
+                       msg.verb = QM_MR_VERB_FQRNI;
+                       msg.fq.fqs = mcr->alterfq.fqs;
+                       msg.fq.fqid = fq->fqid;
+                       msg.fq.contextB = fq_to_tag(fq);
+                       fq->cb.fqs(p, fq, &msg);
+               }
+       } else if (res == QM_MCR_RESULT_PENDING) {
+               ret = 1;
+               fq_set(fq, QMAN_FQ_STATE_CHANGING);
+       } else {
+               ret = -EIO;
+       }
+out:
+       put_affine_portal();
+       return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p;
+       int ret = 0;
+
+       if (fq->state != qman_fq_state_retired)
+               return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+               return -EINVAL;
+#endif
+       p = get_affine_portal();
+       if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+           fq->state != qman_fq_state_retired) {
+               ret = -EBUSY;
+               goto out;
+       }
+       mcc = qm_mc_start(&p->p);
+       mcc->alterfq.fqid = fq->fqid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+       if (mcr->result != QM_MCR_RESULT_OK) {
+               ret = -EIO;
+               goto out;
+       }
+       fq->state = qman_fq_state_oos;
+out:
+       put_affine_portal();
+       return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p = get_affine_portal();
+       int ret = 0;
+
+       mcc = qm_mc_start(&p->p);
+       mcc->queryfq.fqid = fq->fqid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+       if (mcr->result == QM_MCR_RESULT_OK)
+               *fqd = mcr->queryfq.fqd;
+       else
+               ret = -EIO;
+out:
+       put_affine_portal();
+       return ret;
+}
+
+static int qman_query_fq_np(struct qman_fq *fq,
+                           struct qm_mcr_queryfq_np *np)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p = get_affine_portal();
+       int ret = 0;
+
+       mcc = qm_mc_start(&p->p);
+       mcc->queryfq.fqid = fq->fqid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+       if (mcr->result == QM_MCR_RESULT_OK)
+               *np = mcr->queryfq_np;
+       else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+               ret = -ERANGE;
+       else
+               ret = -EIO;
+out:
+       put_affine_portal();
+       return ret;
+}
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+                         struct qm_mcr_querycgr *cgrd)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p = get_affine_portal();
+       int ret = 0;
+
+       mcc = qm_mc_start(&p->p);
+       mcc->querycgr.cgid = cgr->cgrid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+       if (mcr->result == QM_MCR_RESULT_OK)
+               *cgrd = mcr->querycgr;
+       else {
+               dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+                       mcr_result_str(mcr->result));
+               ret = -EIO;
+       }
+out:
+       put_affine_portal();
+       return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+       struct qm_mcr_querycgr query_cgr;
+       int err;
+
+       err = qman_query_cgr(cgr, &query_cgr);
+       if (err)
+               return err;
+
+       *result = !!query_cgr.cgr.cs;
+       return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+       unsigned long irqflags;
+       int ret = -EBUSY;
+
+       local_irq_save(irqflags);
+       if (p->vdqcr_owned)
+               goto out;
+       if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+               goto out;
+
+       fq_set(fq, QMAN_FQ_STATE_VDQCR);
+       p->vdqcr_owned = fq;
+       qm_dqrr_vdqcr_set(&p->p, vdqcr);
+       ret = 0;
+out:
+       local_irq_restore(irqflags);
+       return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+       int ret;
+
+       *p = get_affine_portal();
+       ret = set_p_vdqcr(*p, fq, vdqcr);
+       put_affine_portal();
+       return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+                               u32 vdqcr, u32 flags)
+{
+       int ret = 0;
+
+       if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+               ret = wait_event_interruptible(affine_queue,
+                               !set_vdqcr(p, fq, vdqcr));
+       else
+               wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+       return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+       struct qman_portal *p;
+       int ret;
+
+       if (fq->state != qman_fq_state_parked &&
+           fq->state != qman_fq_state_retired)
+               return -EINVAL;
+       if (vdqcr & QM_VDQCR_FQID_MASK)
+               return -EINVAL;
+       if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+               return -EBUSY;
+       vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+       if (flags & QMAN_VOLATILE_FLAG_WAIT)
+               ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+       else
+               ret = set_vdqcr(&p, fq, vdqcr);
+       if (ret)
+               return ret;
+       /* VDQCR is set */
+       if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+               if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+                       /*
+                        * NB: don't propagate any error - the caller wouldn't
+                        * know whether the VDQCR was issued or not. A signal
+                        * could arrive after returning anyway, so the caller
+                        * can check signal_pending() if that's an issue.
+                        */
+                       wait_event_interruptible(affine_queue,
+                               !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+               else
+                       wait_event(affine_queue,
+                               !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+       }
+       return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
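+
+/*
+ * Usage sketch (illustrative): draining a retired FQ with a blocking volatile
+ * dequeue of up to three frames at a time:
+ *
+ *	err = qman_volatile_dequeue(fq,
+ *			QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
+ *			QM_VDQCR_NUMFRAMES_SET(3));
+ *
+ * The FQID bits of vdqcr must be left clear; they are filled in from fq.
+ */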
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+       if (avail)
+               qm_eqcr_cce_prefetch(&p->p);
+       else
+               qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+       struct qman_portal *p;
+       struct qm_eqcr_entry *eq;
+       unsigned long irqflags;
+       u8 avail;
+
+       p = get_affine_portal();
+       local_irq_save(irqflags);
+
+       if (p->use_eqcr_ci_stashing) {
+               /*
+                * The stashing case is easy, only update if we need to in
+                * order to try and liberate ring entries.
+                */
+               eq = qm_eqcr_start_stash(&p->p);
+       } else {
+               /*
+                * The non-stashing case is harder, need to prefetch ahead of
+                * time.
+                */
+               avail = qm_eqcr_get_avail(&p->p);
+               if (avail < 2)
+                       update_eqcr_ci(p, avail);
+               eq = qm_eqcr_start_no_stash(&p->p);
+       }
+
+       if (unlikely(!eq))
+               goto out;
+
+       eq->fqid = fq->fqid;
+       eq->tag = fq_to_tag(fq);
+       eq->fd = *fd;
+
+       qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+       local_irq_restore(irqflags);
+       put_affine_portal();
+       return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
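+
+/*
+ * Usage sketch (illustrative): enqueuing a frame descriptor the caller has
+ * already filled in:
+ *
+ *	struct qm_fd fd;
+ *
+ *	... populate fd (format, address, length) ...
+ *	err = qman_enqueue(fq, &fd);
+ *
+ * Note that, as written above, qman_enqueue() returns 0 even when no EQCR
+ * entry was available, so callers needing back-pressure must detect it by
+ * other means.
+ */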
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+                        struct qm_mcc_initcgr *opts)
+{
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       struct qman_portal *p = get_affine_portal();
+       u8 verb = QM_MCC_VERB_MODIFYCGR;
+       int ret = 0;
+
+       mcc = qm_mc_start(&p->p);
+       if (opts)
+               mcc->initcgr = *opts;
+       mcc->initcgr.cgid = cgr->cgrid;
+       if (flags & QMAN_CGR_FLAG_USE_INIT)
+               verb = QM_MCC_VERB_INITCGR;
+       qm_mc_commit(&p->p, verb);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+       if (mcr->result != QM_MCR_RESULT_OK)
+               ret = -EIO;
+
+out:
+       put_affine_portal();
+       return ret;
+}
+
+#define PORTAL_IDX(n)  (n->config->channel - QM_CHANNEL_SWPORTAL0)
+#define TARG_MASK(n)   (BIT(31) >> PORTAL_IDX(n))
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+       struct qman_cgr cgr;
+       int err_cnt = 0;
+
+       for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+               if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+                       err_cnt++;
+       }
+
+       if (err_cnt)
+               pr_err("Warning: %d error%s while initialising CGR h/w\n",
+                      err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+                   struct qm_mcc_initcgr *opts)
+{
+       struct qm_mcr_querycgr cgr_state;
+       struct qm_mcc_initcgr local_opts = {};
+       int ret;
+       struct qman_portal *p;
+
+       /*
+        * We have to check that the provided CGRID is within the limits of the
+        * data-structures, for obvious reasons. However we'll let h/w take
+        * care of determining whether it's within the limits of what exists on
+        * the SoC.
+        */
+       if (cgr->cgrid >= CGR_NUM)
+               return -EINVAL;
+
+       preempt_disable();
+       p = get_affine_portal();
+       qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+       preempt_enable();
+
+       cgr->chan = p->config->channel;
+       spin_lock(&p->cgr_lock);
+
+       if (opts) {
+               ret = qman_query_cgr(cgr, &cgr_state);
+               if (ret)
+                       goto out;
+               local_opts = *opts;
+               if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+                       local_opts.cgr.cscn_targ_upd_ctrl =
+                               QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+               else
+                       /* Overwrite TARG */
+                       local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+                                                  TARG_MASK(p);
+               local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+               /* send init if flags indicate so */
+               if (flags & QMAN_CGR_FLAG_USE_INIT)
+                       ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+                                           &local_opts);
+               else
+                       ret = qm_modify_cgr(cgr, 0, &local_opts);
+               if (ret)
+                       goto out;
+       }
+
+       list_add(&cgr->node, &p->cgr_cbs);
+
+       /* Determine if newly added object requires its callback to be called */
+       ret = qman_query_cgr(cgr, &cgr_state);
+       if (ret) {
+               /* we can't go back, so proceed and return success */
+               dev_err(p->config->dev, "CGR HW state partially modified\n");
+               ret = 0;
+               goto out;
+       }
+       if (cgr->cb && cgr_state.cgr.cscn_en &&
+           qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+               cgr->cb(p, cgr, 1);
+out:
+       spin_unlock(&p->cgr_lock);
+       put_affine_portal();
+       return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
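+
+/*
+ * Usage sketch (illustrative; "my_cgr", "my_cscn" and "init_opts" are
+ * hypothetical):
+ *
+ *	my_cgr.cgrid = id;	(e.g. from qman_alloc_cgrid_range())
+ *	my_cgr.cb = my_cscn;	(invoked with 1 on entering congestion, 0 on exit)
+ *	err = qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &init_opts);
+ */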
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+       unsigned long irqflags;
+       struct qm_mcr_querycgr cgr_state;
+       struct qm_mcc_initcgr local_opts;
+       int ret = 0;
+       struct qman_cgr *i;
+       struct qman_portal *p = get_affine_portal();
+
+       if (cgr->chan != p->config->channel) {
+               /* attempt to delete from other portal than creator */
+               dev_err(p->config->dev, "CGR not owned by current portal");
+               dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+                       cgr->chan, p->config->channel);
+
+               ret = -EINVAL;
+               goto put_portal;
+       }
+       memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+       spin_lock_irqsave(&p->cgr_lock, irqflags);
+       list_del(&cgr->node);
+       /*
+        * If there are no other CGR objects for this CGRID in the list,
+        * update CSCN_TARG accordingly
+        */
+       list_for_each_entry(i, &p->cgr_cbs, node)
+               if (i->cgrid == cgr->cgrid && i->cb)
+                       goto release_lock;
+       ret = qman_query_cgr(cgr, &cgr_state);
+       if (ret)  {
+               /* add back to the list */
+               list_add(&cgr->node, &p->cgr_cbs);
+               goto release_lock;
+       }
+       /* Overwrite TARG */
+       local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+       if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+               local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+       else
+               local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+                                                        ~(TARG_MASK(p));
+       ret = qm_modify_cgr(cgr, 0, &local_opts);
+       if (ret)
+               /* add back to the list */
+               list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+       spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+       put_affine_portal();
+       return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+       struct qman_cgr *cgr;
+       struct completion completion;
+};
+
+static int qman_delete_cgr_thread(void *p)
+{
+       struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+       int ret;
+
+       ret = qman_delete_cgr(cgr_comp->cgr);
+       complete(&cgr_comp->completion);
+
+       return ret;
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+       struct task_struct *thread;
+       struct cgr_comp cgr_comp;
+
+       preempt_disable();
+       if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+               init_completion(&cgr_comp.completion);
+               cgr_comp.cgr = cgr;
+               thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+                                       "cgr_del");
+
+               if (IS_ERR(thread))
+                       goto out;
+
+               kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+               wake_up_process(thread);
+               wait_for_completion(&cgr_comp.completion);
+               preempt_enable();
+               return;
+       }
+out:
+       qman_delete_cgr(cgr);
+       preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+       const union qm_mr_entry *msg;
+       int found = 0;
+
+       qm_mr_pvb_update(p);
+       msg = qm_mr_current(p);
+       while (msg) {
+               if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+                       found = 1;
+               qm_mr_next(p);
+               qm_mr_cci_consume_to_current(p);
+               qm_mr_pvb_update(p);
+               msg = qm_mr_current(p);
+       }
+       return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+                                     bool wait)
+{
+       const struct qm_dqrr_entry *dqrr;
+       int found = 0;
+
+       do {
+               qm_dqrr_pvb_update(p);
+               dqrr = qm_dqrr_current(p);
+               if (!dqrr)
+                       cpu_relax();
+       } while (wait && !dqrr);
+
+       while (dqrr) {
+               if (dqrr->fqid == fqid && (dqrr->stat & s))
+                       found = 1;
+               qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+               qm_dqrr_pvb_update(p);
+               qm_dqrr_next(p);
+               dqrr = qm_dqrr_current(p);
+       }
+       return found;
+}
+
+#define qm_mr_drain(p, V) \
+       _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+       _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+       _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+       _qm_dqrr_consume_and_match(p, 0, 0, false)
+
+static int qman_shutdown_fq(u32 fqid)
+{
+       struct qman_portal *p;
+       struct device *dev;
+       union qm_mc_command *mcc;
+       union qm_mc_result *mcr;
+       int orl_empty, drain = 0, ret = 0;
+       u32 channel, wq, res;
+       u8 state;
+
+       p = get_affine_portal();
+       dev = p->config->dev;
+       /* Determine the state of the FQID */
+       mcc = qm_mc_start(&p->p);
+       mcc->queryfq_np.fqid = fqid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               dev_err(dev, "QUERYFQ_NP timeout\n");
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+       state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+       if (state == QM_MCR_NP_STATE_OOS)
+               goto out; /* Already OOS, no need to do any more checks */
+
+       /* Query which channel the FQ is using */
+       mcc = qm_mc_start(&p->p);
+       mcc->queryfq.fqid = fqid;
+       qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+       if (!qm_mc_result_timeout(&p->p, &mcr)) {
+               dev_err(dev, "QUERYFQ timeout\n");
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+       /* Need to store these since the MCR gets reused */
+       channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+       wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+       switch (state) {
+       case QM_MCR_NP_STATE_TEN_SCHED:
+       case QM_MCR_NP_STATE_TRU_SCHED:
+       case QM_MCR_NP_STATE_ACTIVE:
+       case QM_MCR_NP_STATE_PARKED:
+               orl_empty = 0;
+               mcc = qm_mc_start(&p->p);
+               mcc->alterfq.fqid = fqid;
+               qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+               if (!qm_mc_result_timeout(&p->p, &mcr)) {
+                       dev_err(dev, "ALTER_RETIRE timeout\n");
+                       ret = -ETIMEDOUT;
+                       goto out;
+               }
+               DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+                           QM_MCR_VERB_ALTER_RETIRE);
+               res = mcr->result; /* Make a copy as we reuse MCR below */
+
+               if (res == QM_MCR_RESULT_PENDING) {
+                       /*
+                        * Need to wait for the FQRN in the message ring, which
+                        * will only occur once the FQ has been drained. In
+                        * order for the FQ to drain, the portal needs to be set
+                        * to dequeue from the channel the FQ is scheduled on.
+                        */
+                       int found_fqrn = 0;
+                       u16 dequeue_wq = 0;
+
+                       /* Flag that we need to drain FQ */
+                       drain = 1;
+
+                       if (channel >= qm_channel_pool1 &&
+                           channel < qm_channel_pool1 + 15) {
+                               /* Pool channel, enable the bit in the portal */
+                               dequeue_wq = (channel -
+                                             qm_channel_pool1 + 1)<<4 | wq;
+                       } else if (channel < qm_channel_pool1) {
+                               /* Dedicated channel */
+                               dequeue_wq = wq;
+                       } else {
+                               dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+                                       fqid, channel);
+                               ret = -EBUSY;
+                               goto out;
+                       }
+                       /* Set the sdqcr to drain this channel */
+                       if (channel < qm_channel_pool1)
+                               qm_dqrr_sdqcr_set(&p->p,
+                                                 QM_SDQCR_TYPE_ACTIVE |
+                                                 QM_SDQCR_CHANNELS_DEDICATED);
+                       else
+                               qm_dqrr_sdqcr_set(&p->p,
+                                                 QM_SDQCR_TYPE_ACTIVE |
+                                                 QM_SDQCR_CHANNELS_POOL_CONV
+                                                 (channel));
+                       do {
+                               /* Keep draining DQRR while checking the MR */
+                               qm_dqrr_drain_nomatch(&p->p);
+                               /* Process message ring too */
+                               found_fqrn = qm_mr_drain(&p->p, FQRN);
+                               cpu_relax();
+                       } while (!found_fqrn);
+
+               }
+               if (res != QM_MCR_RESULT_OK &&
+                   res != QM_MCR_RESULT_PENDING) {
+                       dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+                               fqid, res);
+                       ret = -EIO;
+                       goto out;
+               }
+               if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+                       /*
+                        * ORL had no entries, no need to wait until the
+                        * ERNs come in
+                        */
+                       orl_empty = 1;
+               }
+               /*
+                * Retirement succeeded, check to see if FQ needs
+                * to be drained
+                */
+               if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+                       /* FQ is Not Empty, drain using volatile DQ commands */
+                       do {
+                               u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+                               qm_dqrr_vdqcr_set(&p->p, vdqcr);
+                               /*
+                                * Wait for a dequeue and process the dequeues,
+                                * making sure to empty the ring completely
+                                */
+                       } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+               }
+               qm_dqrr_sdqcr_set(&p->p, 0);
+
+               while (!orl_empty) {
+                       /* Wait for the ORL to have been completely drained */
+                       orl_empty = qm_mr_drain(&p->p, FQRL);
+                       cpu_relax();
+               }
+               mcc = qm_mc_start(&p->p);
+               mcc->alterfq.fqid = fqid;
+               qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+               if (!qm_mc_result_timeout(&p->p, &mcr)) {
+                       ret = -ETIMEDOUT;
+                       goto out;
+               }
+
+               DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+                           QM_MCR_VERB_ALTER_OOS);
+               if (mcr->result != QM_MCR_RESULT_OK) {
+                       dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+                               fqid, mcr->result);
+                       ret = -EIO;
+                       goto out;
+               }
+               break;
+
+       case QM_MCR_NP_STATE_RETIRED:
+               /* Send OOS Command */
+               mcc = qm_mc_start(&p->p);
+               mcc->alterfq.fqid = fqid;
+               qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+               if (!qm_mc_result_timeout(&p->p, &mcr)) {
+                       ret = -ETIMEDOUT;
+                       goto out;
+               }
+
+               DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+                           QM_MCR_VERB_ALTER_OOS);
+               if (mcr->result) {
+                       dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+                               fqid, mcr->result);
+                       ret = -EIO;
+                       goto out;
+               }
+               break;
+
+       case QM_MCR_NP_STATE_OOS:
+               /*  Done */
+               break;
+
+       default:
+               ret = -EIO;
+       }
+
+out:
+       put_affine_portal();
+       return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+                                               struct qman_portal *portal)
+{
+       return portal->config;
+}
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+       unsigned long addr;
+
+       addr = gen_pool_alloc(p, cnt);
+       if (!addr)
+               return -ENOMEM;
+
+       *result = addr & ~DPAA_GENALLOC_OFF;
+
+       return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+       return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+       return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+       return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
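+
+/*
+ * Usage sketch (illustrative): the three allocators share one calling
+ * convention; *result receives the first ID of the allocated range:
+ *
+ *	u32 fqid;
+ *
+ *	if (!qman_alloc_fqid_range(&fqid, 1))
+ *		the single FQID in "fqid" is now reserved for this caller
+ */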
+
+int qman_release_fqid(u32 fqid)
+{
+       int ret = qman_shutdown_fq(fqid);
+
+       if (ret) {
+               pr_debug("FQID %d leaked\n", fqid);
+               return ret;
+       }
+
+       gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+       return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+       /*
+        * We query all FQDs starting from FQID 1 until we get an
+        * "invalid FQID" error, looking for non-OOS FQDs whose destination
+        * channel is the pool-channel being released. When a non-OOS FQD is
+        * found we attempt to clean it up.
+        */
+       struct qman_fq fq = {
+               .fqid = QM_FQID_RANGE_START
+       };
+       int err;
+
+       do {
+               struct qm_mcr_queryfq_np np;
+
+               err = qman_query_fq_np(&fq, &np);
+               if (err)
+                       /* FQID range exceeded, found no problems */
+                       return 0;
+               if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+                       struct qm_fqd fqd;
+
+                       err = qman_query_fq(&fq, &fqd);
+                       if (WARN_ON(err))
+                               return 0;
+                       if (qm_fqd_get_chan(&fqd) == qp) {
+                               /* The channel is the FQ's target, clean it */
+                               err = qman_shutdown_fq(fq.fqid);
+                               if (err)
+                                       /*
+                                        * Couldn't shut down the FQ
+                                        * so the pool must be leaked
+                                        */
+                                       return err;
+                       }
+               }
+               /* Move to the next FQID */
+               fq.fqid++;
+       } while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+       int ret;
+
+       ret = qpool_cleanup(qp);
+       if (ret) {
+               pr_debug("CHID %d leaked\n", qp);
+               return ret;
+       }
+
+       gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+       return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+       /*
+        * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+        * error, looking for non-OOS FQDs whose CGR is the CGR being released
+        */
+       struct qman_fq fq = {
+               .fqid = 1
+       };
+       int err;
+
+       do {
+               struct qm_mcr_queryfq_np np;
+
+               err = qman_query_fq_np(&fq, &np);
+               if (err)
+                       /* FQID range exceeded, found no problems */
+                       return 0;
+               if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+                       struct qm_fqd fqd;
+
+                       err = qman_query_fq(&fq, &fqd);
+                       if (WARN_ON(err))
+                               return 0;
+                       if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+                           fqd.cgid == cgrid) {
+                               pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+                                      cgrid, fq.fqid);
+                               return -EIO;
+                       }
+               }
+               /* Move to the next FQID */
+               fq.fqid++;
+       } while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+       int ret;
+
+       ret = cgr_cleanup(cgrid);
+       if (ret) {
+               pr_debug("CGRID %d leaked\n", cgrid);
+               return ret;
+       }
+
+       gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+       return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644 (file)
index 0000000..0cace9e
--- /dev/null
@@ -0,0 +1,808 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n)    (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n)     (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n)     (0x000c + ((n) * 0x10))
+#define REG_DD_CFG             0x0200
+#define REG_DCP_CFG(n)         (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n)      (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n)     (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC           0x0400
+#define REG_PFDR_FP_HEAD       0x0404
+#define REG_PFDR_FP_TAIL       0x0408
+#define REG_PFDR_FP_LWIT       0x0410
+#define REG_PFDR_CFG           0x0414
+#define REG_SFDR_CFG           0x0500
+#define REG_SFDR_IN_USE                0x0504
+#define REG_WQ_CS_CFG(n)       (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID    0x0630
+#define REG_WQ_SC_DD_CFG(n)    (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n)    (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n)   (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n)   (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n)   (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG             0x0800
+#define REG_ECSR               0x0a00
+#define REG_ECIR               0x0a04
+#define REG_EADR               0x0a08
+#define REG_ECIR2              0x0a0c
+#define REG_EDATA(n)           (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)            (0x0a80 + ((n) * 0x04))
+#define REG_MCR                        0x0b00
+#define REG_MCP(n)             (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG           0x0be0
+#define REG_HID_CFG            0x0bf0
+#define REG_IDLE_STAT          0x0bf4
+#define REG_IP_REV_1           0x0bf8
+#define REG_IP_REV_2           0x0bfc
+#define REG_FQD_BARE           0x0c00
+#define REG_PFDR_BARE          0x0c20
+#define REG_offset_BAR         0x0004  /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR          0x0010  /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE          0x0c80
+#define REG_QCSP_BAR           0x0c84
+#define REG_CI_SCHED_CFG       0x0d00
+#define REG_SRCIDR             0x0d04
+#define REG_LIODNR             0x0d08
+#define REG_CI_RLM_AVG         0x0d14
+#define REG_ERR_ISR            0x0e00
+#define REG_ERR_IER            0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n)       (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n)        (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n)        (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR          0x01000000
+#define MCR_get_rslt(v)                (u8)((v) >> 24)
+#define MCR_rslt_idle(r)       (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r)         ((r) == 0xf0)
+#define MCR_rslt_eaccess(r)    ((r) == 0xf8)
+#define MCR_rslt_inval(r)      ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV         4
+#define QM_CI_SCHED_CFG_SRQ_W          3
+#define QM_CI_SCHED_CFG_RW_W           2
+#define QM_CI_SCHED_CFG_BMAN_W         2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN      BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+       qm_wq_portal = 0,
+       qm_wq_pool = 1,
+       qm_wq_fman0 = 2,
+       qm_wq_fman1 = 3,
+       qm_wq_caam = 4,
+       qm_wq_pme = 5,
+       qm_wq_first = qm_wq_portal,
+       qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+       qm_memory_fqd,
+       qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE   0x20000000      /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE   0x10000000      /* Corenet Target Data Error */
+#define QM_EIRQ_CITT   0x08000000      /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI   0x04000000      /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI   0x02000000      /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI   0x01000000      /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI   0x00800000      /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI   0x00020000      /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI   0x00010000      /* Invalid Command Verb */
+#define QM_EIRQ_IDDI   0x00000800      /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI   0x00000400      /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI   0x00000200      /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI   0x00000100      /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE   0x00000010      /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI   0x00000008      /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI   0x00000004      /* Invalid Enqueue State */
+#define QM_EIRQ_IECI   0x00000002      /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI   0x00000001      /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR        (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+                        QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+                        QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR  (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+                        QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+                        QM_EIRQ_IFSI)
+
+struct qm_ecir {
+       u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+       return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+       return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+       return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+       u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+       return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+       return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+       u32 info; /* memid[24-27], eadr[0-11] */
+                 /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+       return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+       return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+       return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+       return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+       u32 mask;
+       const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+       { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+       { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+       { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+       { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+       { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+       { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+       { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+       { QM_EIRQ_ICVI, "Invalid Command Verb" },
+       { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+       { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+       { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+       { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+       { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+       { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+       { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+       { QM_EIRQ_IESI, "Invalid Enqueue State" },
+       { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+       { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+       u16 addr_mask;
+       u16 bits;
+       const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+       { 0x01FF, 24, "FQD cache tag memory 0" },
+       { 0x01FF, 24, "FQD cache tag memory 1" },
+       { 0x01FF, 24, "FQD cache tag memory 2" },
+       { 0x01FF, 24, "FQD cache tag memory 3" },
+       { 0x0FFF, 512, "FQD cache memory" },
+       { 0x07FF, 128, "SFDR memory" },
+       { 0x01FF, 72, "WQ context memory" },
+       { 0x00FF, 240, "CGR memory" },
+       { 0x00FF, 302, "Internal Order Restoration List memory" },
+       { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+
+static inline u32 qm_ccsr_in(u32 offset)
+{
+       return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+       iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+       return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+       qm_dc_portal_fman0 = 0,
+       qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+       DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+                   portal == qm_dc_portal_fman1);
+       if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+               qm_ccsr_out(REG_DCP_CFG(portal),
+                           (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+       else
+               qm_ccsr_out(REG_DCP_CFG(portal),
+                           (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+                                u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+                                u8 csw5, u8 csw6, u8 csw7)
+{
+       qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+                   ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+                   ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+                   ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+       qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+       qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+                   (QM_CI_SCHED_CFG_SRCCIV << 24) |
+                   (QM_CI_SCHED_CFG_SRQ_W << 8) |
+                   (QM_CI_SCHED_CFG_RW_W << 4) |
+                   QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+       u32 v = qm_ccsr_in(REG_IP_REV_1);
+       *id = (v >> 16);
+       *major = (v >> 8) & 0xff;
+       *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN             BIT(31)
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+       u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+       u32 exp = ilog2(size);
+
+       /* choke if size isn't within range */
+       DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+                   is_power_of_2(size));
+       /* choke if 'ba' has lower-alignment than 'size' */
+       DPAA_ASSERT(!(ba & (size - 1)));
+       qm_ccsr_out(offset, upper_32_bits(ba));
+       qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+       qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+       qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+       qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+       qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+       u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+       DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+       /* Make sure the command interface is 'idle' */
+       if (!MCR_rslt_idle(rslt)) {
+               dev_crit(dev, "QMAN_MCR isn't idle");
+               WARN_ON(1);
+       }
+
+       /* Write the MCR command params then the verb */
+       qm_ccsr_out(REG_MCP(0), pfdr_start);
+       /*
+        * TODO: remove this - it's a workaround for a model bug that is
+        * corrected in more recent versions. We use the workaround until
+        * everyone has upgraded.
+        */
+       qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+       dma_wmb();
+       qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+       /* Poll for the result */
+       do {
+               rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+       } while (!MCR_rslt_idle(rslt));
+       if (MCR_rslt_ok(rslt))
+               return 0;
+       if (MCR_rslt_eaccess(rslt))
+               return -EACCES;
+       if (MCR_rslt_inval(rslt))
+               return -EINVAL;
+       dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+       return -ENODEV;
+}
+
+/*
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved).  Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+       fqd_a = rmem->base;
+       fqd_sz = rmem->size;
+
+       WARN_ON(!(fqd_a && fqd_sz));
+
+       return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+       pfdr_a = rmem->base;
+       pfdr_sz = rmem->size;
+
+       WARN_ON(!(pfdr_a && pfdr_sz));
+
+       return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+       return fqd_sz / 64;
+}
+
+/*
+ * Flush this memory range from the data cache so that QMan-originated
+ * transactions for this memory region can be marked non-coherent.
+ */
+static int zero_priv_mem(struct device *dev, struct device_node *node,
+                        phys_addr_t addr, size_t sz)
+{
+       /* map as cacheable, non-guarded */
+       void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+       memset_io(tmpp, 0, sz);
+       flush_dcache_range((unsigned long)tmpp,
+                          (unsigned long)tmpp + sz);
+       iounmap(tmpp);
+
+       return 0;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+       u32 i, j, mask = 0xffffffff;
+
+       dev_warn(dev, "ErrInt, EDATA:\n");
+       i = bit_count / 32;
+       if (bit_count % 32) {
+               i++;
+               mask = ~(mask << bit_count % 32);
+       }
+       j = 16 - i;
+       dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+       j++;
+       for (; j < 16; j++)
+               dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+                                     u32 ecsr_val)
+{
+       struct qm_ecir ecir_val;
+       struct qm_eadr eadr_val;
+       int memid;
+
+       ecir_val.info = qm_ccsr_in(REG_ECIR);
+       /* Is portal info valid */
+       if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+               struct qm_ecir2 ecir2_val;
+
+               ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+               if (ecsr_val & PORTAL_ECSR_ERR) {
+                       dev_warn(dev, "ErrInt: %s id %d\n",
+                                qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+                                qm_ecir2_get_pnum(&ecir2_val));
+               }
+               if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+                       dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+                                qm_ecir_get_fqid(&ecir_val));
+
+               if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+                       eadr_val.info = qm_ccsr_in(REG_EADR);
+                       memid = qm_eadr_v3_get_memid(&eadr_val);
+                       dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+                                error_mdata[memid].txt,
+                                error_mdata[memid].addr_mask
+                                       & qm_eadr_v3_get_eadr(&eadr_val));
+                       log_edata_bits(dev, error_mdata[memid].bits);
+               }
+       } else {
+               if (ecsr_val & PORTAL_ECSR_ERR) {
+                       dev_warn(dev, "ErrInt: %s id %d\n",
+                                qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+                                qm_ecir_get_pnum(&ecir_val));
+               }
+               if (ecsr_val & FQID_ECSR_ERR)
+                       dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+                                qm_ecir_get_fqid(&ecir_val));
+
+               if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+                       eadr_val.info = qm_ccsr_in(REG_EADR);
+                       memid = qm_eadr_get_memid(&eadr_val);
+                       dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+                                error_mdata[memid].txt,
+                                error_mdata[memid].addr_mask
+                                       & qm_eadr_get_eadr(&eadr_val));
+                       log_edata_bits(dev, error_mdata[memid].bits);
+               }
+       }
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+       u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+       struct device *dev = ptr;
+
+       ier_val = qm_ccsr_in(REG_ERR_IER);
+       isr_val = qm_ccsr_in(REG_ERR_ISR);
+       ecsr_val = qm_ccsr_in(REG_ECSR);
+       isr_mask = isr_val & ier_val;
+
+       if (!isr_mask)
+               return IRQ_NONE;
+
+       for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+               if (qman_hwerr_txts[i].mask & isr_mask) {
+                       dev_err_ratelimited(dev, "ErrInt: %s\n",
+                                           qman_hwerr_txts[i].txt);
+                       if (qman_hwerr_txts[i].mask & ecsr_val) {
+                               log_additional_error_info(dev, isr_mask,
+                                                         ecsr_val);
+                               /* Re-arm error capture registers */
+                               qm_ccsr_out(REG_ECSR, ecsr_val);
+                       }
+                       if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+                               dev_dbg(dev, "Disabling error 0x%x\n",
+                                       qman_hwerr_txts[i].mask);
+                               ier_val &= ~qman_hwerr_txts[i].mask;
+                               qm_ccsr_out(REG_ERR_IER, ier_val);
+                       }
+               }
+       }
+       qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+       return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+       int i, err;
+
+       /* FQD memory */
+       qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+       /* PFDR memory */
+       qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+       err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+       if (err)
+               return err;
+       /* thresholds */
+       qm_set_pfdr_threshold(512, 64);
+       qm_set_sfdr_threshold(128);
+       /* clear stale PEBI bit from interrupt status register */
+       qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+       /* corenet initiator settings */
+       qm_set_corenet_initiator();
+       /* HID settings */
+       qm_set_hid();
+       /* Set scheduling weights to defaults */
+       for (i = qm_wq_first; i <= qm_wq_last; i++)
+               qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+       /* We are not prepared to accept ERNs for hardware enqueues */
+       qm_set_dc(qm_dc_portal_fman0, 1, 0);
+       qm_set_dc(qm_dc_portal_fman1, 1, 0);
+       return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+       static int done;
+       static u32 liodn_offset;
+       u32 before, after;
+       int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+       if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+               before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+       else
+               before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+       if (!done) {
+               liodn_offset = before & LIO_CFG_LIODN_MASK;
+               done = 1;
+               return;
+       }
+       after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+       if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+               qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+       else
+               qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+       int idx = channel - QM_CHANNEL_SWPORTAL0;
+       u32 before, after;
+
+       if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+               before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+               /* Each pair of vcpus shares the same SRQ (SDEST) */
+               cpu_idx /= 2;
+               after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+               qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+       } else {
+               before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+               after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+               qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+       }
+}
+
+static int qman_resource_init(struct device *dev)
+{
+       int pool_chan_num, cgrid_num;
+       int ret, i;
+
+       switch (qman_ip_rev >> 8) {
+       case 1:
+               pool_chan_num = 15;
+               cgrid_num = 256;
+               break;
+       case 2:
+               pool_chan_num = 3;
+               cgrid_num = 64;
+               break;
+       case 3:
+               pool_chan_num = 15;
+               cgrid_num = 256;
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+                          pool_chan_num, -1);
+       if (ret) {
+               dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+               return ret;
+       }
+
+       ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+       if (ret) {
+               dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+               return ret;
+       }
+
+       /* parse pool channels into the SDQCR mask */
+       for (i = 0; i < cgrid_num; i++)
+               qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+       ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+                          qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+       if (ret) {
+               dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct resource *res;
+       int ret, err_irq;
+       u16 id;
+       u8 major, minor;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+       qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+       if (!qm_ccsr_start)
+               return -ENXIO;
+
+       qm_get_version(&id, &major, &minor);
+       if (major == 1 && minor == 0) {
+               dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+               return -ENODEV;
+       } else if (major == 1 && minor == 1)
+               qman_ip_rev = QMAN_REV11;
+       else if (major == 1 && minor == 2)
+               qman_ip_rev = QMAN_REV12;
+       else if (major == 2 && minor == 0)
+               qman_ip_rev = QMAN_REV20;
+       else if (major == 3 && minor == 0)
+               qman_ip_rev = QMAN_REV30;
+       else if (major == 3 && minor == 1)
+               qman_ip_rev = QMAN_REV31;
+       else {
+               dev_err(dev, "Unknown QMan version\n");
+               return -ENODEV;
+       }
+
+       if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+               qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+
+       ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
+       WARN_ON(ret);
+       if (ret)
+               return -ENODEV;
+
+       ret = qman_init_ccsr(dev);
+       if (ret) {
+               dev_err(dev, "CCSR setup failed\n");
+               return ret;
+       }
+
+       err_irq = platform_get_irq(pdev, 0);
+       if (err_irq <= 0) {
+               dev_info(dev, "Can't get %s property 'interrupts'\n",
+                        node->full_name);
+               return -ENODEV;
+       }
+       ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+                              dev);
+       if (ret)  {
+               dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+                       ret, node->full_name);
+               return ret;
+       }
+
+       /*
+        * Write-to-clear any stale bits (e.g. starvation being asserted prior
+        * to resource allocation during driver init).
+        */
+       qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+       /* Enable Error Interrupts */
+       qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+       qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+       if (IS_ERR(qm_fqalloc)) {
+               ret = PTR_ERR(qm_fqalloc);
+               dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+               return ret;
+       }
+
+       qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+       if (IS_ERR(qm_qpalloc)) {
+               ret = PTR_ERR(qm_qpalloc);
+               dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+               return ret;
+       }
+
+       qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+       if (IS_ERR(qm_cgralloc)) {
+               ret = PTR_ERR(qm_cgralloc);
+               dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+               return ret;
+       }
+
+       ret = qman_resource_init(dev);
+       if (ret)
+               return ret;
+
+       ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+       if (ret)
+               return ret;
+
+       ret = qman_wq_alloc();
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+       {
+               .compatible = "fsl,qman",
+       },
+       {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = fsl_qman_ids,
+               .suppress_bind_attrs = true,
+       },
+       .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644 (file)
index 0000000..1486143
--- /dev/null
@@ -0,0 +1,355 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW  1
+#define CONFIG_FSL_DPA_PIRQ_FAST  1
+
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+       struct device *dev = pcfg->dev;
+       int window_count = 1;
+       struct iommu_domain_geometry geom_attr;
+       struct pamu_stash_attribute stash_attr;
+       int ret;
+
+       pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+       if (!pcfg->iommu_domain) {
+               dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+               goto no_iommu;
+       }
+       geom_attr.aperture_start = 0;
+       geom_attr.aperture_end =
+               ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+       geom_attr.force_aperture = true;
+       ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+                                   &geom_attr);
+       if (ret < 0) {
+               dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+                       ret);
+               goto out_domain_free;
+       }
+       ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+                                   &window_count);
+       if (ret < 0) {
+               dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+                       ret);
+               goto out_domain_free;
+       }
+       stash_attr.cpu = cpu;
+       stash_attr.cache = PAMU_ATTR_CACHE_L1;
+       ret = iommu_domain_set_attr(pcfg->iommu_domain,
+                                   DOMAIN_ATTR_FSL_PAMU_STASH,
+                                   &stash_attr);
+       if (ret < 0) {
+               dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+                       __func__, ret);
+               goto out_domain_free;
+       }
+       ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+                                        IOMMU_READ | IOMMU_WRITE);
+       if (ret < 0) {
+               dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+                       __func__, ret);
+               goto out_domain_free;
+       }
+       ret = iommu_attach_device(pcfg->iommu_domain, dev);
+       if (ret < 0) {
+               dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+                       ret);
+               goto out_domain_free;
+       }
+       ret = iommu_domain_set_attr(pcfg->iommu_domain,
+                                   DOMAIN_ATTR_FSL_PAMU_ENABLE,
+                                   &window_count);
+       if (ret < 0) {
+               dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+                       ret);
+               goto out_detach_device;
+       }
+
+no_iommu:
+#endif
+       qman_set_sdest(pcfg->channel, cpu);
+
+       return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+       iommu_detach_device(pcfg->iommu_domain, NULL);
+out_domain_free:
+       iommu_domain_free(pcfg->iommu_domain);
+       pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+       struct qman_portal *p;
+       u32 irq_sources = 0;
+
+       /* We need the same LIODN offset for all portals */
+       qman_liodn_fixup(pcfg->channel);
+
+       pcfg->iommu_domain = NULL;
+       portal_set_cpu(pcfg, pcfg->cpu);
+
+       p = qman_create_affine_portal(pcfg, NULL);
+       if (!p) {
+               dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+                        __func__, pcfg->cpu);
+               return NULL;
+       }
+
+       /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+       irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+                      QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+       irq_sources |= QM_PIRQ_DQRI;
+#endif
+       qman_p_irqsource_add(p, irq_sources);
+
+       spin_lock(&qman_lock);
+       if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+               /* all assigned portals are initialized now */
+               qman_init_cgr_all();
+       }
+       spin_unlock(&qman_lock);
+
+       dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+       return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+                                                       unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+       struct pamu_stash_attribute stash_attr;
+       int ret;
+
+       if (pcfg->iommu_domain) {
+               stash_attr.cpu = cpu;
+               stash_attr.cache = PAMU_ATTR_CACHE_L1;
+               ret = iommu_domain_set_attr(pcfg->iommu_domain,
+                               DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+               if (ret < 0) {
+                       dev_err(pcfg->dev,
+                               "Failed to update pamu stash setting\n");
+                       return;
+               }
+       }
+#endif
+       qman_set_sdest(pcfg->channel, cpu);
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+       struct qman_portal *p;
+       const struct qm_portal_config *pcfg;
+
+       p = affine_portals[cpu];
+       if (p) {
+               pcfg = qman_get_qm_portal_config(p);
+               if (pcfg) {
+                       irq_set_affinity(pcfg->irq, cpumask_of(0));
+                       qman_portal_update_sdest(pcfg, 0);
+               }
+       }
+}
+
+static void qman_online_cpu(unsigned int cpu)
+{
+       struct qman_portal *p;
+       const struct qm_portal_config *pcfg;
+
+       p = affine_portals[cpu];
+       if (p) {
+               pcfg = qman_get_qm_portal_config(p);
+               if (pcfg) {
+                       irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+                       qman_portal_update_sdest(pcfg, cpu);
+               }
+       }
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+                                    unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               qman_online_cpu(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               qman_offline_cpu(cpu);
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+       .notifier_call = qman_hotplug_cpu_callback,
+};
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct qm_portal_config *pcfg;
+       struct resource *addr_phys[2];
+       const u32 *channel;
+       void __iomem *va;
+       int irq, len, cpu;
+
+       pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+       if (!pcfg)
+               return -ENOMEM;
+
+       pcfg->dev = dev;
+
+       addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            DPAA_PORTAL_CE);
+       if (!addr_phys[0]) {
+               dev_err(dev, "Can't get %s property 'reg::CE'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            DPAA_PORTAL_CI);
+       if (!addr_phys[1]) {
+               dev_err(dev, "Can't get %s property 'reg::CI'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       channel = of_get_property(node, "cell-index", &len);
+       if (!channel || (len != 4)) {
+               dev_err(dev, "Can't get %s property 'cell-index'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+       pcfg->channel = *channel;
+       pcfg->cpu = -1;
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+               return -ENXIO;
+       }
+       pcfg->irq = irq;
+
+       va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+       if (!va)
+               goto err_ioremap1;
+
+       pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+       va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+                         _PAGE_GUARDED | _PAGE_NO_CACHE);
+       if (!va)
+               goto err_ioremap2;
+
+       pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+       pcfg->pools = qm_get_pools_sdqcr();
+
+       spin_lock(&qman_lock);
+       cpu = cpumask_next_zero(-1, &portal_cpus);
+       if (cpu >= nr_cpu_ids) {
+               /* unassigned portal, skip init */
+               spin_unlock(&qman_lock);
+               return 0;
+       }
+
+       cpumask_set_cpu(cpu, &portal_cpus);
+       spin_unlock(&qman_lock);
+       pcfg->cpu = cpu;
+
+       if (!init_pcfg(pcfg))
+               goto err_ioremap2;
+
+       /* clear irq affinity if assigned cpu is offline */
+       if (!cpu_online(cpu))
+               qman_offline_cpu(cpu);
+
+       return 0;
+
+err_ioremap2:
+       iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+       dev_err(dev, "ioremap failed\n");
+       return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+       {
+               .compatible = "fsl,qman-portal",
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = qman_portal_ids,
+       },
+       .probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+       int ret;
+
+       ret = platform_driver_register(drv);
+       if (ret < 0)
+               return ret;
+
+       register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+
+       return 0;
+}
+
+module_driver(qman_portal_driver,
+             qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644 (file)
index 0000000..5cf821e
--- /dev/null
@@ -0,0 +1,371 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+       u8 verb;
+       u8 result;
+       u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+       u8 __reserved[28];
+       u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+       return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+       u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+       u8 verb;
+       u8 result;
+       u8 __reserved[30];
+       /* Access this struct using qman_cgrs_get() */
+       struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+       u8 verb;
+       u8 result;
+       u16 __reserved1;
+       struct __qm_mc_cgr cgr; /* CGR fields */
+       u8 __reserved2[6];
+       u8 i_bcnt_hi;   /* high 8-bits of 40-bit "Instant" */
+       u32 i_bcnt_lo;  /* low 32-bits of 40-bit */
+       u8 __reserved3[3];
+       u8 a_bcnt_hi;   /* high 8-bits of 40-bit "Average" */
+       u32 a_bcnt_lo;  /* low 32-bits of 40-bit */
+       u32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+       return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+       return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcc_queryfq_np {
+       u8 _ncw_verb;
+       u8 __reserved1[3];
+       u32 fqid;       /* 24-bit */
+       u8 __reserved2[56];
+} __packed;
+
+struct qm_mcr_queryfq_np {
+       u8 verb;
+       u8 result;
+       u8 __reserved1;
+       u8 state;               /* QM_MCR_NP_STATE_*** */
+       u32 fqd_link;           /* 24-bit, _res2[24-31] */
+       u16 odp_seq;            /* 14-bit, _res3[14-15] */
+       u16 orp_nesn;           /* 14-bit, _res4[14-15] */
+       u16 orp_ea_hseq;        /* 15-bit, _res5[15] */
+       u16 orp_ea_tseq;        /* 15-bit, _res6[15] */
+       u32 orp_ea_hptr;        /* 24-bit, _res7[24-31] */
+       u32 orp_ea_tptr;        /* 24-bit, _res8[24-31] */
+       u32 pfdr_hptr;          /* 24-bit, _res9[24-31] */
+       u32 pfdr_tptr;          /* 24-bit, _res10[24-31] */
+       u8 __reserved2[5];
+       u8 is;                  /* 1-bit, _res12[1-7] */
+       u16 ics_surp;
+       u32 byte_cnt;
+       u32 frm_cnt;            /* 24-bit, _res13[24-31] */
+       u32 __reserved3;
+       u16 ra1_sfdr;           /* QM_MCR_NP_RA1_*** */
+       u16 ra2_sfdr;           /* QM_MCR_NP_RA2_*** */
+       u16 __reserved4;
+       u16 od1_sfdr;           /* QM_MCR_NP_OD1_*** */
+       u16 od2_sfdr;           /* QM_MCR_NP_OD2_*** */
+       u16 od3_sfdr;           /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE             0x10
+#define QM_MCR_NP_STATE_R              0x08
+#define QM_MCR_NP_STATE_MASK           0x07    /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS            0x00
+#define QM_MCR_NP_STATE_RETIRED                0x01
+#define QM_MCR_NP_STATE_TEN_SCHED      0x02
+#define QM_MCR_NP_STATE_TRU_SCHED      0x03
+#define QM_MCR_NP_STATE_PARKED         0x04
+#define QM_MCR_NP_STATE_ACTIVE         0x05
+#define QM_MCR_NP_PTR_MASK             0x07ff  /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v)           (((v) >> 14) & 0x3)     /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v)            (((v) >> 14) & 0x1)     /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v)           (((v) >> 14) & 0x3)     /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v)           (((v) >> 14) & 0x3)     /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+       qm_mcr_fqd_link_mask = BIT(24)-1,
+       qm_mcr_odp_seq_mask = BIT(14)-1,
+       qm_mcr_orp_nesn_mask = BIT(14)-1,
+       qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
+       qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
+       qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
+       qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
+       qm_mcr_pfdr_hptr_mask = BIT(24)-1,
+       qm_mcr_pfdr_tptr_mask = BIT(24)-1,
+       qm_mcr_is_mask = BIT(1)-1,
+       qm_mcr_frm_cnt_mask = BIT(24)-1,
+};
+#define qm_mcr_np_get(np, field) \
+       ((np)->field & (qm_mcr_##field##_mask))
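+
+/*
+ * Usage sketch (illustrative only): given a struct qm_mcr_queryfq_np 'np'
+ * filled in by the "Query FQ Non-Programmable Fields" command, the masked
+ * fields above are read through qm_mcr_np_get(), e.g.:
+ *
+ *        u32 frames = qm_mcr_np_get(&np, frm_cnt);
+ *        u32 hptr = qm_mcr_np_get(&np, pfdr_hptr);
+ */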
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x)    ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x)     (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM        (sizeof(struct __qm_mcr_querycongestion) << 3)
+
+struct qman_cgrs {
+       struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+       memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+       memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+       return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+                               const struct qman_cgrs *src)
+{
+       *dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+                       const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+       int ret;
+       u32 *_d = dest->q.state;
+       const u32 *_a = a->q.state;
+       const u32 *_b = b->q.state;
+
+       for (ret = 0; ret < 8; ret++)
+               *_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+                       const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+       int ret;
+       u32 *_d = dest->q.state;
+       const u32 *_a = a->q.state;
+       const u32 *_b = b->q.state;
+
+       for (ret = 0; ret < 8; ret++)
+               *_d++ = *_a++ ^ *_b++;
+}
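+
+/*
+ * Usage sketch (illustrative only): find the congestion groups whose state
+ * differs between two snapshots 'a' and 'b':
+ *
+ *        struct qman_cgrs delta;
+ *        int i;
+ *
+ *        qman_cgrs_xor(&delta, &a, &b);
+ *        for (i = 0; i < CGR_NUM; i++)
+ *                if (qman_cgrs_get(&delta, i))
+ *                        pr_info("CGR %d changed\n", i);
+ */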
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+       /*
+        * Corenet portal addresses;
+        * [0]==cache-enabled, [1]==cache-inhibited.
+        */
+       void __iomem *addr_virt[2];
+       struct device *dev;
+       struct iommu_domain *iommu_domain;
+       /* Allow these to be joined in lists */
+       struct list_head list;
+       /* User-visible portal configuration settings */
+       /* portal is affined to this cpu */
+       int cpu;
+       /* portal interrupt line */
+       int irq;
+       /*
+        * the portal's dedicated channel id, used when initialising
+        * frame queues to target this portal when scheduled
+        */
+       u16 channel;
+       /*
+        * mask of pool channels this portal has dequeue access to
+        * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+        */
+       u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+                       const struct qm_portal_config *config,
+                       const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR      0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR      0x80000000
+#define QM_VDQCR_EXACT                 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK                0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n)      (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n)      (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY   QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT      0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT  0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH    0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (i.e. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
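+
+/*
+ * Usage sketch (illustrative only, error handling elided): volatile-dequeue
+ * up to eight frames from 'fq' and block until the command completes; the
+ * API self-test (qman_test_api.c) does the same with NUMFRAMES_TILLEMPTY:
+ *
+ *        u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(8);
+ *        int err = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
+ *                                        QMAN_VOLATILE_FLAG_FINISH, vdqcr);
+ */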
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/*   QMan s/w corenet portal, low-level i/face  */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *   priority.
+ * If SOURCE == SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS       0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ     0x40000000
+#define QM_SDQCR_COUNT_EXACT1          0x0
+#define QM_SDQCR_COUNT_UPTO3           0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE  0x10000000
+#define QM_SDQCR_TYPE_MASK             0x03000000
+#define QM_SDQCR_TYPE_NULL             0x0
+#define QM_SDQCR_TYPE_PRIO_QOS         0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS       0x02000000
+#define QM_SDQCR_TYPE_ACTIVE           0x03000000
+#define QM_SDQCR_TOKEN_MASK            0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v)          (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v)          (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED    0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK       0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED  0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n)    ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n)      (n)
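+
+/*
+ * Composition sketch (illustrative only): an SDQCR value selecting the
+ * dedicated channel plus pool channel 2, up to three frames at a time, with
+ * the priority/QoS dequeue type and a token of 0x42, mirroring PORTAL_SDQCR
+ * in qman_test_api.c (QM_SDQCR_CHANNELS_POOL() is defined outside this
+ * header):
+ *
+ *        u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ *                    QM_SDQCR_TYPE_PRIO_QOS | QM_SDQCR_TOKEN_SET(0x42) |
+ *                    QM_SDQCR_CHANNELS_DEDICATED | QM_SDQCR_CHANNELS_POOL(2);
+ */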
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK             0x00ffffff
+#define QM_VDQCR_FQID(n)               ((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL        0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL      0x8000          /* Portal channel */
+#define QM_DQAVAIL_POOL(n)     (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK                0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE        (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+                                               struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644 (file)
index 0000000..18f7f02
--- /dev/null
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+       int loop = 1;
+       int err = 0;
+
+       while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+               err = qman_test_stash();
+               if (err)
+                       break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+               err = qman_test_api();
+               if (err)
+                       break;
+#endif
+       }
+       return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644 (file)
index 0000000..d5f8cb2
--- /dev/null
@@ -0,0 +1,36 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644 (file)
index 0000000..6880ff1
--- /dev/null
@@ -0,0 +1,252 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID         27
+#define POOL_ID                2
+#define FQ_FLAGS       QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES   10
+#define NUM_PARTIAL    4
+#define PORTAL_SDQCR   (QM_SDQCR_SOURCE_CHANNELS | \
+                       QM_SDQCR_TYPE_PRIO_QOS | \
+                       QM_SDQCR_TOKEN_SET(0x98) | \
+                       QM_SDQCR_CHANNELS_DEDICATED | \
+                       QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE  ((void *)0xf00dbeef)
+#define VDQCR_FLAGS    (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+                                       struct qman_fq *,
+                                       const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+                  const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+                  const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+       .cb.dqrr = cb_dqrr,
+       .cb.ern = cb_ern,
+       .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+       qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+       qm_fd_set_contig_big(fd, 0x0000ffff);
+       fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+       u64 t = qm_fd_addr_get64(fd);
+       int z = t >> 40;
+       unsigned int len, off;
+       enum qm_fd_format fmt;
+
+       t <<= 1;
+       if (z)
+               t |= 1;
+       qm_fd_addr_set64(fd, t);
+
+       fmt = qm_fd_get_format(fd);
+       off = qm_fd_get_offset(fd);
+       len = qm_fd_get_length(fd);
+       len--;
+       qm_fd_set_param(fd, fmt, off, len);
+
+       fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+       int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+       if (!r) {
+               enum qm_fd_format fmt_a, fmt_b;
+
+               fmt_a = qm_fd_get_format(a);
+               fmt_b = qm_fd_get_format(b);
+               r = fmt_a - fmt_b;
+       }
+       if (!r)
+               r = a->cfg - b->cfg;
+       if (!r)
+               r = a->cmd - b->cmd;
+       return r;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+       unsigned int loop;
+       int err = 0;
+
+       for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+               if (qman_enqueue(fq, &fd)) {
+                       pr_crit("qman_enqueue() failed\n");
+                       err = -EIO;
+               }
+               fd_inc(&fd);
+       }
+
+       return err;
+}
+
+int qman_test_api(void)
+{
+       unsigned int flags, frmcnt;
+       int err;
+       struct qman_fq *fq = &fq_base;
+
+       pr_info("%s(): Starting\n", __func__);
+       fd_init(&fd);
+       fd_init(&fd_dq);
+
+       /* Initialise (parked) FQ */
+       err = qman_create_fq(0, FQ_FLAGS, fq);
+       if (err) {
+               pr_crit("qman_create_fq() failed\n");
+               goto failed;
+       }
+       err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+       if (err) {
+               pr_crit("qman_init_fq() failed\n");
+               goto failed;
+       }
+       /* Do enqueues + VDQCR, twice. (Parked FQ) */
+       err = do_enqueues(fq);
+       if (err)
+               goto failed;
+       pr_info("VDQCR (till-empty);\n");
+       frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+       err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+       if (err) {
+               pr_crit("qman_volatile_dequeue() failed\n");
+               goto failed;
+       }
+       err = do_enqueues(fq);
+       if (err)
+               goto failed;
+       pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+       frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+       err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+       if (err) {
+               pr_crit("qman_volatile_dequeue() failed\n");
+               goto failed;
+       }
+       pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+               NUM_ENQUEUES);
+       frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+       err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+       if (err) {
+               pr_err("qman_volatile_dequeue() failed\n");
+               goto failed;
+       }
+
+       err = do_enqueues(fq);
+       if (err)
+               goto failed;
+       pr_info("scheduled dequeue (till-empty)\n");
+       err = qman_schedule_fq(fq);
+       if (err) {
+               pr_crit("qman_schedule_fq() failed\n");
+               goto failed;
+       }
+       wait_event(waitqueue, sdqcr_complete);
+
+       /* Retire and OOS the FQ */
+       err = qman_retire_fq(fq, &flags);
+       if (err < 0) {
+               pr_crit("qman_retire_fq() failed\n");
+               goto failed;
+       }
+       wait_event(waitqueue, retire_complete);
+       if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+               err = -EIO;
+               pr_crit("leaking frames\n");
+               goto failed;
+       }
+       err = qman_oos_fq(fq);
+       if (err) {
+               pr_crit("qman_oos_fq() failed\n");
+               goto failed;
+       }
+       qman_destroy_fq(fq);
+       pr_info("%s(): Finished\n", __func__);
+       return 0;
+
+failed:
+       WARN_ON(1);
+       return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+                                       struct qman_fq *fq,
+                                       const struct qm_dqrr_entry *dq)
+{
+       if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+               pr_err("BADNESS: dequeued frame doesn't match;\n");
+               return qman_cb_dqrr_consume;
+       }
+       fd_inc(&fd_dq);
+       if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+               sdqcr_complete = 1;
+               wake_up(&waitqueue);
+       }
+       return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+                  const union qm_mr_entry *msg)
+{
+       pr_crit("cb_ern() unimplemented");
+       WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+                  const union qm_mr_entry *msg)
+{
+       u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+       if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+               pr_crit("unexpected FQS message");
+               WARN_ON(1);
+               return;
+       }
+       pr_info("Retirement message received\n");
+       retire_complete = 1;
+       wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644 (file)
index 0000000..43cf66b
--- /dev/null
@@ -0,0 +1,617 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ *    handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ *    and advance the iterator for the next loop. This includes a final fixup,
+ *    which connects the last handler to the first (and which is why phase 2
+ *    and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    initialise FQ objects and advance the iterator for the next loop.
+ *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ *    initialisation targets the correct cpu.
+ */
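A minimal sketch of the per-word step described above; hp_word_step() is an
illustrative name only, the driver itself does this inline in
process_frame_data() further down:

    static inline int hp_word_step(u32 *word, u32 expected_lfsr,
                                   u32 rx_mixer, u32 tx_mixer)
    {
            *word ^= rx_mixer;              /* undo the sender's mix */
            if (*word != expected_lfsr)     /* validate against the LFSR data */
                    return -EIO;            /* frame took an unexpected path */
            *word ^= tx_mixer;              /* re-mix for the next hop */
            return 0;
    }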
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+       int (*fn)(void);
+       atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+       struct bstrap *bstrap = bs;
+       int err;
+
+       atomic_inc(&bstrap->started);
+       err = bstrap->fn();
+       if (err)
+               return err;
+       while (!kthread_should_stop())
+               msleep(20);
+       return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+       int cpu;
+
+       for_each_cpu(cpu, cpu_online_mask) {
+               struct bstrap bstrap = {
+                       .fn = fn,
+                       .started = ATOMIC_INIT(0)
+               };
+               struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+                       "hotpotato%d", cpu);
+               int ret;
+
+               if (IS_ERR(k))
+                       return -ENOMEM;
+               kthread_bind(k, cpu);
+               wake_up_process(k);
+               /*
+                * If we call kthread_stop() before the "wake up" has had an
+                * effect, then the thread may exit with -EINTR without ever
+                * running the function. So poll until it's started before
+                * requesting it to stop.
+                */
+               while (!atomic_read(&bstrap.started))
+                       msleep(20);
+               ret = kthread_stop(k);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+struct hp_handler {
+
+       /* The following data is stashed when 'rx' is dequeued; */
+       /* -------------- */
+       /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+       struct qman_fq rx;
+       /* The Tx FQ we should forward to */
+       struct qman_fq tx;
+       /* The value we XOR post-dequeue, prior to validating */
+       u32 rx_mixer;
+       /* The value we XOR pre-enqueue, after validating */
+       u32 tx_mixer;
+       /* what the hotpotato address should be on dequeue */
+       dma_addr_t addr;
+       u32 *frame_ptr;
+
+       /* The following data isn't (necessarily) stashed on dequeue; */
+       /* -------------- */
+       u32 fqid_rx, fqid_tx;
+       /* list node for linking us into 'hp_cpu' */
+       struct list_head node;
+       /* Just to check ... */
+       unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+       /* identify the cpu we run on; */
+       unsigned int processor_id;
+       /* root node for the per-cpu list of handlers */
+       struct list_head handlers;
+       /* list node for linking us into 'hp_cpu_list' */
+       struct list_head node;
+       /*
+        * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
+        * handlers together, this is used as per-cpu iterator state
+        */
+       struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU     2
+#define HP_LOOPS       8
+/* 80 words (320 bytes) of frame data - a smallish ethernet frame's worth, spanning several cachelines */
+#define HP_NUM_WORDS   80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD  0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+       return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
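For illustration (hand-computed, so treat it as a sanity check rather than
authoritative): starting from HP_FIRST_WORD = 0xabbaf00d the low bit is set, so
do_lfsr() returns (0xabbaf00d >> 1) ^ 0xd0000001 = 0x85dd7807; when the low bit
is clear the value is simply shifted right by one.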
+
+static int allocate_frame_data(void)
+{
+       u32 lfsr = HP_FIRST_WORD;
+       int loop;
+       struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+       if (!pdev) {
+               pr_crit("platform_device_alloc() failed");
+               return -EIO;
+       }
+       if (platform_device_add(pdev)) {
+               pr_crit("platform_device_add() failed");
+               return -EIO;
+       }
+       __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+       if (!__frame_ptr)
+               return -ENOMEM;
+
+       frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+       for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+               frame_ptr[loop] = lfsr;
+               lfsr = do_lfsr(lfsr);
+       }
+       frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+                                  DMA_BIDIRECTIONAL);
+       platform_device_del(pdev);
+       platform_device_put(pdev);
+       return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+       kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+                                    const struct qm_fd *fd)
+{
+       u32 *p = handler->frame_ptr;
+       u32 lfsr = HP_FIRST_WORD;
+       int loop;
+
+       if (qm_fd_addr_get64(fd) != handler->addr) {
+               pr_crit("bad frame address");
+               return -EIO;
+       }
+       for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+               *p ^= handler->rx_mixer;
+               if (*p != lfsr) {
+                       pr_crit("corrupt frame data");
+                       return -EIO;
+               }
+               *p ^= handler->tx_mixer;
+               lfsr = do_lfsr(lfsr);
+       }
+       return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+                                           struct qman_fq *fq,
+                                           const struct qm_dqrr_entry *dqrr)
+{
+       struct hp_handler *handler = (struct hp_handler *)fq;
+
+       if (process_frame_data(handler, &dqrr->fd)) {
+               WARN_ON(1);
+               goto skip;
+       }
+       if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+               pr_crit("qman_enqueue() failed");
+               WARN_ON(1);
+       }
+skip:
+       return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+                                            struct qman_fq *fq,
+                                            const struct qm_dqrr_entry *dqrr)
+{
+       struct hp_handler *handler = (struct hp_handler *)fq;
+
+       process_frame_data(handler, &dqrr->fd);
+       if (++loop_counter < HP_LOOPS) {
+               if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+                       pr_crit("qman_enqueue() failed");
+                       WARN_ON(1);
+                       goto skip;
+               }
+       } else {
+               pr_info("Received final (%dth) frame\n", loop_counter);
+               wake_up(&queue);
+       }
+skip:
+       return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+       struct hp_handler *handler;
+       int loop;
+       struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+       hp_cpu->processor_id = smp_processor_id();
+       spin_lock(&hp_lock);
+       list_add_tail(&hp_cpu->node, &hp_cpu_list);
+       hp_cpu_list_length++;
+       spin_unlock(&hp_lock);
+       INIT_LIST_HEAD(&hp_cpu->handlers);
+       for (loop = 0; loop < HP_PER_CPU; loop++) {
+               handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+               if (!handler) {
+                       pr_crit("kmem_cache_alloc() failed");
+                       WARN_ON(1);
+                       return -EIO;
+               }
+               handler->processor_id = hp_cpu->processor_id;
+               handler->addr = frame_dma;
+               handler->frame_ptr = frame_ptr;
+               list_add_tail(&handler->node, &hp_cpu->handlers);
+       }
+       return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+       struct list_head *loop, *tmp;
+       struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+       spin_lock(&hp_lock);
+       list_del(&hp_cpu->node);
+       spin_unlock(&hp_lock);
+       list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+               u32 flags = 0;
+               struct hp_handler *handler = list_entry(loop, struct hp_handler,
+                                                       node);
+               if (qman_retire_fq(&handler->rx, &flags) ||
+                   (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+                       pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+                       WARN_ON(1);
+                       return -EIO;
+               }
+               if (qman_oos_fq(&handler->rx)) {
+                       pr_crit("qman_oos_fq(rx) failed");
+                       WARN_ON(1);
+                       return -EIO;
+               }
+               qman_destroy_fq(&handler->rx);
+               qman_destroy_fq(&handler->tx);
+               qman_release_fqid(handler->fqid_rx);
+               list_del(&handler->node);
+               kmem_cache_free(hp_handler_slab, handler);
+       }
+       return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+       u8 res = (offset + (L1_CACHE_BYTES - 1))
+                        / (L1_CACHE_BYTES);
+       if (res > 3)
+               return 3;
+       return res;
+}
+#define STASH_DATA_CL \
+       num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+       num_cachelines(offsetof(struct hp_handler, fqid_rx))
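As a worked example, assuming 64-byte L1 cachelines (typical for the DPAA-era
parts, but an assumption here): STASH_DATA_CL = num_cachelines(80 * 4) =
DIV_ROUND_UP(320, 64) = 5, which the helper clamps to the hardware maximum of 3.
STASH_CTX_CL depends on offsetof(struct hp_handler, fqid_rx), i.e. on how much
of the handler precedes the non-stashed fields, so it is left symbolic here.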
+
+static int init_handler(void *h)
+{
+       struct qm_mcc_initfq opts;
+       struct hp_handler *handler = h;
+       int err;
+
+       if (handler->processor_id != smp_processor_id()) {
+               err = -EIO;
+               goto failed;
+       }
+       /* Set up rx */
+       memset(&handler->rx, 0, sizeof(handler->rx));
+       if (handler == special_handler)
+               handler->rx.cb.dqrr = special_dqrr;
+       else
+               handler->rx.cb.dqrr = normal_dqrr;
+       err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+       if (err) {
+               pr_crit("qman_create_fq(rx) failed");
+               goto failed;
+       }
+       memset(&opts, 0, sizeof(opts));
+       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+       opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+       qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+       err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+                          QMAN_INITFQ_FLAG_LOCAL, &opts);
+       if (err) {
+               pr_crit("qman_init_fq(rx) failed");
+               goto failed;
+       }
+       /* Set up tx */
+       memset(&handler->tx, 0, sizeof(handler->tx));
+       err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+                            &handler->tx);
+       if (err) {
+               pr_crit("qman_create_fq(tx) failed");
+               goto failed;
+       }
+
+       return 0;
+failed:
+       return err;
+}
+
+static void init_handler_cb(void *h)
+{
+       if (init_handler(h))
+               WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+       int loop;
+       u32 fqid = 0;
+       u32 lfsr = 0xdeadbeef;
+       struct hp_cpu *hp_cpu;
+       struct hp_handler *handler;
+
+       for (loop = 0; loop < HP_PER_CPU; loop++) {
+               list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+                       int err;
+
+                       if (!loop)
+                               hp_cpu->iterator = list_first_entry(
+                                               &hp_cpu->handlers,
+                                               struct hp_handler, node);
+                       else
+                               hp_cpu->iterator = list_entry(
+                                               hp_cpu->iterator->node.next,
+                                               struct hp_handler, node);
+                       /* Rx FQID is the previous handler's Tx FQID */
+                       hp_cpu->iterator->fqid_rx = fqid;
+                       /* Allocate new FQID for Tx */
+                       err = qman_alloc_fqid(&fqid);
+                       if (err) {
+                               pr_crit("qman_alloc_fqid() failed");
+                               return err;
+                       }
+                       hp_cpu->iterator->fqid_tx = fqid;
+                       /* Rx mixer is the previous handler's Tx mixer */
+                       hp_cpu->iterator->rx_mixer = lfsr;
+                       /* Get new mixer for Tx */
+                       lfsr = do_lfsr(lfsr);
+                       hp_cpu->iterator->tx_mixer = lfsr;
+               }
+       }
+       /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+       hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+       handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+       if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+               return 1;
+       handler->fqid_rx = fqid;
+       handler->rx_mixer = lfsr;
+       /* and tag it as our "special" handler */
+       special_handler = handler;
+       return 0;
+}
+
+static int init_phase3(void)
+{
+       int loop, err;
+       struct hp_cpu *hp_cpu;
+
+       for (loop = 0; loop < HP_PER_CPU; loop++) {
+               list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+                       if (!loop)
+                               hp_cpu->iterator = list_first_entry(
+                                               &hp_cpu->handlers,
+                                               struct hp_handler, node);
+                       else
+                               hp_cpu->iterator = list_entry(
+                                               hp_cpu->iterator->node.next,
+                                               struct hp_handler, node);
+                       preempt_disable();
+                       if (hp_cpu->processor_id == smp_processor_id()) {
+                               err = init_handler(hp_cpu->iterator);
+                               if (err)
+                                       return err;
+                       } else {
+                               smp_call_function_single(hp_cpu->processor_id,
+                                       init_handler_cb, hp_cpu->iterator, 1);
+                       }
+                       preempt_enable();
+               }
+       }
+       return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+       u32 *p = special_handler->frame_ptr;
+       u32 lfsr = HP_FIRST_WORD;
+       int loop, err;
+       struct qm_fd fd;
+
+       if (special_handler->processor_id != smp_processor_id()) {
+               err = -EIO;
+               goto failed;
+       }
+       memset(&fd, 0, sizeof(fd));
+       qm_fd_addr_set64(&fd, special_handler->addr);
+       qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+       for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+               if (*p != lfsr) {
+                       err = -EIO;
+                       pr_crit("corrupt frame data");
+                       goto failed;
+               }
+               *p ^= special_handler->tx_mixer;
+               lfsr = do_lfsr(lfsr);
+       }
+       pr_info("Sending first frame\n");
+       err = qman_enqueue(&special_handler->tx, &fd);
+       if (err) {
+               pr_crit("qman_enqueue() failed");
+               goto failed;
+       }
+
+       return 0;
+failed:
+       return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+       if (send_first_frame(NULL))
+               WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+       int err;
+
+       if (cpumask_weight(cpu_online_mask) < 2) {
+               pr_info("%s(): skip - only 1 CPU\n", __func__);
+               return 0;
+       }
+
+       pr_info("%s(): Starting\n", __func__);
+
+       hp_cpu_list_length = 0;
+       loop_counter = 0;
+       hp_handler_slab = kmem_cache_create("hp_handler_slab",
+                       sizeof(struct hp_handler), L1_CACHE_BYTES,
+                       SLAB_HWCACHE_ALIGN, NULL);
+       if (!hp_handler_slab) {
+               err = -EIO;
+               pr_crit("kmem_cache_create() failed");
+               goto failed;
+       }
+
+       err = allocate_frame_data();
+       if (err)
+               goto failed;
+
+       /* Init phase 1 */
+       pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+       if (on_all_cpus(create_per_cpu_handlers)) {
+               err = -EIO;
+               pr_crit("on_each_cpu() failed");
+               goto failed;
+       }
+       pr_info("Number of cpus: %d, total of %d handlers\n",
+               hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+       err = init_phase2();
+       if (err)
+               goto failed;
+
+       err = init_phase3();
+       if (err)
+               goto failed;
+
+       preempt_disable();
+       if (special_handler->processor_id == smp_processor_id()) {
+               err = send_first_frame(NULL);
+               if (err)
+                       goto failed;
+       } else {
+               smp_call_function_single(special_handler->processor_id,
+                                        send_first_frame_cb, NULL, 1);
+       }
+       preempt_enable();
+
+       wait_event(queue, loop_counter == HP_LOOPS);
+       deallocate_frame_data();
+       if (on_all_cpus(destroy_per_cpu_handlers)) {
+               err = -EIO;
+               pr_crit("on_each_cpu() failed");
+               goto failed;
+       }
+       kmem_cache_destroy(hp_handler_slab);
+       pr_info("%s(): Finished\n", __func__);
+
+       return 0;
+failed:
+       WARN_ON(1);
+       return err;
+}
index 333eb22..0aaf429 100644 (file)
@@ -41,7 +41,8 @@ struct qe_gpio_chip {
 
 static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
+       struct qe_gpio_chip *qe_gc =
+               container_of(mm_gc, struct qe_gpio_chip, mm_gc);
        struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
        qe_gc->cpdata = in_be32(&regs->cpdata);
index 7026507..2707a82 100644 (file)
@@ -69,8 +69,8 @@ static phys_addr_t qebase = -1;
 phys_addr_t get_qe_base(void)
 {
        struct device_node *qe;
-       int size;
-       const u32 *prop;
+       int ret;
+       struct resource res;
 
        if (qebase != -1)
                return qebase;
@@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void)
                        return qebase;
        }
 
-       prop = of_get_property(qe, "reg", &size);
-       if (prop && size >= sizeof(*prop))
-               qebase = of_translate_address(qe, prop);
+       ret = of_address_to_resource(qe, 0, &res);
+       if (!ret)
+               qebase = res.start;
        of_node_put(qe);
 
        return qebase;
index 41eff80..104e68d 100644 (file)
@@ -70,6 +70,11 @@ int cpm_muram_init(void)
        }
 
        muram_pool = gen_pool_create(0, -1);
+       if (!muram_pool) {
+               pr_err("Cannot allocate memory pool for CPM/QE muram");
+               ret = -ENOMEM;
+               goto out_muram;
+       }
        muram_pbase = of_translate_address(np, zero);
        if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
                pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
        struct muram_block *entry;
        unsigned long start;
 
+       if (!muram_pool && cpm_muram_init())
+               goto out2;
+
        start = gen_pool_alloc_algo(muram_pool, size, algo, data);
        if (!start)
                goto out2;
index 5e48b14..a1048b4 100644 (file)
@@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
        utdm->tdm_port = val;
        ut_info->uf_info.tdm_num = utdm->tdm_port;
 
-       if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+       if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
                utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
        else
                utdm->tdm_mode = TDM_NORMAL;
@@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
        }
 
        if (siram_init_flag == 0) {
-               memset_io(utdm->siram, 0,  res->end - res->start + 1);
+               memset_io(utdm->siram, 0,  resource_size(res));
                siram_init_flag = 1;
        }
 
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
new file mode 100644 (file)
index 0000000..eaaf56d
--- /dev/null
@@ -0,0 +1,129 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+/* wrapper for 48-bit buffers */
+struct bm_buffer {
+       union {
+               struct {
+                       __be16 bpid; /* hi 8-bits reserved */
+                       __be16 hi; /* High 16-bits of 48-bit address */
+                       __be32 lo; /* Low 32-bits of 48-bit address */
+               };
+               __be64 data;
+       };
+} __aligned(8);
+/*
+ * Restore the 48 bit address previously stored in BMan
+ * hardware pools as a dma_addr_t
+ */
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+       return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+       return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
+{
+       buf->hi = cpu_to_be16(upper_32_bits(addr));
+       buf->lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
+{
+       return be16_to_cpu(buf->bpid) & 0xff;
+}
+
+static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
+{
+       buf->bpid = cpu_to_be16(bpid & 0xff);
+}
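A minimal round-trip sketch for the accessors above (buf and dma_addr are
illustrative locals, not part of this header):

    struct bm_buffer buf = { 0 };
    dma_addr_t dma_addr = 0x12340000;  /* illustrative, fits in 48 bits */

    bm_buffer_set64(&buf, dma_addr);
    bm_buffer_set_bpid(&buf, 7);       /* pool 7, purely as an example */
    /* bm_buf_addr(&buf) now returns dma_addr; bm_buffer_get_bpid(&buf) returns 7 */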
+
+/* Managed portal, high-level i/face */
+
+/* Portal and Buffer Pools */
+struct bman_portal;
+struct bman_pool;
+
+#define BM_POOL_MAX            64 /* max # of buffer pools */
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ *
+ * Creates a pool object, and returns a reference to it or NULL on error.
+ */
+struct bman_pool *bman_new_pool(void);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_bpid - Returns a pool object's BPID.
+ * @pool: the pool object
+ *
+ * The returned value is the index of the encapsulated buffer pool,
+ * in the range of [0, @BM_POOL_MAX-1].
+ */
+int bman_get_bpid(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ *
+ * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
+ * the function will return -ETIMEDOUT. Otherwise, it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
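Putting the declarations above together, a hedged usage sketch (error handling
trimmed; it assumes the calling context already has an affine BMan portal set
up by the portal driver):

    struct bman_pool *pool = bman_new_pool();
    struct bm_buffer buf;
    dma_addr_t dma_addr = 0x12340000;            /* illustrative address */

    if (pool) {
            bm_buffer_set64(&buf, dma_addr);
            if (!bman_release(pool, &buf, 1))    /* seed the pool with one buffer */
                    bman_acquire(pool, &buf, 1); /* ...and take one back out */
            bman_free_pool(pool);
    }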
+
+#endif /* __FSL_BMAN_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
new file mode 100644 (file)
index 0000000..37f3eb0
--- /dev/null
@@ -0,0 +1,1074 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#include <linux/bitops.h>
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+extern u16 qm_channel_pool1;
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CSCI   0x00100000      /* Congestion State Change */
+#define QM_PIRQ_EQCI   0x00080000      /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI   0x00040000      /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI   0x00020000      /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI    0x00010000      /* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW   (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+                        QM_PIRQ_MRI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK    0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n)      (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+       return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+       union {
+               struct {
+                       u8 cfg8b_w1;
+                       u8 bpid;        /* Buffer Pool ID */
+                       u8 cfg8b_w3;
+                       u8 addr_hi;     /* high 8-bits of 40-bit address */
+                       __be32 addr_lo; /* low 32-bits of 40-bit address */
+               } __packed;
+               __be64 data;
+       };
+       __be32 cfg;     /* format, offset, length / congestion */
+       union {
+               __be32 cmd;
+               __be32 status;
+       };
+} __aligned(8);
+
+#define QM_FD_FORMAT_SG                BIT(31)
+#define QM_FD_FORMAT_LONG      BIT(30)
+#define QM_FD_FORMAT_COMPOUND  BIT(29)
+#define QM_FD_FORMAT_MASK      GENMASK(31, 29)
+#define QM_FD_OFF_SHIFT                20
+#define QM_FD_OFF_MASK         GENMASK(28, 20)
+#define QM_FD_LEN_MASK         GENMASK(19, 0)
+#define QM_FD_LEN_BIG_MASK     GENMASK(28, 0)
+
+enum qm_fd_format {
+       /*
+        * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+        * scatter-gather table. 'big' implies a 29-bit length with no offset
+        * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+        * implies a s/g-like table, where each entry itself represents a frame
+        * (contiguous or scatter-gather) and the 29-bit "length" is
+        * interpreted purely for congestion calculations, ie. a "congestion
+        * weight".
+        */
+       qm_fd_contig = 0,
+       qm_fd_contig_big = QM_FD_FORMAT_LONG,
+       qm_fd_sg = QM_FD_FORMAT_SG,
+       qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+       qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+       return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+       return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
+{
+       fd->addr_hi = upper_32_bits(addr);
+       fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/*
+ * The 'format' field indicates the interpretation of the remaining
+ * 29 bits of the 32-bit word.
+ * If 'format' is _contig or _sg, 20b length and 9b offset.
+ * If 'format' is _contig_big or _sg_big, 29b length.
+ * If 'format' is _compound, 29b "congestion weight".
+ */
+static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
+{
+       return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
+}
+
+static inline int qm_fd_get_offset(const struct qm_fd *fd)
+{
+       return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
+}
+
+static inline int qm_fd_get_length(const struct qm_fd *fd)
+{
+       return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
+}
+
+static inline int qm_fd_get_len_big(const struct qm_fd *fd)
+{
+       return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
+}
+
+static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
+                                  int off, int len)
+{
+       fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
+                             ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
+}
+
+#define qm_fd_set_contig(fd, off, len) \
+       qm_fd_set_param(fd, qm_fd_contig, off, len)
+#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
+#define qm_fd_set_contig_big(fd, len) \
+       qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
+#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+
+static inline void qm_fd_clear_fd(struct qm_fd *fd)
+{
+       fd->data = 0;
+       fd->cfg = 0;
+       fd->cmd = 0;
+}
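A small sketch of building a frame descriptor with the helpers above (dma_addr
and the offset/length values are illustrative):

    struct qm_fd fd;
    dma_addr_t dma_addr = 0x12340000;   /* illustrative 40-bit address */

    qm_fd_clear_fd(&fd);
    qm_fd_addr_set64(&fd, dma_addr);
    qm_fd_set_contig(&fd, 64, 1500);    /* contiguous frame: offset 64, length 1500 */
    /* qm_fd_get_format(&fd) == qm_fd_contig, qm_fd_get_offset(&fd) == 64,
       qm_fd_get_length(&fd) == 1500 */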
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+       union {
+               struct {
+                       u8 __reserved1[3];
+                       u8 addr_hi;     /* high 8-bits of 40-bit address */
+                       __be32 addr_lo; /* low 32-bits of 40-bit address */
+               };
+               __be64 data;
+       };
+       __be32 cfg;     /* E bit, F bit, length */
+       u8 __reserved2;
+       u8 bpid;
+       __be16 offset; /* 13-bit, _res[13-15]*/
+} __packed;
+
+#define QM_SG_LEN_MASK GENMASK(29, 0)
+#define QM_SG_OFF_MASK GENMASK(12, 0)
+#define QM_SG_FIN      BIT(30)
+#define QM_SG_EXT      BIT(31)
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+       return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+       return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
+{
+       sg->addr_hi = upper_32_bits(addr);
+       sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
+{
+       return be32_to_cpu(sg->cfg) & QM_SG_FIN;
+}
+
+static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
+{
+       return be32_to_cpu(sg->cfg) & QM_SG_EXT;
+}
+
+static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
+{
+       return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
+}
+
+static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
+{
+       sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
+}
+
+static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
+{
+       sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
+}
+
+static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
+{
+       return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+}
+
+/* "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+       u8 verb;
+       u8 stat;
+       u16 seqnum;     /* 15-bit */
+       u8 tok;
+       u8 __reserved2[3];
+       u32 fqid;       /* 24-bit */
+       u32 contextB;
+       struct qm_fd fd;
+       u8 __reserved4[32];
+} __packed;
+#define QM_DQRR_VERB_VBIT              0x80
+#define QM_DQRR_VERB_MASK              0x7f    /* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE     0x60    /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY          0x80    /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE     0x40    /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE  0x20    /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID          0x10    /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED       0x02    /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED      0x01    /* VDQCR or PDQCR expired*/
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+union qm_mr_entry {
+       struct {
+               u8 verb;
+               u8 __reserved[63];
+       };
+       struct {
+               u8 verb;
+               u8 dca;
+               u16 seqnum;
+               u8 rc;          /* Rej Code: 8-bit */
+               u8 orp_hi;      /* ORP: 24-bit */
+               u16 orp_lo;
+               u32 fqid;       /* 24-bit */
+               u32 tag;
+               struct qm_fd fd;
+               u8 __reserved1[32];
+       } __packed ern;
+       struct {
+               u8 verb;
+               u8 fqs;         /* Frame Queue Status */
+               u8 __reserved1[6];
+               u32 fqid;       /* 24-bit */
+               u32 contextB;
+               u8 __reserved2[48];
+       } __packed fq;          /* FQRN/FQRNI/FQRL/FQPN */
+};
+#define QM_MR_VERB_VBIT                        0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK           0x27
+#define QM_MR_VERB_DC_ERN              0x20
+#define QM_MR_VERB_FQRN                        0x21
+#define QM_MR_VERB_FQRNI               0x22
+#define QM_MR_VERB_FQRL                        0x23
+#define QM_MR_VERB_FQPN                        0x24
+#define QM_MR_RC_MASK                  0xf0    /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP          0x00
+#define QM_MR_RC_WRED                  0x10
+#define QM_MR_RC_ERROR                 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY       0x30
+#define QM_MR_RC_ORPWINDOW_LATE                0x40
+#define QM_MR_RC_FQ_TAILDROP           0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED     0x60
+#define QM_MR_RC_ORP_ZERO              0x70
+#define QM_MR_FQS_ORLPRESENT           0x02    /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY             0x01    /* FQ has enqueued frames */
+
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+       /* See QM_STASHING_EXCL_<...> */
+       u8 exclusive;
+       /* Numbers of cachelines */
+       u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
+};
+
+struct qm_fqd_oac {
+       /* "Overhead Accounting Control", see QM_OAC_<...> */
+       u8 oac; /* oac[6-7], _res[0-5] */
+       /* Two's-complement value (-128 to +127) */
+       s8 oal; /* "Overhead Accounting Length" */
+};
+
+struct qm_fqd {
+       /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
+       u8 orpc;
+       u8 cgid;
+       __be16 fq_ctrl; /* See QM_FQCTRL_<...> */
+       __be16 dest_wq; /* channel[3-15], wq[0-2] */
+       __be16 ics_cred; /* 15-bit */
+       /*
+        * For "Initialize Frame Queue" commands, the write-enable mask
+        * determines whether 'td' or 'oac_init' is observed. For query
+        * commands, this field is always 'td', and 'oac_query' (below) reflects
+        * the Overhead ACcounting values.
+        */
+       union {
+               __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
+               struct qm_fqd_oac oac_init;
+       };
+       __be32 context_b;
+       union {
+               /* Treat it as 64-bit opaque */
+               __be64 opaque;
+               struct {
+                       __be32 hi;
+                       __be32 lo;
+               };
+               /* Treat it as s/w portal stashing config */
+               /* see "FQD Context_A field used for [...]" */
+               struct {
+                       struct qm_fqd_stashing stashing;
+                       /*
+                        * 48-bit address of FQ context to
+                        * stash, must be cacheline-aligned
+                        */
+                       __be16 context_hi;
+                       __be32 context_lo;
+               } __packed;
+       } context_a;
+       struct qm_fqd_oac oac_query;
+} __packed;
+
+#define QM_FQD_CHAN_OFF                3
+#define QM_FQD_WQ_MASK         GENMASK(2, 0)
+#define QM_FQD_TD_EXP_MASK     GENMASK(4, 0)
+#define QM_FQD_TD_MANT_OFF     5
+#define QM_FQD_TD_MANT_MASK    GENMASK(12, 5)
+#define QM_FQD_TD_MAX          0xe0000000
+#define QM_FQD_TD_MANT_MAX     0xff
+#define QM_FQD_OAC_OFF         6
+#define QM_FQD_AS_OFF          4
+#define QM_FQD_DS_OFF          2
+#define QM_FQD_XS_MASK         0x3
+
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+       return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+       return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+       return qm_fqd_stashing_get64(fqd);
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+       fqd->context_a.context_hi = upper_32_bits(addr);
+       fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+       fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+       fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
+                                     int roundup)
+{
+       u32 e = 0;
+       int td, oddbit = 0;
+
+       if (val > QM_FQD_TD_MAX)
+               return -ERANGE;
+
+       while (val > QM_FQD_TD_MANT_MAX) {
+               oddbit = val & 1;
+               val >>= 1;
+               e++;
+               if (roundup && oddbit)
+                       val++;
+       }
+
+       td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
+       td |= (e & QM_FQD_TD_EXP_MASK);
+       fqd->td = cpu_to_be16(td);
+       return 0;
+}
+/* and the other direction */
+static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
+{
+       int td = be16_to_cpu(fqd->td);
+
+       return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
+               << (td & QM_FQD_TD_EXP_MASK);
+}
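Worked example (hand-computed): qm_fqd_set_taildrop(&fqd, 1000, 0) halves the
value twice to fit the 8-bit mantissa, storing mant = 250 and exp = 2, and
qm_fqd_get_taildrop() then reconstructs 250 << 2 = 1000. With roundup set, any
odd bit lost during the halving bumps the mantissa, so the stored threshold
never ends up below the requested one.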
+
+static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
+{
+       struct qm_fqd_stashing *st = &fqd->context_a.stashing;
+
+       st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
+                ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
+                (cs & QM_FQD_XS_MASK);
+}
+
+static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
+{
+       return fqd->context_a.stashing.cl;
+}
+
+static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
+{
+       fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
+}
+
+static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
+{
+       fqd->oac_init.oal = val;
+}
+
+static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
+{
+       fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
+                                  (wq & QM_FQD_WQ_MASK));
+}
+
+static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
+{
+       return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
+}
+
+static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
+{
+       return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
+}
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK         0x07ff  /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE          0x0400  /* Congestion Group Enable */
+#define QM_FQCTRL_TDE          0x0200  /* Tail-Drop Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080  /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH     0x0040  /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR    0x0008  /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK   0x0004  /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE   0x0002  /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE        0x0001  /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE  QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION    0x04
+#define QM_STASHING_EXCL_DATA          0x02
+#define QM_STASHING_EXCL_CTX           0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS             0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG              0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ *   MaxTH = MA * (2 ^ Mn)
+ *   Slope = SA / (2 ^ Sn)
+ *    MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+       /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
+       u32 word;
+};
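As a worked instance of the formulas above: MA = 64 with Mn = 4 gives
MaxTH = 64 * 2^4 = 1024, and SA = 128 with Sn = 3 gives Slope = 128 / 2^3 = 16
(whether these count bytes or frames depends on the CGR 'mode' field further down).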
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ *   CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+       /* _res[13-15], TA[5-12], Tn[0-4] */
+       u16 word;
+};
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+       struct qm_cgr_wr_parm wr_parm_g;
+       struct qm_cgr_wr_parm wr_parm_y;
+       struct qm_cgr_wr_parm wr_parm_r;
+       u8 wr_en_g;     /* boolean, use QM_CGR_EN */
+       u8 wr_en_y;     /* boolean, use QM_CGR_EN */
+       u8 wr_en_r;     /* boolean, use QM_CGR_EN */
+       u8 cscn_en;     /* boolean, use QM_CGR_EN */
+       union {
+               struct {
+                       u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+                       u16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */
+               };
+               u32 cscn_targ;  /* use QM_CGR_TARG_* */
+       };
+       u8 cstd_en;     /* boolean, use QM_CGR_EN */
+       u8 cs;          /* boolean, only used in query response */
+       struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
+       u8 mode;        /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN              0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP       0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n)  (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0      0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1      0x00100000 /*                      : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+       return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+                                       int roundup)
+{
+       u32 e = 0;
+       int oddbit = 0;
+
+       while (val > 0xff) {
+               oddbit = val & 1;
+               val >>= 1;
+               e++;
+               if (roundup && oddbit)
+                       val++;
+       }
+       th->word = ((val & 0xff) << 5) | (e & 0x1f);
+       return 0;
+}
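+
+/*
+ * Illustrative use of the helpers above: requesting a threshold of 3000 with
+ * rounding up stores TA = 188, Tn = 4, so the value actually encoded (and
+ * returned by qm_cgr_cs_thres_get64()) is 188 * (2 ^ 4) = 3008.
+ *
+ *     struct qm_cgr_cs_thres th;
+ *
+ *     qm_cgr_cs_thres_set64(&th, 3000, 1);
+ *     // qm_cgr_cs_thres_get64(&th) == 3008
+ */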
+
+/* "Initialize FQ" */
+struct qm_mcc_initfq {
+       u8 __reserved1[2];
+       u16 we_mask;    /* Write Enable Mask */
+       u32 fqid;       /* 24-bit */
+       u16 count;      /* Initialises 'count+1' FQDs */
+       struct qm_fqd fqd; /* the FQD fields go here */
+       u8 __reserved2[30];
+} __packed;
+/* "Initialize/Modify CGR" */
+struct qm_mcc_initcgr {
+       u8 __reserve1[2];
+       u16 we_mask;    /* Write Enable Mask */
+       struct __qm_mc_cgr cgr; /* CGR fields */
+       u8 __reserved2[2];
+       u8 cgid;
+       u8 __reserved3[32];
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK              0x01ff  /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC               0x0100
+#define QM_INITFQ_WE_ORPC              0x0080
+#define QM_INITFQ_WE_CGID              0x0040
+#define QM_INITFQ_WE_FQCTRL            0x0020
+#define QM_INITFQ_WE_DESTWQ            0x0010
+#define QM_INITFQ_WE_ICSCRED           0x0008
+#define QM_INITFQ_WE_TDTHRESH          0x0004
+#define QM_INITFQ_WE_CONTEXTB          0x0002
+#define QM_INITFQ_WE_CONTEXTA          0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK                 0x07ff  /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G            0x0400
+#define QM_CGR_WE_WR_PARM_Y            0x0200
+#define QM_CGR_WE_WR_PARM_R            0x0100
+#define QM_CGR_WE_WR_EN_G              0x0080
+#define QM_CGR_WE_WR_EN_Y              0x0040
+#define QM_CGR_WE_WR_EN_R              0x0020
+#define QM_CGR_WE_CSCN_EN              0x0010
+#define QM_CGR_WE_CSCN_TARG            0x0008
+#define QM_CGR_WE_CSTD_EN              0x0004
+#define QM_CGR_WE_CS_THRES             0x0002
+#define QM_CGR_WE_MODE                 0x0001
+
+#define QMAN_CGR_FLAG_USE_INIT      0x00000001
+
+       /* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+       /* DQRR entry can be consumed */
+       qman_cb_dqrr_consume,
+       /* Like _consume, but requests parking - FQ must be held-active */
+       qman_cb_dqrr_park,
+       /* Does not consume, for DCA mode only. */
+       qman_cb_dqrr_defer,
+       /*
+        * Stop processing without consuming this ring entry. Exits the current
+        * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+        * an interrupt handler, the callback would typically call
+        * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+        * otherwise the interrupt will reassert immediately.
+        */
+       qman_cb_dqrr_stop,
+       /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+       qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+                                       struct qman_fq *fq,
+                                       const struct qm_dqrr_entry *dqrr);
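+
+/*
+ * A minimal sketch of a DQRR callback; the handler name and the receive
+ * routine it calls are illustrative only, not part of this API:
+ *
+ *     static enum qman_cb_dqrr_result my_rx_dqrr(struct qman_portal *qm,
+ *                                                struct qman_fq *fq,
+ *                                                const struct qm_dqrr_entry *dqrr)
+ *     {
+ *             my_driver_rx(dqrr);     // hand the entry to driver-specific code
+ *             return qman_cb_dqrr_consume;
+ *     }
+ */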
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+                          const union qm_mr_entry *msg);
+
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". A state such as "retired" is
+ * not assumed until the transition completes (ie. QMAN_FQ_STATE_CHANGING is set
+ * until then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+       qman_fq_state_oos,
+       qman_fq_state_parked,
+       qman_fq_state_sched,
+       qman_fq_state_retired
+};
+
+#define QMAN_FQ_STATE_CHANGING      0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE            0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL           0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS      0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN        0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR         0x08000000 /* being volatile dequeued */
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ *     // myfq is allocated and driver_fq callbacks filled in;
+ *     struct my_fq {
+ *        struct qman_fq base;
+ *        int an_extra_field;
+ *        [ ... add other fields to be associated with each FQ ...]
+ *     } *myfq = some_my_fq_allocator();
+ *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ *     // in a dequeue callback, access extra fields from 'fq' via a cast;
+ *     struct my_fq *myfq = (struct my_fq *)fq;
+ *     do_something_with(myfq->an_extra_field);
+ *     [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify however
+ *     many cachelines are required to stash 'struct my_fq', to accelerate not
+ *     only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+       qman_cb_dqrr dqrr;      /* for dequeued frames */
+       qman_cb_mr ern;         /* for s/w ERNs */
+       qman_cb_mr fqs;         /* frame-queue state changes */
+};
+
+struct qman_fq {
+       /* Caller of qman_create_fq() provides these demux callbacks */
+       struct qman_fq_cb cb;
+       /*
+        * These are internal to the driver, don't touch. In particular, they
+        * may change, be removed, or extended (so you shouldn't rely on
+        * sizeof(qman_fq) being a constant).
+        */
+       u32 fqid, idx;
+       unsigned long flags;
+       enum qman_fq_state state;
+       int cgr_groupid;
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+                           struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+       /* Set these prior to qman_create_cgr() */
+       u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+       qman_cb_cgr cb;
+       /* These are private to the driver */
+       u16 chan; /* portal channel this object is created on */
+       struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE             0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY      0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED      0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL      0x00000004 /* set dest portal */
+
+       /* Portal Management */
+/**
+ * qman_p_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions).
+ */
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_p_irqsource_remove - remove processing sources from being int-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions.
+ */
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+
+/**
+ * qman_affine_channel - return the channel ID of an affine portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ */
+struct qman_portal *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_p_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * The return value represents the number of DQRR entries processed.
+ */
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
+
+/**
+ * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
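+
+/*
+ * Illustrative polling sketch, assuming DQRR is not interrupt-driven on this
+ * portal and that pool channel 3 is the one of interest (both are assumptions
+ * made for the example, not requirements of the API):
+ *
+ *     struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
+ *
+ *     qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL(3));
+ *     while (!kthread_should_stop())
+ *             qman_p_poll_dqrr(p, 16);
+ */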
+
+       /* FQ management */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with; it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qman_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' or in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ *     initialised to a value used by the driver for demux.
+ *   - if context_b is initialised for demux, so is context_a in case stashing
+ *     is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
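+
+/*
+ * Illustrative create/init sequence for a software-consumed frame queue,
+ * reusing the 'struct my_fq' wrapper sketched earlier; the FQID is allocated
+ * dynamically and work queue 3 is an arbitrary choice:
+ *
+ *     struct qm_mcc_initfq opts = { };
+ *     int err;
+ *
+ *     err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &myfq->base);
+ *     if (!err) {
+ *             opts.we_mask = QM_INITFQ_WE_DESTWQ;
+ *             qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(-1), 3);
+ *             err = qman_init_fq(&myfq->base, QMAN_INITFQ_FLAG_SCHED, &opts);
+ *     }
+ */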
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked; this takes it to the
+ * Tentatively-Scheduled or Truly-Scheduled state, depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order-restoration-list
+ * (ORL) entries were released as ERNs at the time of retirement, they must all
+ * be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
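+
+/*
+ * Illustrative teardown sketch: retire the FQ, and only take it out of
+ * service once retirement has completed (a return of +1 from qman_retire_fq()
+ * means the FQRN arrives later via the 'fqs' callback, so the caller-provided
+ * wait_for_fqrn() below stands in for that synchronisation):
+ *
+ *     u32 flags;
+ *     int err;
+ *
+ *     err = qman_retire_fq(fq, &flags);
+ *     if (err == 1)
+ *             wait_for_fqrn(fq);
+ *     err = qman_oos_fq(fq);
+ *     if (!err)
+ *             qman_destroy_fq(fq);
+ */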
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ *
+ * Fills an entry in the EQCR of the local portal to enqueue the frame
+ * described by @fd. The descriptor details are copied from @fd to the EQCR
+ * entry; the 'pid' field is ignored. The return value is non-zero on error,
+ * such as ring full.
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count);
+#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
+
+/**
+ * qman_release_fqid - Release the specified frame queue ID
+ * @fqid: the FQID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * FQID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_fqid(u32 fqid);
+
+       /* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count);
+#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
+
+/**
+ * qman_release_pool - Release the specified pool-channel ID
+ * @id: the pool-chan ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * pool-channel ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_pool(u32 id);
+
+       /* CGR management */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If @opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) is used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+                   struct qm_mcc_initcgr *opts);
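+
+/*
+ * Illustrative CGR setup, assuming 'my_cgrid' came from qman_alloc_cgrid()
+ * and 'my_cgr_cb' is a caller-defined qman_cb_cgr handler; the threshold of
+ * 64 * 1024 is an arbitrary example value:
+ *
+ *     struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cgr_cb };
+ *     struct qm_mcc_initcgr opts = { };
+ *     int err;
+ *
+ *     opts.we_mask = QM_CGR_WE_CS_THRES | QM_CGR_WE_CSCN_EN;
+ *     opts.cgr.cscn_en = QM_CGR_EN;
+ *     qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 64 * 1024, 1);
+ *     err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */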
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_query_cgr_congested - Queries CGR's congestion status
+ * @cgr: the 'cgr' object to query
+ * @result: returns @cgr's congestion status, 1 (true) if congested
+ */
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count);
+#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
+
+/**
+ * qman_release_cgrid - Release the specified CGR ID
+ * @id: the CGR ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * CGR ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_cgrid(u32 id);
+
+#endif /* __FSL_QMAN_H */
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/export.h b/tools/testing/selftests/powerpc/copyloops/asm/export.h
new file mode 100644 (file)
index 0000000..2d14a9b
--- /dev/null
@@ -0,0 +1 @@
+#define EXPORT_SYMBOL(x)
diff --git a/tools/testing/selftests/powerpc/signal/.gitignore b/tools/testing/selftests/powerpc/signal/.gitignore
new file mode 100644 (file)
index 0000000..1b89224
--- /dev/null
@@ -0,0 +1,2 @@
+signal
+signal_tm
diff --git a/tools/testing/selftests/powerpc/stringloops/asm/export.h b/tools/testing/selftests/powerpc/stringloops/asm/export.h
new file mode 100644 (file)
index 0000000..2d14a9b
--- /dev/null
@@ -0,0 +1 @@
+#define EXPORT_SYMBOL(x)
index 82c0a9c..4276217 100644 (file)
@@ -7,3 +7,7 @@ tm-fork
 tm-tar
 tm-tmspr
 tm-exec
+tm-signal-context-chk-fpu
+tm-signal-context-chk-gpr
+tm-signal-context-chk-vmx
+tm-signal-context-chk-vsx