Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Mon, 3 Oct 2016 01:17:07 +0000 (21:17 -0400)
committer David S. Miller <davem@davemloft.net>
          Mon, 3 Oct 2016 02:20:41 +0000 (22:20 -0400)
Three sets of overlapping changes.  Nothing serious.

Signed-off-by: David S. Miller <davem@davemloft.net>
141 files changed:
.mailmap
Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
MAINTAINERS
Makefile
arch/arm/boot/compressed/head.S
arch/arm/include/asm/dma-mapping.h
arch/arm/kernel/devtree.c
arch/arm64/include/asm/debug-monitors.h
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/smp.c
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/ath79/clock.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/octeon-platform.c
arch/mips/dec/int-handler.S
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/uprobes.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/genex.S
arch/mips/kernel/mips-r2-to-r6-emul.c
arch/mips/kernel/process.c
arch/mips/kernel/setup.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/smp.c
arch/mips/kernel/uprobes.c
arch/mips/kernel/vdso.c
arch/mips/math-emu/dsemul.c
arch/mips/mm/c-r4k.c
arch/mips/mm/init.c
arch/mips/mti-malta/malta-setup.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/sh/include/asm/atomic-llsc.h
arch/sparc/include/asm/page_64.h
arch/sparc/include/asm/smp_64.h
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c
arch/sparc/mm/fault_64.c
arch/sparc/mm/init_64.c
arch/sparc/mm/tlb.c
arch/sparc/mm/tsb.c
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/vdso2c.h
arch/x86/events/intel/bts.c
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/setup.c
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi_64.c
block/blk-mq.c
block/blk-throttle.c
crypto/rsa-pkcs1pad.c
drivers/acpi/nfit/core.c
drivers/base/regmap/regmap.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/udl/udl_fb.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/input/joydev.c
drivers/input/touchscreen/silead.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mips-gic.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/mtk_ecc.c
drivers/mtd/nand/mtk_nand.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/omap2.c
drivers/net/can/dev.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/fec_main.c
drivers/nvdimm/core.c
drivers/nvdimm/nd.h
drivers/nvdimm/region_devs.c
drivers/nvme/host/rdma.c
drivers/scsi/hosts.c
drivers/scsi/scsi.c
drivers/scsi/scsi_priv.h
fs/btrfs/extent-tree.c
fs/btrfs/ioctl.c
fs/configfs/file.c
fs/ocfs2/aops.c
include/linux/can/dev.h
include/linux/dma-mapping.h
include/linux/mroute.h
include/linux/mroute6.h
include/linux/pagemap.h
include/linux/property.h
include/linux/swap.h
include/net/sctp/structs.h
include/scsi/scsi_host.h
kernel/cgroup.c
kernel/cpuset.c
kernel/events/core.c
kernel/irq/chip.c
kernel/trace/trace.c
lib/Kconfig.debug
lib/radix-tree.c
mm/filemap.c
mm/huge_memory.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/shmem.c
mm/vmscan.c
mm/workingset.c
net/ipv4/ipmr.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/ip6_gre.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/sched/act_ife.c
net/sched/sch_qfq.c
net/sched/sch_sfb.c
net/sctp/chunk.c
net/sctp/outqueue.c
net/sctp/sctp_diag.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/vmw_vsock/af_vsock.c
scripts/recordmcount.c
scripts/recordmcount.pl
security/keys/encrypted-keys/encrypted.c
tools/testing/nvdimm/test/nfit.c
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/multiorder.c

index de22dae..1dab0a1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -69,6 +69,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
index 1112e0d..820fee4 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -13,6 +13,7 @@ Required properties:
 - touchscreen-size-y     : See touchscreen.txt
 
 Optional properties:
+- firmware-name                  : File basename (string) for board specific firmware
 - touchscreen-inverted-x  : See touchscreen.txt
 - touchscreen-inverted-y  : See touchscreen.txt
 - touchscreen-swapped-x-y : See touchscreen.txt
index 20de5f9..669909e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8753,7 +8753,7 @@ F:        drivers/oprofile/
 F:     include/linux/oprofile.h
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M:     Mark Fasheh <mfasheh@suse.com>
+M:     Mark Fasheh <mfasheh@versity.com>
 M:     Joel Becker <jlbec@evilplan.org>
 L:     ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:     http://ocfs2.wiki.kernel.org
@@ -11641,7 +11641,7 @@ F:      Documentation/devicetree/bindings/thermal/
 THERMAL/CPU_COOLING
 M:     Amit Daniel Kachhap <amit.kachhap@gmail.com>
 M:     Viresh Kumar <viresh.kumar@linaro.org>
-M:     Javi Merino <javi.merino@arm.com>
+M:     Javi Merino <javi.merino@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/thermal/cpu-cooling-api.txt
index 74e22c2..80b8671 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index af11c2f..fc6d541 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -779,7 +779,7 @@ __armv7_mmu_cache_on:
                orrne   r0, r0, #1              @ MMU enabled
                movne   r1, #0xfffffffd         @ domain 0 = client
                bic     r6, r6, #1 << 31        @ 32-bit translation system
-               bic     r6, r6, #3 << 0         @ use only ttbr0
+               bic     r6, r6, #(7 << 0) | (1 << 4)    @ use only ttbr0
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
                mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
index d009f79..bf02dbd 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -111,7 +111,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 /* The ARM override for dma_max_pfn() */
 static inline unsigned long dma_max_pfn(struct device *dev)
 {
-       return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+       return dma_to_pfn(dev, *dev->dma_mask);
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)
 
index 40ecd5f..f676feb 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -88,6 +88,8 @@ void __init arm_dt_init_cpu_maps(void)
                return;
 
        for_each_child_of_node(cpus, cpu) {
+               const __be32 *cell;
+               int prop_bytes;
                u32 hwid;
 
                if (of_node_cmp(cpu->type, "cpu"))
@@ -99,7 +101,8 @@ void __init arm_dt_init_cpu_maps(void)
                 * properties is considered invalid to build the
                 * cpu_logical_map.
                 */
-               if (of_property_read_u32(cpu, "reg", &hwid)) {
+               cell = of_get_property(cpu, "reg", &prop_bytes);
+               if (!cell || prop_bytes < sizeof(*cell)) {
                        pr_debug(" * %s missing reg property\n",
                                     cpu->full_name);
                        of_node_put(cpu);
@@ -107,10 +110,15 @@ void __init arm_dt_init_cpu_maps(void)
                }
 
                /*
-                * 8 MSBs must be set to 0 in the DT since the reg property
+                * Bits n:24 must be set to 0 in the DT since the reg property
                 * defines the MPIDR[23:0].
                 */
-               if (hwid & ~MPIDR_HWID_BITMASK) {
+               do {
+                       hwid = be32_to_cpu(*cell++);
+                       prop_bytes -= sizeof(*cell);
+               } while (!hwid && prop_bytes > 0);
+
+               if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) {
                        of_node_put(cpu);
                        return;
                }
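
The devtree.c change above stops assuming a single-cell "reg" property: it walks however many big-endian cells the property carries, skips leading zero cells, and rejects anything left over or outside the 24-bit MPIDR hardware ID. A minimal userspace sketch of that walk, with ntohl() standing in for be32_to_cpu() (an illustration, not the kernel API):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MPIDR_HWID_BITMASK 0xffffffu    /* MPIDR[23:0] */

    /* Walk big-endian 32-bit cells, skipping leading zero cells. */
    static bool parse_cpu_reg(const uint32_t *cell, size_t prop_bytes,
                              uint32_t *hwid)
    {
            if (cell == NULL || prop_bytes < sizeof(*cell))
                    return false;           /* missing or truncated property */

            do {
                    *hwid = ntohl(*cell++); /* stand-in for be32_to_cpu() */
                    prop_bytes -= sizeof(*cell);
            } while (*hwid == 0 && prop_bytes > 0);

            /* Leftover cells or bits above MPIDR[23:0] invalidate the map. */
            return prop_bytes == 0 && (*hwid & ~MPIDR_HWID_BITMASK) == 0;
    }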
index 4b6b3f7..b71420a 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -61,8 +61,6 @@
 
 #define AARCH64_BREAK_KGDB_DYN_DBG     \
        (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
-#define KGDB_DYN_BRK_INS_BYTE(x)       \
-       ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
 
 #define CACHE_FLUSH_IS_SAFE            1
 
index 8c57f64..e017a94 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bug.h>
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
 #include <asm/traps.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
        unregister_die_notifier(&kgdb_notifier);
 }
 
-/*
- * ARM instructions are always in LE.
- * Break instruction is encoded in LE format
- */
-struct kgdb_arch arch_kgdb_ops = {
-       .gdb_bpt_instr = {
-               KGDB_DYN_BRK_INS_BYTE(0),
-               KGDB_DYN_BRK_INS_BYTE(1),
-               KGDB_DYN_BRK_INS_BYTE(2),
-               KGDB_DYN_BRK_INS_BYTE(3),
-       }
-};
+struct kgdb_arch arch_kgdb_ops;
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+       int err;
+
+       BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
+
+       err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
+       if (err)
+               return err;
+
+       return aarch64_insn_write((void *)bpt->bpt_addr,
+                       (u32)AARCH64_BREAK_KGDB_DYN_DBG);
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+       return aarch64_insn_write((void *)bpt->bpt_addr,
+                       *(u32 *)bpt->saved_instr);
+}
index d93d433..3ff173e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        return ret;
 }
 
-static void smp_store_cpu_info(unsigned int cpuid)
-{
-       store_cpu_topology(cpuid);
-       numa_store_cpu_info(cpuid);
-}
-
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
         */
        notify_cpu_starting(cpu);
 
-       smp_store_cpu_info(cpu);
+       store_cpu_topology(cpu);
 
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        int err;
        unsigned int cpu;
+       unsigned int this_cpu;
 
        init_cpu_topology();
 
-       smp_store_cpu_info(smp_processor_id());
+       this_cpu = smp_processor_id();
+       store_cpu_topology(this_cpu);
+       numa_store_cpu_info(this_cpu);
 
        /*
         * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                        continue;
 
                set_cpu_present(cpu, true);
+               numa_store_cpu_info(cpu);
        }
 }
 
index 2638856..212ff92 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@ config MIPS
        select ARCH_CLOCKSOURCE_DATA
        select HANDLE_DOMAIN_IRQ
        select HAVE_EXIT_THREAD
+       select HAVE_REGS_AND_STACK_ACCESS_API
 
 menu "Machine selection"
 
index f0e314c..7f975b2 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -113,42 +113,6 @@ config SPINLOCK_TEST
        help
          Add several files to the debugfs to test spinlock speed.
 
-if CPU_MIPSR6
-
-choice
-       prompt "Compact branch policy"
-       default MIPS_COMPACT_BRANCHES_OPTIMAL
-
-config MIPS_COMPACT_BRANCHES_NEVER
-       bool "Never (force delay slot branches)"
-       help
-         Pass the -mcompact-branches=never flag to the compiler in order to
-         force it to always emit branches with delay slots, and make no use
-         of the compact branch instructions introduced by MIPSr6. This is
-         useful if you suspect there may be an issue with compact branches in
-         either the compiler or the CPU.
-
-config MIPS_COMPACT_BRANCHES_OPTIMAL
-       bool "Optimal (use where beneficial)"
-       help
-         Pass the -mcompact-branches=optimal flag to the compiler in order for
-         it to make use of compact branch instructions where it deems them
-         beneficial, and use branches with delay slots elsewhere. This is the
-         default compiler behaviour, and should be used unless you have a
-         reason to choose otherwise.
-
-config MIPS_COMPACT_BRANCHES_ALWAYS
-       bool "Always (force compact branches)"
-       help
-         Pass the -mcompact-branches=always flag to the compiler in order to
-         force it to always emit compact branches, making no use of branch
-         instructions with delay slots. This can result in more compact code
-         which may be beneficial in some scenarios.
-
-endchoice
-
-endif # CPU_MIPSR6
-
 config SCACHE_DEBUGFS
        bool "L2 cache debugfs entries"
        depends on DEBUG_FS
index efd7a9d..598ab29 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -203,10 +203,6 @@ endif
 toolchain-virt                         := $(call cc-option-yn,$(mips-cflags) -mvirt)
 cflags-$(toolchain-virt)               += -DTOOLCHAIN_SUPPORTS_VIRT
 
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER)   += -mcompact-branches=never
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS)  += -mcompact-branches=always
-
 #
 # Firmware support
 #
index 2e73784..cc3a1e3 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
        struct clk *clk;
 
        clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
-       if (!clk)
+       if (IS_ERR(clk))
                panic("failed to allocate %s clock structure", name);
 
        return clk;
index 5a9b87b..c1eb1ff 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1619,6 +1619,12 @@ static int __init octeon_irq_init_gpio(
                return -ENOMEM;
        }
 
+       /*
+        * Clear the OF_POPULATED flag that was set by of_irq_init()
+        * so that all GPIO devices will be probed.
+        */
+       of_node_clear_flag(gpio_node, OF_POPULATED);
+
        return 0;
 }
 /*
index b31fbc9..37a932d 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
 {
        return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
-device_initcall(octeon_publish_devices);
+arch_initcall(octeon_publish_devices);
 
 MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
 MODULE_LICENSE("GPL");
index d7b9918..1910223 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,cpu_mask_nr_tbl
+               # open coded PTR_LA t1, cpu_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+               # open coded la t1, cpu_mask_nr_tbl
+               lui     t1, %hi(cpu_mask_nr_tbl)
+               addiu   t1, %lo(cpu_mask_nr_tbl)
+
+#endif
+#if (_MIPS_SZPTR == 64)
+               # open coded dla t1, cpu_mask_nr_tbl
+               .set    push
+               .set    noat
+               lui     t1, %highest(cpu_mask_nr_tbl)
+               lui     AT, %hi(cpu_mask_nr_tbl)
+               daddiu  t1, t1, %higher(cpu_mask_nr_tbl)
+               daddiu  AT, AT, %lo(cpu_mask_nr_tbl)
+               dsll    t1, 32
+               daddu   t1, t1, AT
+               .set    pop
+#endif
 1:             lw      t2,(t1)
                nop
                and     t2,t0
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,asic_mask_nr_tbl
+               # open coded PTR_LA t1,asic_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+               # open coded la t1, asic_mask_nr_tbl
+               lui     t1, %hi(asic_mask_nr_tbl)
+               addiu   t1, %lo(asic_mask_nr_tbl)
+
+#endif
+#if (_MIPS_SZPTR == 64)
+               # open coded dla t1, asic_mask_nr_tbl
+               .set    push
+               .set    noat
+               lui     t1, %highest(asic_mask_nr_tbl)
+               lui     AT, %hi(asic_mask_nr_tbl)
+               daddiu  t1, t1, %higher(asic_mask_nr_tbl)
+               daddiu  AT, AT, %lo(asic_mask_nr_tbl)
+               dsll    t1, 32
+               daddu   t1, t1, AT
+               .set    pop
+#endif
 2:             lw      t2,(t1)
                nop
                and     t2,t0
index 56584a6..83054f7 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
        ldc1    $f28, THREAD_FPR28(\thread)
        ldc1    $f30, THREAD_FPR30(\thread)
        ctc1    \tmp, fcr31
+       .set    pop
        .endm
 
        .macro  fpu_restore_16odd thread
index 0cf5ac1..8ff2cbd 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -15,8 +15,8 @@
 static inline bool __should_swizzle_bits(volatile void *a)
 {
        extern const bool octeon_should_swizzle_table[];
+       u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
 
-       unsigned long did = ((unsigned long)a >> 40) & 0xff;
        return octeon_should_swizzle_table[did];
 }
 
@@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)
 
 #define __should_swizzle_bits(a)       false
 
-static inline bool __should_swizzle_addr(unsigned long p)
+static inline bool __should_swizzle_addr(u64 p)
 {
        /* boot bus? */
        return ((p >> 40) & 0xff) == 0;
index 2f82bfa..c9f5769 100644
--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
 #define CP0_EBASE $15, 1
 
        .macro  kernel_entry_setup
+#ifdef CONFIG_SMP
        mfc0    t0, CP0_EBASE
        andi    t0, t0, 0x3ff           # CPUNum
        beqz    t0, 1f
        # CPUs other than zero goto smp_bootstrap
        j       smp_bootstrap
+#endif /* CONFIG_SMP */
 
 1:
        .endm
index 58e7874..4fafeef 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -458,10 +458,21 @@ static inline int mips_cm_revision(void)
 static inline unsigned int mips_cm_max_vp_width(void)
 {
        extern int smp_num_siblings;
+       uint32_t cfg;
 
        if (mips_cm_revision() >= CM_REV_CM3)
                return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
 
+       if (mips_cm_present()) {
+               /*
+                * We presume that all cores in the system will have the same
+                * number of VP(E)s, and if that ever changes then this will
+                * need revisiting.
+                */
+               cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+               return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+       }
+
        if (IS_ENABLED(CONFIG_SMP))
                return smp_num_siblings;
 
index def9d8d..7dd2dd4 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
 
 #define MIPS_CONF7_IAR         (_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR          (_ULCAST_(1) << 16)
-/* FTLB probability bits for R6 */
-#define MIPS_CONF7_FTLBP_SHIFT (18)
 
 /* WatchLo* register definitions */
 #define MIPS_WATCHLO_IRW       (_ULCAST_(0x7) << 0)
index 34c325c..70a4a2f 100644
--- a/arch/mips/include/asm/uprobes.h
+++ b/arch/mips/include/asm/uprobes.h
@@ -36,7 +36,6 @@ struct arch_uprobe {
        unsigned long   resume_epc;
        u32     insn[2];
        u32     ixol[2];
-       union   mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
 };
 
 struct arch_uprobe_task {
index a88d442..dd31754 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -352,7 +352,12 @@ __setup("nohtw", htw_disable);
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+enum ftlb_flags {
+       FTLB_EN         = 1 << 0,
+       FTLB_SET_PROB   = 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
 
 static int __init ftlb_disable(char *s)
 {
@@ -371,8 +376,6 @@ static int __init ftlb_disable(char *s)
                return 1;
        }
 
-       back_to_back_c0_hazard();
-
        config4 = read_c0_config4();
 
        /* Check that FTLB has been disabled */
@@ -531,7 +534,7 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
                return 3;
 }
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
 {
        unsigned int config;
 
@@ -542,33 +545,33 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
        case CPU_P6600:
                /* proAptiv & related cores use Config6 to enable the FTLB */
                config = read_c0_config6();
-               /* Clear the old probability value */
-               config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
-               if (enable)
-                       /* Enable FTLB */
-                       write_c0_config6(config |
-                                        (calculate_ftlb_probability(c)
-                                         << MIPS_CONF6_FTLBP_SHIFT)
-                                        | MIPS_CONF6_FTLBEN);
+
+               if (flags & FTLB_EN)
+                       config |= MIPS_CONF6_FTLBEN;
                else
-                       /* Disable FTLB */
-                       write_c0_config6(config &  ~MIPS_CONF6_FTLBEN);
+                       config &= ~MIPS_CONF6_FTLBEN;
+
+               if (flags & FTLB_SET_PROB) {
+                       config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+                       config |= calculate_ftlb_probability(c)
+                                 << MIPS_CONF6_FTLBP_SHIFT;
+               }
+
+               write_c0_config6(config);
+               back_to_back_c0_hazard();
                break;
        case CPU_I6400:
-               /* I6400 & related cores use Config7 to configure FTLB */
-               config = read_c0_config7();
-               /* Clear the old probability value */
-               config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
-               write_c0_config7(config | (calculate_ftlb_probability(c)
-                                          << MIPS_CONF7_FTLBP_SHIFT));
-               break;
+               /* There's no way to disable the FTLB */
+               if (!(flags & FTLB_EN))
+                       return 1;
+               return 0;
        case CPU_LOONGSON3:
                /* Flush ITLB, DTLB, VTLB and FTLB */
                write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
                              LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
                /* Loongson-3 cores use Config6 to enable the FTLB */
                config = read_c0_config6();
-               if (enable)
+               if (flags & FTLB_EN)
                        /* Enable FTLB */
                        write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
                else
@@ -788,6 +791,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
                                       PAGE_SIZE, config4);
                                /* Switch FTLB off */
                                set_ftlb_enable(c, 0);
+                               mips_ftlb_disabled = 1;
                                break;
                        }
                        c->tlbsizeftlbsets = 1 <<
@@ -852,7 +856,7 @@ static void decode_configs(struct cpuinfo_mips *c)
        c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
        /* Enable FTLB if present and not disabled */
-       set_ftlb_enable(c, !mips_ftlb_disabled);
+       set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
 
        ok = decode_config0(c);                 /* Read Config registers.  */
        BUG_ON(!ok);                            /* Arch spec violation!  */
@@ -902,6 +906,9 @@ static void decode_configs(struct cpuinfo_mips *c)
                }
        }
 
+       /* configure the FTLB write probability */
+       set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
        mips_probe_watch_registers(c);
 
 #ifndef CONFIG_MIPS_CPS
index 17326a9..dc0b296 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -142,9 +142,8 @@ LEAF(__r4k_wait)
        PTR_LA  k1, __r4k_wait
        ori     k0, 0x1f        /* 32 byte rollback region */
        xori    k0, 0x1f
-       bne     k0, k1, 9f
+       bne     k0, k1, \handler
        MTC0    k0, CP0_EPC
-9:
        .set pop
        .endm
 
index c3372ca..0a7e10b 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1164,7 +1164,9 @@ fpu_emul:
                regs->regs[31] = r31;
                regs->cp0_epc = epc;
                if (!used_math()) {     /* First time FPU user.  */
+                       preempt_disable();
                        err = init_fpu();
+                       preempt_enable();
                        set_used_math();
                }
                lose_fpu(1);    /* Save FPU state for the emulator. */
index 7429ad0..d2d0615 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
                return -EOPNOTSUPP;
 
        /* Avoid inadvertently triggering emulation */
-       if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-           !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+       if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+           !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
                return -EOPNOTSUPP;
-       if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+       if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
                return -EOPNOTSUPP;
 
        /* FR = 0 not supported in MIPS R6 */
-       if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+       if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
                return -EOPNOTSUPP;
 
        /* Proceed with the mode switch */
index 36cf8d6..0d57909 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
        int x = boot_mem_map.nr_map;
        int i;
 
+       /*
+        * If the region reaches the top of the physical address space, adjust
+        * the size slightly so that (start + size) doesn't overflow
+        */
+       if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+               --size;
+
        /* Sanity check */
        if (start + size < start) {
                pr_warn("Trying to add an invalid memory region, skipped\n");
@@ -757,7 +764,6 @@ static void __init arch_mem_init(char **cmdline_p)
        device_tree_init();
        sparse_init();
        plat_swiotlb_setup();
-       paging_init();
 
        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
        /* Tell bootmem about cma reserved memblock section */
@@ -870,6 +876,7 @@ void __init setup_arch(char **cmdline_p)
        prefill_possible_map();
 
        cpu_cache_init();
+       paging_init();
 }
 
 unsigned long kernelsp[NR_CPUS];
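
The first setup.c hunk above guards against a region that ends exactly at the top of the physical address space: there start + size wraps to zero, so the existing "start + size < start" sanity check would wrongly reject a valid region. Trimming one byte keeps the arithmetic in range; a standalone version of the same guard, assuming a 64-bit phys_addr_t:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t phys_addr_t;

    static bool region_valid(phys_addr_t start, phys_addr_t *size)
    {
            /* Region touches the top of the address space: shrink it by
             * one byte so start + size no longer wraps to zero. */
            if (start + *size - 1 == UINT64_MAX)
                    --*size;

            return start + *size >= start;  /* the original sanity check */
    }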
index e9d9fc6..6183ad8 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -513,7 +513,7 @@ static void cps_cpu_die(unsigned int cpu)
                 * in which case the CPC will refuse to power down the core.
                 */
                do {
-                       mips_cm_lock_other(core, vpe_id);
+                       mips_cm_lock_other(core, 0);
                        mips_cpc_lock_other(core);
                        stat = read_cpc_co_stat_conf();
                        stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
index f95f094..b0baf48 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);
 
+       cpumask_set_cpu(cpu, &cpu_callin_map);
+       synchronise_count_slave(cpu);
+
        set_cpu_online(cpu, true);
 
        set_cpu_sibling_map(cpu);
@@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)
 
        calculate_cpu_foreign_map();
 
-       cpumask_set_cpu(cpu, &cpu_callin_map);
-
-       synchronise_count_slave(cpu);
-
        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
index 8452d93..4c7c155 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -157,7 +157,6 @@ bool is_trap_insn(uprobe_opcode_t *insn)
 int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 {
        struct uprobe_task *utask = current->utask;
-       union mips_instruction insn;
 
        /*
         * Now find the EPC where to resume after the breakpoint has been
@@ -168,10 +167,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
                unsigned long epc;
 
                epc = regs->cp0_epc;
-               __compute_return_epc_for_insn(regs, insn);
+               __compute_return_epc_for_insn(regs,
+                       (union mips_instruction) aup->insn[0]);
                aup->resume_epc = regs->cp0_epc;
        }
-
        utask->autask.saved_trap_nr = current->thread.trap_nr;
        current->thread.trap_nr = UPROBE_TRAP_NR;
        regs->cp0_epc = current->utask->xol_vaddr;
@@ -222,7 +221,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
                return NOTIFY_DONE;
 
        switch (val) {
-       case DIE_BREAK:
+       case DIE_UPROBE:
                if (uprobe_pre_sstep_notifier(regs))
                        return NOTIFY_STOP;
                break;
@@ -257,7 +256,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
        ra = regs->regs[31];
 
        /* Replace the return address with the trampoline address */
-       regs->regs[31] = ra;
+       regs->regs[31] = trampoline_vaddr;
 
        return ra;
 }
@@ -280,24 +279,6 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
        return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }
 
-/**
- * set_orig_insn - Restore the original instruction.
- * @mm: the probed process address space.
- * @auprobe: arch specific probepoint information.
- * @vaddr: the virtual address to insert the opcode.
- *
- * For mm @mm, restore the original opcode (opcode) at @vaddr.
- * Return 0 (success) or a negative errno.
- *
- * This overrides the weak version in kernel/events/uprobes.c.
- */
-int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
-                unsigned long vaddr)
-{
-       return uprobe_write_opcode(mm, vaddr,
-                       *(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
-}
-
 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                                  void *src, unsigned long len)
 {
index 9abe447..f9dbfb1 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
        unsigned long num_pages, i;
+       unsigned long data_pfn;
 
        BUG_ON(!PAGE_ALIGNED(image->data));
        BUG_ON(!PAGE_ALIGNED(image->size));
 
        num_pages = image->size / PAGE_SIZE;
 
-       for (i = 0; i < num_pages; i++) {
-               image->mapping.pages[i] =
-                       virt_to_page(image->data + (i * PAGE_SIZE));
-       }
+       data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+       for (i = 0; i < num_pages; i++)
+               image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }
 
 static int __init init_vdso(void)
index 72a4642..4a094f7 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
        /* Set EPC to return to post-branch instruction */
        xcp->cp0_epc = current->thread.bd_emu_cont_pc;
        pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+       MIPS_FPU_EMU_INC_STATS(ds_emul);
        return true;
 }
index cd72805..fa7d8d3 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
                 * If address-based cache ops don't require an SMP call, then
                 * use them exclusively for small flushes.
                 */
-               size = start - end;
+               size = end - start;
                cache_size = icache_size;
                if (!cpu_has_ic_fills_f_dc) {
                        size *= 2;
index a5509e7..72f7478 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 {
        struct maar_config cfg[BOOT_MEM_MAP_MAX];
        unsigned i, num_configured, num_cfg = 0;
-       phys_addr_t skip;
 
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
                        continue;
                }
 
-               skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
+               /* Round lower up */
                cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-               cfg[num_cfg].lower += skip;
+               cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
 
-               cfg[num_cfg].upper = cfg[num_cfg].lower;
-               cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-               cfg[num_cfg].upper -= skip;
+               /* Round upper down */
+               cfg[num_cfg].upper = boot_mem_map.map[i].addr +
+                                       boot_mem_map.map[i].size;
+               cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
 
                cfg[num_cfg].attrs = MIPS_MAAR_S;
                num_cfg++;
@@ -441,6 +440,9 @@ static inline void mem_init_free_highmem(void)
 #ifdef CONFIG_HIGHMEM
        unsigned long tmp;
 
+       if (cpu_has_dc_aliases)
+               return;
+
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);
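
The MAAR hunk above swaps an incorrect "skip" calculation for straightforward rounding: the lower bound rounds up and the upper bound rounds down to the unit's 64 KiB granularity, so a MAAR pair never grants speculative access outside the underlying region. The same rounding in isolation, as plain C:

    #include <stdint.h>

    #define MAAR_GRAN 0x10000ull            /* 64 KiB MAAR granularity */

    static uint64_t maar_lower(uint64_t addr)
    {
            /* Round the start of the region up. */
            return (addr + (MAAR_GRAN - 1)) & ~(MAAR_GRAN - 1);
    }

    static uint64_t maar_upper(uint64_t addr, uint64_t size)
    {
            /* Round the (inclusive) end of the region down. */
            return ((addr + size) & ~(MAAR_GRAN - 1)) - 1;
    }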
 
index ec5b216..7e7364b 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -39,6 +39,9 @@
 #include <linux/console.h>
 #endif
 
+#define ROCIT_CONFIG_GEN0              0x1f403000
+#define  ROCIT_CONFIG_GEN0_PCI_IOCU    BIT(7)
+
 extern void malta_be_init(void);
 extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
 
@@ -107,6 +110,8 @@ static void __init fd_activate(void)
 static int __init plat_enable_iocoherency(void)
 {
        int supported = 0;
+       u32 cfg;
+
        if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
                if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
                        BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
@@ -129,7 +134,8 @@ static int __init plat_enable_iocoherency(void)
        } else if (mips_cm_numiocu() != 0) {
                /* Nothing special needs to be done to enable coherency */
                pr_info("CMP IOCU detected\n");
-               if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+               cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
+               if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
                        pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
                        return 0;
                }
index bc0c91e..38a5c65 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -124,6 +124,13 @@ static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
                r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
 }
 
+static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
+{
+       unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
+
+       return (resource_flags & flags) == flags;
+}
+
 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
 {
        phb->ioda.pe_array[pe_no].phb = phb;
@@ -2871,7 +2878,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || res->parent)
                        continue;
-               if (!pnv_pci_is_m64(phb, res)) {
+               if (!pnv_pci_is_m64_flags(res->flags)) {
                        dev_warn(&pdev->dev, "Don't support SR-IOV with"
                                        " non M64 VF BAR%d: %pR. \n",
                                 i, res);
@@ -3096,7 +3103,7 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
         * alignment for any 64-bit resource, PCIe doesn't care and
         * bridges only do 64-bit prefetchable anyway.
         */
-       if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
+       if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
                return phb->ioda.m64_segsize;
        if (type & IORESOURCE_MEM)
                return phb->ioda.m32_segsize;
index caea2c4..1d159ce 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -60,7 +60,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                       \
 "      movco.l %0, @%3                                 \n"             \
 "      bf      1b                                      \n"             \
 "      synco                                           \n"             \
-       : "=&z" (temp), "=&z" (res)                                     \
+       : "=&z" (temp), "=&r" (res)                                     \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
                                                                        \
index 8c2a8c9..c1263fc 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -25,6 +25,7 @@
 #define HPAGE_MASK             (~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE   (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
 #endif
 
 #ifndef __ASSEMBLY__
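
The new REAL_HPAGE_PER_HPAGE macro counts how many real TLB entries back one huge page. With sparc64's usual values (HPAGE_SHIFT of 23 for 8 MiB pages built from 4 MiB real entries, REAL_HPAGE_SHIFT of 22; stated here as context, not part of this diff) the macro works out as:

    #define HPAGE_SHIFT             23      /* 8 MiB huge page */
    #define REAL_HPAGE_SHIFT        22      /* 4 MiB real TLB entry */
    #define REAL_HPAGE_PER_HPAGE    (1UL << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))

    /* REAL_HPAGE_PER_HPAGE == 2: each huge page needs two TSB entries,
     * which is why the fault_64.c hunk further down multiplies mm_rss
     * by this factor before growing the huge TSB. */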
index 26d9e77..ce2233f 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -43,6 +43,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
+void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void cpu_play_dead(void);
 
@@ -72,6 +73,7 @@ void __cpu_die(unsigned int cpu);
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
+#define smp_fill_in_cpu_possible_map() do { } while (0)
 
 #endif /* !(CONFIG_SMP) */
 
index 599f120..6b7331d 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/module.h>
 #include <linux/start_kernel.h>
+#include <linux/bootmem.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -50,6 +51,8 @@
 #include <asm/elf.h>
 #include <asm/mdesc.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
 
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
@@ -590,6 +593,22 @@ static void __init init_sparc64_elf_hwcap(void)
                pause_patch();
 }
 
+void __init alloc_irqstack_bootmem(void)
+{
+       unsigned int i, node;
+
+       for_each_possible_cpu(i) {
+               node = cpu_to_node(i);
+
+               softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+                                                       THREAD_SIZE,
+                                                       THREAD_SIZE, 0);
+               hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+                                                       THREAD_SIZE,
+                                                       THREAD_SIZE, 0);
+       }
+}
+
 void __init setup_arch(char **cmdline_p)
 {
        /* Initialize PROM console and command line. */
@@ -650,6 +669,13 @@ void __init setup_arch(char **cmdline_p)
 
        paging_init();
        init_sparc64_elf_hwcap();
+       smp_fill_in_cpu_possible_map();
+       /*
+        * Once the OF device tree and MDESC have been setup and nr_cpus has
+        * been parsed, we know the list of possible cpus.  Therefore we can
+        * allocate the IRQ stacks.
+        */
+       alloc_irqstack_bootmem();
 }
 
 extern int stop_a_enabled;
index 8a6151a..d3035ba 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1227,6 +1227,20 @@ void __init smp_setup_processor_id(void)
                xcall_deliver_impl = hypervisor_xcall_deliver;
 }
 
+void __init smp_fill_in_cpu_possible_map(void)
+{
+       int possible_cpus = num_possible_cpus();
+       int i;
+
+       if (possible_cpus > nr_cpu_ids)
+               possible_cpus = nr_cpu_ids;
+
+       for (i = 0; i < possible_cpus; i++)
+               set_cpu_possible(i, true);
+       for (; i < NR_CPUS; i++)
+               set_cpu_possible(i, false);
+}
+
 void smp_fill_in_sib_core_maps(void)
 {
        unsigned int i;
index e16fdd2..3f291d8 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -484,6 +484,7 @@ good_area:
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+       mm_rss *= REAL_HPAGE_PER_HPAGE;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
index 65457c9..7ac6b62 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1160,7 +1160,7 @@ int __node_distance(int from, int to)
        return numa_latency[from][to];
 }
 
-static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
        int i;
 
@@ -1173,8 +1173,8 @@ static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
        return i;
 }
 
-static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
-                                         int index)
+static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
+                                                u64 grp, int index)
 {
        u64 arc;
 
@@ -2081,7 +2081,6 @@ void __init paging_init(void)
 {
        unsigned long end_pfn, shift, phys_base;
        unsigned long real_end, i;
-       int node;
 
        setup_page_offset();
 
@@ -2250,21 +2249,6 @@ void __init paging_init(void)
        /* Setup bootmem... */
        last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
-       /* Once the OF device tree and MDESC have been setup, we know
-        * the list of possible cpus.  Therefore we can allocate the
-        * IRQ stacks.
-        */
-       for_each_possible_cpu(i) {
-               node = cpu_to_node(i);
-
-               softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-                                                       THREAD_SIZE,
-                                                       THREAD_SIZE, 0);
-               hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-                                                       THREAD_SIZE,
-                                                       THREAD_SIZE, 0);
-       }
-
        kernel_physical_mapping_init();
 
        {
index 3659d37..c56a195 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                return;
 
        if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
-               if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-                       mm->context.thp_pte_count++;
-               else
-                       mm->context.thp_pte_count--;
+               /*
+                * Note that this routine only sets pmds for THP pages.
+                * Hugetlb pages are handled elsewhere.  We need to check
+                * for huge zero page.  Huge zero pages are like hugetlb
+                * pages in that there is no RSS, but there is the need
+                * for TSB entries.  So, huge zero page counts go into
+                * hugetlb_pte_count.
+                */
+               if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+                       if (is_huge_zero_page(pmd_page(pmd)))
+                               mm->context.hugetlb_pte_count++;
+                       else
+                               mm->context.thp_pte_count++;
+               } else {
+                       if (is_huge_zero_page(pmd_page(orig)))
+                               mm->context.hugetlb_pte_count--;
+                       else
+                               mm->context.thp_pte_count--;
+               }
 
                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        }
 }
 
+/*
+ * This routine is only called when splitting a THP
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
 {
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 
        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+       /*
+        * set_pmd_at() will not be called in a way to decrement
+        * thp_pte_count when splitting a THP, so do it now.
+        * Sanity check pmd before doing the actual decrement.
+        */
+       if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+           !is_huge_zero_page(pmd_page(entry)))
+               (vma->vm_mm)->context.thp_pte_count--;
 }
 
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
index 6725ed4..f2b7711 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -469,8 +469,10 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+       unsigned long mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       unsigned long total_huge_pte_count;
+       unsigned long saved_hugetlb_pte_count;
+       unsigned long saved_thp_pte_count;
 #endif
        unsigned int i;
 
@@ -483,10 +485,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
-       total_huge_pte_count = mm->context.hugetlb_pte_count +
-                        mm->context.thp_pte_count;
+       saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+       saved_thp_pte_count = mm->context.thp_pte_count;
        mm->context.hugetlb_pte_count = 0;
        mm->context.thp_pte_count = 0;
+
+       mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
 #endif
 
        /* copy_mm() copies over the parent's mm_struct before calling
@@ -499,11 +503,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        /* If this is fork, inherit the parent's TSB size.  We would
         * grow it to that size on the first page fault anyways.
         */
-       tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+       tsb_grow(mm, MM_TSB_BASE, mm_rss);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       if (unlikely(total_huge_pte_count))
-               tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+       if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+               tsb_grow(mm, MM_TSB_HUGE,
+                        (saved_hugetlb_pte_count + saved_thp_pte_count) *
+                        REAL_HPAGE_PER_HPAGE);
 #endif
 
        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
index d172c61..02fff3e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1002,7 +1002,6 @@ ENTRY(error_entry)
        testb   $3, CS+8(%rsp)
        jz      .Lerror_kernelspace
 
-.Lerror_entry_from_usermode_swapgs:
        /*
         * We entered from user mode or we're pretending to have entered
         * from user mode due to an IRET fault.
@@ -1045,7 +1044,8 @@ ENTRY(error_entry)
         * gsbase and proceed.  We'll fix up the exception and land in
         * .Lgs_change's error handler with kernel gsbase.
         */
-       jmp     .Lerror_entry_from_usermode_swapgs
+       SWAPGS
+       jmp .Lerror_entry_done
 
 .Lbstep_iret:
        /* Fix truncated RIP */
index 4f74119..3dab75f 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -22,7 +22,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
 
        ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
 
-       if (hdr->e_type != ET_DYN)
+       if (GET_LE(&hdr->e_type) != ET_DYN)
                fail("input is not a shared object\n");
 
        /* Walk the segment table. */
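
The vdso2c fix above routes e_type through GET_LE like every other multi-byte header field: the vDSO image is always little-endian, while the build host need not be. A standalone little-endian accessor of the kind being relied on (a sketch only; the real GET_LE in the vdso2c sources is a type-dispatching macro):

    #include <stdint.h>

    static uint16_t get_le16(const void *p)
    {
            const uint8_t *b = p;

            return (uint16_t)(b[0] | (b[1] << 8));
    }

    /* schematically: if (get_le16(&hdr->e_type) != ET_DYN) fail(...); */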
index bdcd651..982c9e3 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -455,7 +455,7 @@ int intel_bts_interrupt(void)
         * The only surefire way of knowing if this NMI is ours is by checking
         * the write ptr against the PMI threshold.
         */
-       if (ds->bts_index >= ds->bts_interrupt_threshold)
+       if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
                handled = 1;
 
        /*
@@ -584,7 +584,8 @@ static __init int bts_init(void)
        if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
                return -ENODEV;
 
-       bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
+       bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
+                                 PERF_PMU_CAP_EXCLUSIVE;
        bts_pmu.task_ctx_nr     = perf_sw_context;
        bts_pmu.event_init      = bts_event_init;
        bts_pmu.add             = bts_event_add;
index 6fa8594..dee8a70 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-       this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+       this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
 }
 
 /* Set in this cpu's CR4. */
index 809eda0..bcc9ccc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -804,21 +804,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                identify_cpu_without_cpuid(c);
 
        /* cyrix could have cpuid enabled via c_identify()*/
-       if (!have_cpuid_p())
-               return;
+       if (have_cpuid_p()) {
+               cpu_detect(c);
+               get_cpu_vendor(c);
+               get_cpu_cap(c);
 
-       cpu_detect(c);
-       get_cpu_vendor(c);
-       get_cpu_cap(c);
-
-       if (this_cpu->c_early_init)
-               this_cpu->c_early_init(c);
+               if (this_cpu->c_early_init)
+                       this_cpu->c_early_init(c);
 
-       c->cpu_index = 0;
-       filter_cpuid_features(c, false);
+               c->cpu_index = 0;
+               filter_cpuid_features(c, false);
 
-       if (this_cpu->c_bsp_init)
-               this_cpu->c_bsp_init(c);
+               if (this_cpu->c_bsp_init)
+                       this_cpu->c_bsp_init(c);
+       }
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
        fpu__init_system(c);
index 0fa60f5..98c9cd6 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1137,9 +1137,7 @@ void __init setup_arch(char **cmdline_p)
         * auditing all the early-boot CR4 manipulation would be needed to
         * rule it out.
         */
-       if (boot_cpu_data.cpuid_level >= 0)
-               /* A CPU has %cr4 if and only if it has CPUID. */
-               mmu_cr4_features = __read_cr4();
+       mmu_cr4_features = __read_cr4_safe();
 
        memblock_set_current_limit(get_max_mapped());
 
index 849dc09..e3353c9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
        }
 }
 
-static int populate_pmd(struct cpa_data *cpa,
-                       unsigned long start, unsigned long end,
-                       unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+                        unsigned long start, unsigned long end,
+                        unsigned num_pages, pud_t *pud, pgprot_t pgprot)
 {
-       unsigned int cur_pages = 0;
+       long cur_pages = 0;
        pmd_t *pmd;
        pgprot_t pmd_pgprot;
 
@@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
        return num_pages;
 }
 
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
-                       pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+                        pgprot_t pgprot)
 {
        pud_t *pud;
        unsigned long end;
-       int cur_pages = 0;
+       long cur_pages = 0;
        pgprot_t pud_pgprot;
 
        end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 
        /* Map trailing leftover */
        if (start < end) {
-               int tmp;
+               long tmp;
 
                pud = pud_offset(pgd, start);
                if (pud_none(*pud))
@@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
        pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
        pud_t *pud = NULL;      /* shut up gcc */
        pgd_t *pgd_entry;
-       int ret;
+       long ret;
 
        pgd_entry = cpa->pgd + pgd_index(addr);
 
@@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
-       int ret, numpages = cpa->numpages;
+       unsigned long numpages = cpa->numpages;
+       int ret;
 
        while (numpages) {
                /*
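
The pageattr.c conversion above exists because cpa->numpages comes from an unsigned long: a request covering more than 2^31 4 KiB pages (8 TiB) wraps or truncates a 32-bit accumulator. A compact demonstration of the truncation, assuming an LP64 host:

    #include <stdio.h>

    int main(void)
    {
            unsigned long numpages = 1UL << 33;     /* 32 TiB of 4 KiB pages */
            int as_int = (int)numpages;             /* truncates to 0 */
            long as_long = (long)numpages;          /* preserved on LP64 */

            printf("int: %d  long: %ld\n", as_int, as_long);
            return 0;
    }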
index 677e29e..8dd3784 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -245,7 +245,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * text and allocate a new stack because we can't rely on the
         * stack pointer being < 4GB.
         */
-       if (!IS_ENABLED(CONFIG_EFI_MIXED))
+       if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
                return 0;
 
        /*
index 13f5a6c..c207fa9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
        if (ret)
                return ERR_PTR(ret);
 
+       /*
+        * Check if the hardware context is actually mapped to anything.
+        * If not tell the caller that it should skip this queue.
+        */
        hctx = q->queue_hw_ctx[hctx_idx];
+       if (!blk_mq_hw_queue_mapped(hctx)) {
+               ret = -EXDEV;
+               goto out_queue_exit;
+       }
        ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
        if (!rq) {
-               blk_queue_exit(q);
-               return ERR_PTR(-EWOULDBLOCK);
+               ret = -EWOULDBLOCK;
+               goto out_queue_exit;
        }
 
        return rq;
+
+out_queue_exit:
+       blk_queue_exit(q);
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
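
The blk_mq_alloc_request_hctx() rework above funnels both failures (the new unmapped-hctx check and the allocation failure) through a single out_queue_exit label, so the blk_queue_exit() undo cannot be forgotten as error cases accumulate. The shape of that pattern, with hypothetical stand-ins for the enter/alloc/exit calls:

    #include <errno.h>
    #include <stdbool.h>

    struct queue;
    int queue_enter(struct queue *q);       /* hypothetical */
    void queue_exit(struct queue *q);       /* hypothetical */
    int do_alloc(struct queue *q);          /* hypothetical */

    static int alloc_with_unwind(struct queue *q, bool mapped)
    {
            int ret = queue_enter(q);

            if (ret)
                    return ret;

            if (!mapped) {
                    ret = -EXDEV;   /* tell the caller to try another queue */
                    goto out_queue_exit;
            }

            ret = do_alloc(q);
            if (ret)
                    goto out_queue_exit;

            return 0;

    out_queue_exit:
            queue_exit(q);  /* the one place the reference is dropped */
            return ret;
    }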
 
index f1aba26..a3ea826 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
-        * long since now.
+        * long since now. New slice is started only for empty throttle group.
+        * If there is queued bio, that means there should be an active
+        * slice and it should be extended instead.
         */
-       if (throtl_slice_used(tg, rw))
+       if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw);
        else {
                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
index 877019a..8baab43 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+       unsigned int dst_len;
        unsigned int pos;
-
-       if (err == -EOVERFLOW)
-               /* Decrypted value had no leading 0 byte */
-               err = -EINVAL;
+       u8 *out_buf;
 
        if (err)
                goto done;
 
-       if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
-               err = -EINVAL;
+       err = -EINVAL;
+       dst_len = req_ctx->child_req.dst_len;
+       if (dst_len < ctx->key_size - 1)
                goto done;
+
+       out_buf = req_ctx->out_buf;
+       if (dst_len == ctx->key_size) {
+               if (out_buf[0] != 0x00)
+                       /* Decrypted value had no leading 0 byte */
+                       goto done;
+
+               dst_len--;
+               out_buf++;
        }
 
-       if (req_ctx->out_buf[0] != 0x02) {
-               err = -EINVAL;
+       if (out_buf[0] != 0x02)
                goto done;
-       }
-       for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
-               if (req_ctx->out_buf[pos] == 0x00)
+
+       for (pos = 1; pos < dst_len; pos++)
+               if (out_buf[pos] == 0x00)
                        break;
-       if (pos < 9 || pos == req_ctx->child_req.dst_len) {
-               err = -EINVAL;
+       if (pos < 9 || pos == dst_len)
                goto done;
-       }
        pos++;
 
-       if (req->dst_len < req_ctx->child_req.dst_len - pos)
+       err = 0;
+
+       if (req->dst_len < dst_len - pos)
                err = -EOVERFLOW;
-       req->dst_len = req_ctx->child_req.dst_len - pos;
+       req->dst_len = dst_len - pos;
 
        if (!err)
                sg_copy_from_buffer(req->dst,
                                sg_nents_for_len(req->dst, req->dst_len),
-                               req_ctx->out_buf + pos, req->dst_len);
+                               out_buf + pos, req->dst_len);
 
 done:
        kzfree(req_ctx->out_buf);
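The rewritten check accepts a decrypted value of either key_size bytes (leading 0x00 still present) or key_size - 1 bytes, then validates the 0x02 block type and the minimum 8-byte PS run before the 0x00 separator. A standalone sketch of that EME-PKCS1-v1_5 decode, with a hypothetical helper name:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns payload length, or -1 if the padding is malformed. */
static int pkcs1_v15_unpad(const uint8_t *buf, size_t dst_len,
                           size_t key_size, const uint8_t **payload)
{
        size_t pos;

        if (dst_len < key_size - 1)
                return -1;
        if (dst_len == key_size) {      /* leading 0x00 still present */
                if (buf[0] != 0x00)
                        return -1;
                buf++;
                dst_len--;
        }
        if (buf[0] != 0x02)             /* block type 2 */
                return -1;
        for (pos = 1; pos < dst_len; pos++)
                if (buf[pos] == 0x00)   /* end of PS */
                        break;
        if (pos < 9 || pos == dst_len)  /* PS must be >= 8 bytes */
                return -1;
        pos++;
        *payload = buf + pos;
        return (int)(dst_len - pos);
}

int main(void)
{
        const uint8_t blob[] = { 0x00, 0x02, 1,2,3,4,5,6,7,8, 0x00, 'h','i' };
        const uint8_t *msg;
        int len = pkcs1_v15_unpad(blob, sizeof(blob), sizeof(blob), &msg);

        if (len >= 0)
                printf("payload: %.*s\n", len, (const char *)msg);
        return 0;
}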
index 80cc7c0..e1d5ea6 100644 (file)
@@ -94,54 +94,50 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
        return to_acpi_device(acpi_desc->dev);
 }
 
-static int xlat_status(void *buf, unsigned int cmd)
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
 {
        struct nd_cmd_clear_error *clear_err;
        struct nd_cmd_ars_status *ars_status;
-       struct nd_cmd_ars_start *ars_start;
-       struct nd_cmd_ars_cap *ars_cap;
        u16 flags;
 
        switch (cmd) {
        case ND_CMD_ARS_CAP:
-               ars_cap = buf;
-               if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+               if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
                        return -ENOTTY;
 
                /* Command failed */
-               if (ars_cap->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
 
                /* No supported scan types for this range */
                flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
-               if ((ars_cap->status >> 16 & flags) == 0)
+               if ((status >> 16 & flags) == 0)
                        return -ENOTTY;
                break;
        case ND_CMD_ARS_START:
-               ars_start = buf;
                /* ARS is in progress */
-               if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+               if ((status & 0xffff) == NFIT_ARS_START_BUSY)
                        return -EBUSY;
 
                /* Command failed */
-               if (ars_start->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                break;
        case ND_CMD_ARS_STATUS:
                ars_status = buf;
                /* Command failed */
-               if (ars_status->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                /* Check extended status (Upper two bytes) */
-               if (ars_status->status == NFIT_ARS_STATUS_DONE)
+               if (status == NFIT_ARS_STATUS_DONE)
                        return 0;
 
                /* ARS is in progress */
-               if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+               if (status == NFIT_ARS_STATUS_BUSY)
                        return -EBUSY;
 
                /* No ARS performed for the current boot */
-               if (ars_status->status == NFIT_ARS_STATUS_NONE)
+               if (status == NFIT_ARS_STATUS_NONE)
                        return -EAGAIN;
 
                /*
@@ -149,19 +145,19 @@ static int xlat_status(void *buf, unsigned int cmd)
                 * agent wants the scan to stop.  If we didn't overflow
                 * then just continue with the returned results.
                 */
-               if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+               if (status == NFIT_ARS_STATUS_INTR) {
                        if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
                                return -ENOSPC;
                        return 0;
                }
 
                /* Unknown status */
-               if (ars_status->status >> 16)
+               if (status >> 16)
                        return -EIO;
                break;
        case ND_CMD_CLEAR_ERROR:
                clear_err = buf;
-               if (clear_err->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                if (!clear_err->cleared)
                        return -EIO;
@@ -172,6 +168,9 @@ static int xlat_status(void *buf, unsigned int cmd)
                break;
        }
 
+       /* any other non-zero status results in an error */
+       if (status)
+               return -EIO;
        return 0;
 }
 
@@ -186,10 +185,10 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
        struct nd_cmd_pkg *call_pkg = NULL;
        const char *cmd_name, *dimm_name;
        unsigned long cmd_mask, dsm_mask;
+       u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
        const u8 *uuid;
-       u32 offset;
        int rc, i;
 
        func = cmd;
@@ -317,6 +316,15 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                out_obj->buffer.pointer + offset, out_size);
                offset += out_size;
        }
+
+       /*
+        * Set fw_status for all the commands with a known format to be
+        * later interpreted by xlat_status().
+        */
+       if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+                       || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+               fw_status = *(u32 *) out_obj->buffer.pointer;
+
        if (offset + in_buf.buffer.length < buf_len) {
                if (i >= 1) {
                        /*
@@ -325,7 +333,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                         */
                        rc = buf_len - offset - in_buf.buffer.length;
                        if (cmd_rc)
-                               *cmd_rc = xlat_status(buf, cmd);
+                               *cmd_rc = xlat_status(buf, cmd, fw_status);
                } else {
                        dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                                        __func__, dimm_name, cmd_name, buf_len,
@@ -335,7 +343,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
        } else {
                rc = 0;
                if (cmd_rc)
-                       *cmd_rc = xlat_status(buf, cmd);
+                       *cmd_rc = xlat_status(buf, cmd, fw_status);
        }
 
  out:
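With the change above, xlat_status() works from the raw 32-bit firmware status word instead of per-command structs: the low 16 bits carry the command status, the upper 16 bits extended status, and any leftover non-zero value becomes -EIO. A sketch of the translation for the ARS_START case (the busy constant below is assumed for illustration, not the ACPI-defined value):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ARS_START_BUSY 0x0006   /* assumed value, for the sketch only */

static int xlat_ars_start(uint32_t status)
{
        if ((status & 0xffff) == ARS_START_BUSY)
                return -EBUSY;
        if (status & 0xffff)            /* command failed */
                return -EIO;
        if (status)                     /* any other non-zero status */
                return -EIO;
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", xlat_ars_start(0),
               xlat_ars_start(ARS_START_BUSY), xlat_ars_start(0x0001));
        return 0;
}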
index 25d26bb..e964d06 100644 (file)
@@ -1475,7 +1475,11 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
                kfree(buf);
        } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
-               regcache_drop_region(map, reg, reg + 1);
+               /* regcache_drop_region() takes a lock that we already
+                * hold, so call map->cache_ops->drop() directly
+                */
+               if (map->cache_ops && map->cache_ops->drop)
+                       map->cache_ops->drop(map, reg, reg + 1);
        }
 
        trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
index df7ab24..39c01b9 100644 (file)
@@ -1708,11 +1708,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
        DRM_INFO("amdgpu: finishing device.\n");
        adev->shutdown = true;
+       drm_crtc_force_disable_all(adev->ddev);
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
        amdgpu_fence_driver_fini(adev);
-       drm_crtc_force_disable_all(adev->ddev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
        kfree(adev->ip_block_status);
index 7ea8aa7..6bc712f 100644 (file)
@@ -175,6 +175,7 @@ struct nvkm_device_func {
        void (*fini)(struct nvkm_device *, bool suspend);
        resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
        resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+       bool cpu_coherent;
 };
 
 struct nvkm_device_quirk {
index 6190035..864323b 100644 (file)
@@ -209,7 +209,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
 
-       nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+       if (!nvxx_device(&drm->device)->func->cpu_coherent)
+               nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
        nvbo->page_shift = 12;
        if (drm->client.vm) {
index b1b6932..62ad030 100644 (file)
@@ -1614,6 +1614,7 @@ nvkm_device_pci_func = {
        .fini = nvkm_device_pci_fini,
        .resource_addr = nvkm_device_pci_resource_addr,
        .resource_size = nvkm_device_pci_resource_size,
+       .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int
index 939682f..9b638bd 100644 (file)
@@ -245,6 +245,7 @@ nvkm_device_tegra_func = {
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
+       .cpu_coherent = false,
 };
 
 int
index edec30f..0a7b6ed 100644 (file)
@@ -37,7 +37,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
 {
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+       mutex_lock(&chan->fifo->base.engine.subdev.mutex);
        nvkm_ramht_remove(imem->ramht, cookie);
+       mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
 }
 
 static int
index e6abc09..1f78ec2 100644 (file)
@@ -3015,6 +3015,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        if (rdev->pdev->device == 0x6811 &&
            rdev->pdev->revision == 0x81)
                max_mclk = 120000;
+       /* limit sclk/mclk on Jet parts for stability */
+       if (rdev->pdev->device == 0x6665 &&
+           rdev->pdev->revision == 0xc3) {
+               max_sclk = 75000;
+               max_mclk = 80000;
+       }
 
        if (rps->vce_active) {
                rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
index 9688bfa..611b6b9 100644 (file)
@@ -122,7 +122,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                return 0;
        cmd = urb->transfer_buffer;
 
-       for (i = y; i < height ; i++) {
+       for (i = y; i < y + height ; i++) {
                const int line_offset = fb->base.pitches[0] * i;
                const int byte_offset = line_offset + (x * bpp);
                const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
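The fix above matters whenever the damage rectangle does not start at the top of the framebuffer: a rectangle of `height` rows at row `y` must walk rows y .. y + height - 1. A trivial check:

#include <stdio.h>

int main(void)
{
        int y = 100, height = 4;

        /* correct bound: rows y .. y + height - 1 */
        for (int i = y; i < y + height; i++)
                printf("flush row %d\n", i);
        /* the old `i < height` bound would have flushed nothing here */
        return 0;
}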
index 137125b..5ce71ce 100644 (file)
@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
        /* Set the number of I2C channel instance */
        adap_info->ch_num = id->driver_data;
 
-       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-                 KBUILD_MODNAME, adap_info);
-       if (ret) {
-               pch_pci_err(pdev, "request_irq FAILED\n");
-               goto err_request_irq;
-       }
-
        for (i = 0; i < adap_info->ch_num; i++) {
                pch_adap = &adap_info->pch_data[i].pch_adapter;
                adap_info->pch_i2c_suspended = false;
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->dev.of_node = pdev->dev.of_node;
                pch_adap->dev.parent = &pdev->dev;
+       }
+
+       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+                 KBUILD_MODNAME, adap_info);
+       if (ret) {
+               pch_pci_err(pdev, "request_irq FAILED\n");
+               goto err_request_irq;
+       }
+
+       for (i = 0; i < adap_info->ch_num; i++) {
+               pch_adap = &adap_info->pch_data[i].pch_adapter;
 
                pch_i2c_init(&adap_info->pch_data[i]);
 
index 501bd15..a8497cf 100644 (file)
@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
 #ifdef CONFIG_PM_SLEEP
 static int qup_i2c_suspend(struct device *device)
 {
-       qup_i2c_pm_suspend_runtime(device);
+       if (!pm_runtime_suspended(device))
+               return qup_i2c_pm_suspend_runtime(device);
        return 0;
 }
 
index 528e755..3278ebf 100644 (file)
@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
        /* Only select the channel if it's different from the last channel */
        if (data->last_chan != regval) {
                ret = pca954x_reg_write(muxc->parent, client, regval);
-               data->last_chan = regval;
+               data->last_chan = ret ? 0 : regval;
        }
 
        return ret;
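The one-line change above keeps the cached channel in sync with the hardware: on a failed register write the cache is invalidated (0 is assumed here to mean "no channel selected"), so a later select of the same channel is not short-circuited by a stale cache. A small userspace model:

#include <stdio.h>

static int reg_write_ok;                /* fake hardware, for the sketch */

static int pca954x_reg_write(unsigned char val)
{
        (void)val;
        return reg_write_ok ? 0 : -1;   /* 0 on success, negative on error */
}

static unsigned char last_chan;

static int select_chan(unsigned char regval)
{
        int ret = 0;

        if (last_chan != regval) {
                ret = pca954x_reg_write(regval);
                last_chan = ret ? 0 : regval;   /* drop cache on failure */
        }
        return ret;
}

int main(void)
{
        reg_write_ok = 0;
        printf("first try: %d\n", select_chan(0x5));  /* fails */
        reg_write_ok = 1;
        printf("retry:     %d\n", select_chan(0x5));  /* now reaches hw */
        return 0;
}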
index 5d11fea..f3135ae 100644 (file)
@@ -946,6 +946,12 @@ static const struct input_device_id joydev_ids[] = {
                .evbit = { BIT_MASK(EV_ABS) },
                .absbit = { BIT_MASK(ABS_X) },
        },
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                               INPUT_DEVICE_ID_MATCH_ABSBIT,
+               .evbit = { BIT_MASK(EV_ABS) },
+               .absbit = { BIT_MASK(ABS_Z) },
+       },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                                INPUT_DEVICE_ID_MATCH_ABSBIT,
index b2744a6..f502c84 100644 (file)
@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
                data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
        }
 
-       error = device_property_read_string(dev, "touchscreen-fw-name", &str);
+       error = device_property_read_string(dev, "firmware-name", &str);
        if (!error)
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s", str);
        else
                dev_dbg(dev, "Firmware file name read error. Using default.");
 }
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
                if (!acpi_id)
                        return -ENODEV;
 
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-                       acpi_id->id);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s.fw", acpi_id->id);
 
                for (i = 0; i < strlen(data->fw_name); i++)
                        data->fw_name[i] = tolower(data->fw_name[i]);
        } else {
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-                       id->name);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s.fw", id->name);
        }
 
        return 0;
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
                                         const struct i2c_device_id *id)
 {
-       snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
+       snprintf(data->fw_name, sizeof(data->fw_name),
+                "silead/%s.fw", id->name);
        return 0;
 }
 #endif
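All three paths above now build the firmware path under a silead/ subdirectory, and the ACPI branch lowercases the ID so it matches the on-disk file names. A minimal reproduction of the name construction (the sample ID is illustrative):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        char fw_name[64];
        const char *acpi_id = "GSL1680";

        snprintf(fw_name, sizeof(fw_name), "silead/%s.fw", acpi_id);
        for (size_t i = 0; i < strlen(fw_name); i++)
                fw_name[i] = tolower((unsigned char)fw_name[i]);
        printf("%s\n", fw_name);        /* silead/gsl1680.fw */
        return 0;
}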
index ede5672..da6c0ba 100644 (file)
@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
 {
-       int cpu = *base_cpu;
+       int next_cpu, cpu = *base_cpu;
        unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;
 
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 
                tlist |= 1 << (mpidr & 0xf);
 
-               cpu = cpumask_next(cpu, mask);
-               if (cpu >= nr_cpu_ids)
+               next_cpu = cpumask_next(cpu, mask);
+               if (next_cpu >= nr_cpu_ids)
                        goto out;
+               cpu = next_cpu;
 
                mpidr = cpu_logical_map(cpu);
 
index 83f4983..6185696 100644 (file)
@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
        if (!gic_local_irq_is_routable(intr))
                return -EPERM;
 
-       /*
-        * HACK: These are all really percpu interrupts, but the rest
-        * of the MIPS kernel code does not use the percpu IRQ API for
-        * the CP0 timer and performance counter interrupts.
-        */
-       switch (intr) {
-       case GIC_LOCAL_INT_TIMER:
-       case GIC_LOCAL_INT_PERFCTR:
-       case GIC_LOCAL_INT_FDC:
-               irq_set_chip_and_handler(virq,
-                                        &gic_all_vpes_local_irq_controller,
-                                        handle_percpu_irq);
-               break;
-       default:
-               irq_set_chip_and_handler(virq,
-                                        &gic_local_irq_controller,
-                                        handle_percpu_devid_irq);
-               irq_set_percpu_devid(virq);
-               break;
-       }
-
        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@@ -724,16 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        return 0;
 }
 
-static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
-                             irq_hw_number_t hw)
+static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
+                             unsigned int hwirq)
 {
-       if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
-               return gic_local_irq_domain_map(d, virq, hw);
+       struct irq_chip *chip;
+       int err;
+
+       if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+               err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                   &gic_level_irq_controller,
+                                                   NULL);
+       } else {
+               switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+               case GIC_LOCAL_INT_TIMER:
+               case GIC_LOCAL_INT_PERFCTR:
+               case GIC_LOCAL_INT_FDC:
+                       /*
+                        * HACK: These are all really percpu interrupts, but
+                        * the rest of the MIPS kernel code does not use the
+                        * percpu IRQ API for them.
+                        */
+                       chip = &gic_all_vpes_local_irq_controller;
+                       irq_set_handler(virq, handle_percpu_irq);
+                       break;
+
+               default:
+                       chip = &gic_local_irq_controller;
+                       irq_set_handler(virq, handle_percpu_devid_irq);
+                       irq_set_percpu_devid(virq);
+                       break;
+               }
 
-       irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-                                handle_level_irq);
+               err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                   chip, NULL);
+       }
 
-       return gic_shared_irq_domain_map(d, virq, hw, 0);
+       return err;
 }
 
 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -744,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
        int cpu, ret, i;
 
        if (spec->type == GIC_DEVICE) {
-               /* verify that it doesn't conflict with an IPI irq */
-               if (test_bit(spec->hwirq, ipi_resrv))
+               /* verify that shared irqs don't conflict with an IPI irq */
+               if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
+                   test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
                        return -EBUSY;
 
-               hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
-
-               return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
-                                                    &gic_level_irq_controller,
-                                                    NULL);
+               return gic_setup_dev_chip(d, virq, spec->hwirq);
        } else {
                base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
                if (base_hwirq == gic_shared_intrs) {
@@ -821,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
 }
 
 static const struct irq_domain_ops gic_irq_domain_ops = {
-       .map = gic_irq_domain_map,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
        .match = gic_irq_domain_match,
@@ -852,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
        struct irq_fwspec *fwspec = arg;
        struct gic_irq_spec spec = {
                .type = GIC_DEVICE,
-               .hwirq = fwspec->param[1],
        };
        int i, ret;
-       bool is_shared = fwspec->param[0] == GIC_SHARED;
 
-       if (is_shared) {
-               ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < nr_irqs; i++) {
-               irq_hw_number_t hwirq;
+       if (fwspec->param[0] == GIC_SHARED)
+               spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+       else
+               spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
 
-               if (is_shared)
-                       hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
-               else
-                       hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+       ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+       if (ret)
+               return ret;
 
-               ret = irq_domain_set_hwirq_and_chip(d, virq + i,
-                                                   hwirq,
-                                                   &gic_level_irq_controller,
-                                                   NULL);
+       for (i = 0; i < nr_irqs; i++) {
+               ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
                if (ret)
                        goto error;
        }
@@ -896,7 +888,10 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
 static void gic_dev_domain_activate(struct irq_domain *domain,
                                    struct irq_data *d)
 {
-       gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+       if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
+               gic_local_irq_domain_map(domain, d->irq, d->hwirq);
+       else
+               gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
 }
 
 static struct irq_domain_ops gic_dev_domain_ops = {
index 32380d5..767af20 100644 (file)
@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
 
-               dev_info(&slot->mmc->class_dev,
-                        "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
-                        slot->id, host->bus_hz, clock,
-                        div ? ((host->bus_hz / div) >> 1) :
-                        host->bus_hz, div);
+               if (clock != slot->__clk_old || force_clkinit)
+                       dev_info(&slot->mmc->class_dev,
+                                "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+                                slot->id, host->bus_hz, clock,
+                                div ? ((host->bus_hz / div) >> 1) :
+                                host->bus_hz, div);
 
                /* disable clock */
                mci_writel(host, CLKENA, 0);
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+               /* keep the last clock value that was requested from core */
+               slot->__clk_old = clock;
        }
 
        host->current_speed = clock;
index 9e740bc..e8cd2de 100644 (file)
@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
  * @queue_node: List node for placing this node in the @queue list of
  *     &struct dw_mci.
  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last clock value that was requested from core.
+ *     Keeping track of this helps us to avoid spamming the console.
  * @flags: Random state bits associated with the slot.
  * @id: Number of this slot.
  * @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -263,6 +265,7 @@ struct dw_mci_slot {
        struct list_head        queue_node;
 
        unsigned int            clock;
+       unsigned int            __clk_old;
 
        unsigned long           flags;
 #define DW_MMC_CARD_PRESENT    0
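Together the two hunks above add a private __clk_old field and use it to rate-limit the bus-speed message: set_ios() can be called repeatedly with the same rate, and only an actual change (or a forced re-init) is worth logging. A userspace model of that dedup:

#include <stdbool.h>
#include <stdio.h>

static unsigned int clk_old;            /* last rate requested by the core */

static void setup_bus(unsigned int clock, bool force_clkinit)
{
        if (clock != clk_old || force_clkinit)
                printf("bus speed -> %u Hz\n", clock);
        /* ... program dividers, enable clock ... */
        clk_old = clock;
}

int main(void)
{
        setup_bus(400000, false);       /* logs */
        setup_bus(400000, false);       /* silent */
        setup_bus(50000000, false);     /* logs */
        return 0;
}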
index cc07ba0..27fa8b8 100644 (file)
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
        unsigned long flags;
        u32 val;
 
+       /* Reset ECC hardware */
+       davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
        spin_lock_irqsave(&davinci_nand_lock, flags);
 
        /* Start 4-bit ECC calculation for read/write */
index 25a4fbd..d54f666 100644 (file)
@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
                   u8 *data, u32 bytes)
 {
        dma_addr_t addr;
-       u32 *p, len, i;
+       u8 *p;
+       u32 len, i, val;
        int ret = 0;
 
        addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
 
        /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
        len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
-       p = (u32 *)(data + bytes);
+       p = data + bytes;
 
        /* write the parity bytes generated by the ECC back to the OOB region */
-       for (i = 0; i < len; i++)
-               p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+       for (i = 0; i < len; i++) {
+               if ((i % 4) == 0)
+                       val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+               p[i] = (val >> ((i % 4) * 8)) & 0xff;
+       }
 timeout:
 
        dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
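The loop above is the key change: the ECC_ENCPAR registers are 32 bits wide, but the parity area is `len` bytes and need not be a multiple of 4, so copying whole u32s could overrun the OOB buffer. Reading a fresh word every fourth byte and masking out one byte at a time stays within bounds. A standalone model (the register contents are faked):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_encpar(unsigned int idx)   /* stands in for readl() */
{
        return 0x04030201u + 0x04040404u * idx;
}

int main(void)
{
        uint8_t oob[7];                          /* len not divisible by 4 */
        uint32_t val = 0;

        for (unsigned int i = 0; i < sizeof(oob); i++) {
                if ((i % 4) == 0)
                        val = fake_encpar(i / 4);
                oob[i] = (val >> ((i % 4) * 8)) & 0xff;
        }
        for (unsigned int i = 0; i < sizeof(oob); i++)
                printf("%02x ", oob[i]);
        printf("\n");
        return 0;
}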
index ddaa2ac..5223a21 100644 (file)
@@ -93,6 +93,9 @@
 #define                NFI_FSM_MASK            (0xf << 16)
 #define NFI_ADDRCNTR           (0x70)
 #define                CNTR_MASK               GENMASK(16, 12)
+#define                ADDRCNTR_SEC_SHIFT      (12)
+#define                ADDRCNTR_SEC(val) \
+               (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
 #define NFI_STRADDR            (0x80)
 #define NFI_BYTELEN            (0x84)
 #define NFI_CSEL               (0x90)
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
        }
 
        ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
-                                       (reg & CNTR_MASK) >= chip->ecc.steps,
+                                       ADDRCNTR_SEC(reg) >= chip->ecc.steps,
                                        10, MTK_TIMEOUT);
        if (ret)
                dev_err(dev, "hwecc write timeout\n");
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
                dev_warn(nfc->dev, "read ahb/dma done timeout\n");
 
        rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
-                                      (reg & CNTR_MASK) >= sectors, 10,
+                                      ADDRCNTR_SEC(reg) >= sectors, 10,
                                       MTK_TIMEOUT);
        if (rc < 0) {
                dev_err(nfc->dev, "subpage done timeout\n");
index 5173fad..57cbe2b 100644 (file)
@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
 
-       if (section > nand_chip->ecc.steps)
+       if (section >= nand_chip->ecc.steps)
                return -ERANGE;
 
        if (!section) {
index a59361c..5513bfd 100644 (file)
@@ -2169,7 +2169,7 @@ scan_tail:
        return 0;
 
 return_error:
-       if (info->dma)
+       if (!IS_ERR_OR_NULL(info->dma))
                dma_release_channel(info->dma);
        if (nand_chip->ecc.priv) {
                nand_bch_free(nand_chip->ecc.priv);
index e21f7cc..8d6208c 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <linux/workqueue.h>
 #include <linux/can.h>
 #include <linux/can/dev.h>
 #include <linux/can/skb.h>
@@ -501,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
 /*
  * CAN device restart for bus-off recovery
  */
-static void can_restart(unsigned long data)
+static void can_restart(struct net_device *dev)
 {
-       struct net_device *dev = (struct net_device *)data;
        struct can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
@@ -543,6 +543,14 @@ restart:
                netdev_err(dev, "Error %d during restart", err);
 }
 
+static void can_restart_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
+
+       can_restart(priv->dev);
+}
+
 int can_restart_now(struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
@@ -556,8 +564,8 @@ int can_restart_now(struct net_device *dev)
        if (priv->state != CAN_STATE_BUS_OFF)
                return -EBUSY;
 
-       /* Runs as soon as possible in the timer context */
-       mod_timer(&priv->restart_timer, jiffies);
+       cancel_delayed_work_sync(&priv->restart_work);
+       can_restart(dev);
 
        return 0;
 }
@@ -578,8 +586,8 @@ void can_bus_off(struct net_device *dev)
        netif_carrier_off(dev);
 
        if (priv->restart_ms)
-               mod_timer(&priv->restart_timer,
-                         jiffies + (priv->restart_ms * HZ) / 1000);
+               schedule_delayed_work(&priv->restart_work,
+                                     msecs_to_jiffies(priv->restart_ms));
 }
 EXPORT_SYMBOL_GPL(can_bus_off);
 
@@ -688,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
                return NULL;
 
        priv = netdev_priv(dev);
+       priv->dev = dev;
 
        if (echo_skb_max) {
                priv->echo_skb_max = echo_skb_max;
@@ -697,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
 
        priv->state = CAN_STATE_STOPPED;
 
-       init_timer(&priv->restart_timer);
+       INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
 
        return dev;
 }
@@ -778,8 +787,6 @@ int open_candev(struct net_device *dev)
        if (!netif_carrier_ok(dev))
                netif_carrier_on(dev);
 
-       setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(open_candev);
@@ -794,7 +801,7 @@ void close_candev(struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
 
-       del_timer_sync(&priv->restart_timer);
+       cancel_delayed_work_sync(&priv->restart_work);
        can_flush_echo_skb(dev);
 }
 EXPORT_SYMBOL_GPL(close_candev);
index 2726f03..a927a73 100644 (file)
@@ -18134,14 +18134,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
-       /* We needn't recover from permanent error */
-       if (state == pci_channel_io_frozen)
-               tp->pcierr_recovery = true;
-
        /* We probably don't have netdev yet */
        if (!netdev || !netif_running(netdev))
                goto done;
 
+       /* We needn't recover from permanent error */
+       if (state == pci_channel_io_frozen)
+               tp->pcierr_recovery = true;
+
        tg3_phy_stop(tp);
 
        tg3_netif_stop(tp);
@@ -18238,7 +18238,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 
        rtnl_lock();
 
-       if (!netif_running(netdev))
+       if (!netdev || !netif_running(netdev))
                goto done;
 
        tg3_full_lock(tp, 0);
index fb5c638..1fa2d87 100644 (file)
@@ -89,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
-               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+               .driver_data = FEC_QUIRK_USE_GASKET,
        }, {
                .name = "imx27-fec",
-               .driver_data = FEC_QUIRK_HAS_RACC,
+               .driver_data = 0,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -180,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 /* FEC receive acceleration */
 #define FEC_RACC_IPDIS         (1 << 1)
 #define FEC_RACC_PRODIS                (1 << 2)
+#define FEC_RACC_SHIFT16       BIT(7)
 #define FEC_RACC_OPTIONS       (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
 
 /*
@@ -945,9 +946,11 @@ fec_restart(struct net_device *ndev)
 
 #if !defined(CONFIG_M5272)
        if (fep->quirks & FEC_QUIRK_HAS_RACC) {
-               /* set RX checksum */
                val = readl(fep->hwp + FEC_RACC);
+               /* align IP header */
+               val |= FEC_RACC_SHIFT16;
                if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+                       /* set RX checksum */
                        val |= FEC_RACC_OPTIONS;
                else
                        val &= ~FEC_RACC_OPTIONS;
@@ -1428,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                prefetch(skb->data - NET_IP_ALIGN);
                skb_put(skb, pkt_len - 4);
                data = skb->data;
+
+#if !defined(CONFIG_M5272)
+               if (fep->quirks & FEC_QUIRK_HAS_RACC)
+                       data = skb_pull_inline(skb, 2);
+#endif
+
                if (!is_copybreak && need_swap)
                        swap_buffer(data, pkt_len);
 
index 715583f..4d7bbd2 100644 (file)
@@ -99,8 +99,11 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);
 
-       if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
+       if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
+               dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
+                               &offset, size, dev_name(dev));
                goto err_request_region;
+       }
 
        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
@@ -171,6 +174,9 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);
 
+       if (!nvdimm_map)
+               return NULL;
+
        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;
 
index 8024a0e..0b78a82 100644 (file)
@@ -52,10 +52,28 @@ struct nvdimm_drvdata {
 struct nd_region_data {
        int ns_count;
        int ns_active;
-       unsigned int flush_mask;
-       void __iomem *flush_wpq[0][0];
+       unsigned int hints_shift;
+       void __iomem *flush_wpq[0];
 };
 
+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
+               int dimm, int hint)
+{
+       unsigned int num = 1 << ndrd->hints_shift;
+       unsigned int mask = num - 1;
+
+       return ndrd->flush_wpq[dimm * num + (hint & mask)];
+}
+
+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
+               int hint, void __iomem *flush)
+{
+       unsigned int num = 1 << ndrd->hints_shift;
+       unsigned int mask = num - 1;
+
+       ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
+}
+
 static inline struct nd_namespace_index *to_namespace_index(
                struct nvdimm_drvdata *ndd, int i)
 {
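The accessors above replace the old two-dimensional flush_wpq[0][0] layout with a flat array indexed by dimm and hint, where each dimm owns a power-of-two run of slots (1 << hints_shift) and the hint index wraps via the mask. A userspace model of the indexing:

#include <stdio.h>

#define HINTS_SHIFT 2                   /* 4 hints per dimm, assumed */

static int table[8 * (1 << HINTS_SHIFT)];

static int *get_flush_wpq(int dimm, int hint)
{
        unsigned int num = 1u << HINTS_SHIFT;
        unsigned int mask = num - 1;

        return &table[dimm * num + (hint & mask)];
}

int main(void)
{
        *get_flush_wpq(1, 0) = 42;
        /* hint 4 wraps back onto hint 0 for the same dimm */
        printf("%d\n", *get_flush_wpq(1, 4));
        return 0;
}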
index e8d5ba7..4c0ac4a 100644 (file)
@@ -38,7 +38,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
-       for (i = 0; i < nvdimm->num_flush; i++) {
+       for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;
@@ -54,14 +54,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
-                                       ndrd->flush_wpq[dimm][j] & PAGE_MASK);
+                                       ndrd_get_flush_wpq(ndrd, dimm, j)
+                                       & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
-                                       PHYS_PFN(pfn), PAGE_SIZE);
+                                       PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
-               ndrd->flush_wpq[dimm][i] = flush_page
-                       + (res->start & ~PAGE_MASK);
+               ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+                               + (res->start & ~PAGE_MASK));
        }
 
        return 0;
@@ -93,7 +94,10 @@ int nd_region_activate(struct nd_region *nd_region)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);
 
-       ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
+       if (!num_flush)
+               return 0;
+
+       ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -900,8 +904,8 @@ void nvdimm_flush(struct nd_region *nd_region)
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
-               if (ndrd->flush_wpq[i][0])
-                       writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
+               if (ndrd_get_flush_wpq(ndrd, i, 0))
+                       writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();
 }
 EXPORT_SYMBOL_GPL(nvdimm_flush);
@@ -925,7 +929,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 
        for (i = 0; i < nd_region->ndr_mappings; i++)
                /* flush hints present, flushing required */
-               if (ndrd->flush_wpq[i][0])
+               if (ndrd_get_flush_wpq(ndrd, i, 0))
                        return 1;
 
        /*
index c2c2c28..fbdb226 100644 (file)
@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
        queue = &ctrl->queues[idx];
        queue->ctrl = ctrl;
-       queue->flags = 0;
        init_completion(&queue->cm_done);
 
        if (idx > 0)
@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
                goto out_destroy_cm_id;
        }
 
+       clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
        set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
 
        return 0;
index ba9af4a..ec6381e 100644 (file)
@@ -486,6 +486,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        else
                shost->dma_boundary = 0xffffffff;
 
+       shost->use_blk_mq = scsi_use_blk_mq;
+
        device_initialize(&shost->shost_gendev);
        dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
        shost->shost_gendev.bus = &scsi_bus_type;
index 1f36aca..1deb6ad 100644 (file)
@@ -1160,7 +1160,6 @@ bool scsi_use_blk_mq = true;
 bool scsi_use_blk_mq = false;
 #endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-EXPORT_SYMBOL_GPL(scsi_use_blk_mq);
 
 static int __init init_scsi(void)
 {
index 57a4b99..85c8a51 100644 (file)
@@ -29,6 +29,7 @@ extern int scsi_init_hosts(void);
 extern void scsi_exit_hosts(void);
 
 /* scsi.c */
+extern bool scsi_use_blk_mq;
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
 #ifdef CONFIG_SCSI_LOGGING
index 38c2df8..665da8f 100644 (file)
@@ -4271,13 +4271,10 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
        if (ret < 0)
                return ret;
 
-       /*
-        * Use new btrfs_qgroup_reserve_data to reserve precious data space
-        *
-        * TODO: Find a good method to avoid reserve data space for NOCOW
-        * range, but don't impact performance on quota disable case.
-        */
+       /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
        ret = btrfs_qgroup_reserve_data(inode, start, len);
+       if (ret)
+               btrfs_free_reserved_data_space_noquota(inode, start, len);
        return ret;
 }
 
index b2a2da5..7fd939b 100644 (file)
@@ -1634,6 +1634,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
        int namelen;
        int ret = 0;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        ret = mnt_want_write_file(file);
        if (ret)
                goto out;
@@ -1691,6 +1694,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
        struct btrfs_ioctl_vol_args *vol_args;
        int ret;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
@@ -1714,6 +1720,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
        bool readonly = false;
        struct btrfs_qgroup_inherit *inherit = NULL;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
@@ -2357,6 +2366,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        int ret;
        int err = 0;
 
+       if (!S_ISDIR(dir->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
index c30cf49..2c6312d 100644 (file)
@@ -333,6 +333,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
                if (bin_attr->cb_max_size &&
                        *ppos + count > bin_attr->cb_max_size) {
                        len = -EFBIG;
+                       goto out;
                }
 
                tbuf = vmalloc(*ppos + count);
index 98d3654..bbb4b3e 100644 (file)
@@ -1842,6 +1842,16 @@ out_commit:
        ocfs2_commit_trans(osb, handle);
 
 out:
+       /*
+        * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
+        * even on errors here such as ENOSPC and ENOMEM. So, we need
+        * to unlock the target page manually to prevent deadlocks when
+        * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
+        * to VM code.
+        */
+       if (wc->w_target_locked)
+               unlock_page(mmap_page);
+
        ocfs2_free_write_ctxt(inode, wc);
 
        if (data_ac) {
index 5261751..5f52709 100644 (file)
@@ -32,6 +32,7 @@ enum can_mode {
  * CAN common private data
  */
 struct can_priv {
+       struct net_device *dev;
        struct can_device_stats can_stats;
 
        struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
        u32 ctrlmode_static;    /* static enabled options for driver/hardware */
 
        int restart_ms;
-       struct timer_list restart_timer;
+       struct delayed_work restart_work;
 
        int (*do_set_bittiming)(struct net_device *dev);
        int (*do_set_data_bittiming)(struct net_device *dev);
index 66533e1..dc69df0 100644 (file)
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
 #define dma_mmap_writecombine dma_mmap_wc
 #endif
 
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
index d351fd3..e5fb813 100644 (file)
@@ -120,5 +120,5 @@ struct mfc_cache {
 struct rtmsg;
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
-                  struct rtmsg *rtm, int nowait);
+                  struct rtmsg *rtm, int nowait, u32 portid);
 #endif
index 3987b64..19a1c0c 100644 (file)
@@ -116,7 +116,7 @@ struct mfc6_cache {
 
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-                          struct rtmsg *rtm, int nowait);
+                          struct rtmsg *rtm, int nowait, u32 portid);
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
index 7e3d537..01e8443 100644 (file)
@@ -620,6 +620,7 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
                return __get_user(c, end);
        }
 
+       (void)c;
        return 0;
 }
 
index 3a2f9ae..856e50b 100644 (file)
@@ -190,7 +190,7 @@ struct property_entry {
        .length = ARRAY_SIZE(_val_) * sizeof(_type_),           \
        .is_array = true,                                       \
        .is_string = false,                                     \
-       { .pointer = { _type_##_data = _val_ } },               \
+       { .pointer = { ._type_##_data = _val_ } },              \
 }
 
 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_)                 \
index b17cc48..4a529c9 100644 (file)
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_pages_dec(struct radix_tree_node *node)
 {
+       VM_BUG_ON(!workingset_node_pages(node));
        node->count--;
 }
 
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 {
+       VM_BUG_ON(!workingset_node_shadows(node));
        node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
 }
 
index 8693dc4..11c3bf2 100644 (file)
@@ -555,6 +555,9 @@ struct sctp_chunk {
 
        atomic_t refcnt;
 
+       /* How many times this chunk has been sent, for the prsctp RTX policy */
+       int sent_count;
+
        /* This is our link to the per-transport transmitted list.  */
        struct list_head transmitted_list;
 
@@ -604,16 +607,6 @@ struct sctp_chunk {
        /* This needs to be recoverable for SCTP_SEND_FAILED events. */
        struct sctp_sndrcvinfo sinfo;
 
-       /* We use this field to record param for prsctp policies,
-        * for TTL policy, it is the time_to_drop of this chunk,
-        * for RTX policy, it is the max_sent_count of this chunk,
-        * for PRIO policy, it is the priority of this chunk.
-        */
-       unsigned long prsctp_param;
-
-       /* How many times this chunk have been sent, for prsctp RTX policy */
-       int sent_count;
-
        /* Which association does this belong to?  */
        struct sctp_association *asoc;
 
index 0dee7af..7e4cd53 100644 (file)
@@ -771,12 +771,9 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
                shost->tmf_in_progress;
 }
 
-extern bool scsi_use_blk_mq;
-
 static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
 {
-       return scsi_use_blk_mq;
-
+       return shost->use_blk_mq;
 }
 
 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
index 5e8dab5..d6b729b 100644 (file)
@@ -3446,9 +3446,28 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
         * Except for the root, subtree_control must be zero for a cgroup
         * with tasks so that child cgroups don't compete against tasks.
         */
-       if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
-               ret = -EBUSY;
-               goto out_unlock;
+       if (enable && cgroup_parent(cgrp)) {
+               struct cgrp_cset_link *link;
+
+               /*
+                * Because namespaces pin csets too, @cgrp->cset_links
+                * might not be empty even when @cgrp is empty.  Walk and
+                * verify each cset.
+                */
+               spin_lock_irq(&css_set_lock);
+
+               ret = 0;
+               list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+                       if (css_set_populated(link->cset)) {
+                               ret = -EBUSY;
+                               break;
+                       }
+               }
+
+               spin_unlock_irq(&css_set_lock);
+
+               if (ret)
+                       goto out_unlock;
        }
 
        /* save and update control masks and prepare csses */
@@ -3899,7 +3918,9 @@ void cgroup_file_notify(struct cgroup_file *cfile)
  * cgroup_task_count - count the number of tasks in a cgroup.
  * @cgrp: the cgroup in question
  *
- * Return the number of tasks in the cgroup.
+ * Return the number of tasks in the cgroup.  The returned number can be
+ * higher than the actual number of tasks due to css_set references from
+ * namespace roots and temporary usages.
  */
 static int cgroup_task_count(const struct cgroup *cgrp)
 {
index c27e533..2b4c20a 100644 (file)
@@ -325,8 +325,7 @@ static struct file_system_type cpuset_fs_type = {
 /*
  * Return in pmask the portion of a cpusets's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.  The top
- * cpuset always has some cpus online.
+ * until we find one that does have some online cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
@@ -335,8 +334,20 @@ static struct file_system_type cpuset_fs_type = {
  */
 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 {
-       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
+       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
                cs = parent_cs(cs);
+               if (unlikely(!cs)) {
+                       /*
+                        * The top cpuset doesn't have any online cpu as a
+                        * consequence of a race between cpuset_hotplug_work
+                        * and cpu hotplug notifier.  But we know the top
+                        * cpuset's effective_cpus is on its way to be
+                        * identical to cpu_online_mask.
+                        */
+                       cpumask_copy(pmask, cpu_online_mask);
+                       return;
+               }
+       }
        cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
 }
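The loop above can now fall off the root during the hotplug race described in the comment, so the NULL parent case falls back to the full online mask rather than dereferencing a non-existent parent. A simplified model of the walk:

#include <stdio.h>

struct node {
        struct node *parent;
        unsigned long effective;        /* bitmask of cpus, for the sketch */
};

static unsigned long online = 0x3;      /* cpus 0-1 online */

static unsigned long guarantee_online(struct node *n)
{
        while (!(n->effective & online)) {
                n = n->parent;
                if (!n)
                        return online;  /* racy window: use online mask */
        }
        return n->effective & online;
}

int main(void)
{
        struct node root = { NULL, 0x0 };       /* transiently empty */
        struct node leaf = { &root, 0x4 };      /* only offline cpu 2 */

        printf("mask=%#lx\n", guarantee_online(&leaf));
        return 0;
}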
 
@@ -2074,7 +2085,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
  * which could have been changed by cpuset just after it inherits the
  * state from the parent and before it sits on the cgroup's task list.
  */
-void cpuset_fork(struct task_struct *task)
+static void cpuset_fork(struct task_struct *task)
 {
        if (task_css_is_root(task, cpuset_cgrp_id))
                return;
index 9fc3be0..5c02f67 100644 (file)
@@ -3929,7 +3929,7 @@ static void exclusive_event_destroy(struct perf_event *event)
 
 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
 {
-       if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
+       if ((e1->pmu == e2->pmu) &&
            (e1->cpu == e2->cpu ||
             e1->cpu == -1 ||
             e2->cpu == -1))
index 6373890..26ba565 100644 (file)
@@ -820,6 +820,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
        desc->name = name;
 
        if (handle != handle_bad_irq && is_chained) {
+               unsigned int type = irqd_get_trigger_type(&desc->irq_data);
+
                /*
                 * We're about to start this interrupt immediately,
                 * hence the need to set the trigger configuration.
@@ -828,8 +830,10 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                 * chained interrupt. Reset it immediately because we
                 * do know better.
                 */
-               __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
-               desc->handle_irq = handle;
+               if (type != IRQ_TYPE_NONE) {
+                       __irq_set_trigger(desc, type);
+                       desc->handle_irq = handle;
+               }
 
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
index dade4c9..7bc5676 100644 (file)
@@ -5124,19 +5124,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        struct trace_iterator *iter = filp->private_data;
        ssize_t sret;
 
-       /* return any leftover data */
-       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-       if (sret != -EBUSY)
-               return sret;
-
-       trace_seq_init(&iter->seq);
-
        /*
         * Avoid more than one consumer on a single file descriptor
         * This is just a matter of traces coherency, the ring buffer itself
         * is protected.
         */
        mutex_lock(&iter->mutex);
+
+       /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               goto out;
+
+       trace_seq_init(&iter->seq);
+
        if (iter->trace->read) {
                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
                if (sret)
@@ -6163,9 +6164,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                return -EBUSY;
 #endif
 
-       if (splice_grow_spd(pipe, &spd))
-               return -ENOMEM;
-
        if (*ppos & (PAGE_SIZE - 1))
                return -EINVAL;
 
@@ -6175,6 +6173,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                len &= PAGE_MASK;
        }
 
+       if (splice_grow_spd(pipe, &spd))
+               return -ENOMEM;
+
  again:
        trace_access_lock(iter->cpu_file);
        entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
@@ -6232,19 +6233,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        /* did we read anything? */
        if (!spd.nr_pages) {
                if (ret)
-                       return ret;
+                       goto out;
 
+               ret = -EAGAIN;
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
-                       return -EAGAIN;
+                       goto out;
 
                ret = wait_on_pipe(iter, true);
                if (ret)
-                       return ret;
+                       goto out;
 
                goto again;
        }
 
        ret = splice_to_pipe(pipe, &spd);
+out:
        splice_shrink_spd(&spd);
 
        return ret;
index 2e2cca5..cab7405 100644 (file)
@@ -821,7 +821,7 @@ config DETECT_HUNG_TASK
        help
          Say Y here to enable the kernel to detect "hung tasks",
          which are bugs that cause the task to be stuck in
-         uninterruptible "D" state indefinitiley.
+         uninterruptible "D" state indefinitely.
 
          When a hung task is detected, the kernel will print the
          current stack trace (which you should report), but the
index 1b7bf73..91f0727 100644 (file)
@@ -105,10 +105,10 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
 
 #ifdef CONFIG_RADIX_TREE_MULTIORDER
        if (radix_tree_is_internal_node(entry)) {
-               unsigned long siboff = get_slot_offset(parent, entry);
-               if (siboff < RADIX_TREE_MAP_SIZE) {
-                       offset = siboff;
-                       entry = rcu_dereference_raw(parent->slots[offset]);
+               if (is_sibling_entry(parent, entry)) {
+                       void **sibentry = (void **) entry_to_node(entry);
+                       offset = get_slot_offset(parent, sibentry);
+                       entry = rcu_dereference_raw(*sibentry);
                }
        }
 #endif
index 8a287df..2d0986a 100644 (file)
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
+static int page_cache_tree_insert(struct address_space *mapping,
+                                 struct page *page, void **shadowp)
+{
+       struct radix_tree_node *node;
+       void **slot;
+       int error;
+
+       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+                                   &node, &slot);
+       if (error)
+               return error;
+       if (*slot) {
+               void *p;
+
+               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+               if (!radix_tree_exceptional_entry(p))
+                       return -EEXIST;
+
+               mapping->nrexceptional--;
+               if (!dax_mapping(mapping)) {
+                       if (shadowp)
+                               *shadowp = p;
+                       if (node)
+                               workingset_node_shadows_dec(node);
+               } else {
+                       /* DAX can replace empty locked entry with a hole */
+                       WARN_ON_ONCE(p !=
+                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+                                        RADIX_DAX_ENTRY_LOCK));
+                       /* DAX accounts exceptional entries as normal pages */
+                       if (node)
+                               workingset_node_pages_dec(node);
+                       /* Wakeup waiters for exceptional entry lock */
+                       dax_wake_mapping_entry_waiter(mapping, page->index,
+                                                     false);
+               }
+       }
+       radix_tree_replace_slot(slot, page);
+       mapping->nrpages++;
+       if (node) {
+               workingset_node_pages_inc(node);
+               /*
+                * Don't track node that contains actual pages.
+                *
+                * Avoid acquiring the list_lru lock if already
+                * untracked.  The list_empty() test is safe as
+                * node->private_list is protected by
+                * mapping->tree_lock.
+                */
+               if (!list_empty(&node->private_list))
+                       list_lru_del(&workingset_shadow_nodes,
+                                    &node->private_list);
+       }
+       return 0;
+}
+
 static void page_cache_tree_delete(struct address_space *mapping,
                                   struct page *page, void *shadow)
 {
@@ -561,7 +617,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 
                spin_lock_irqsave(&mapping->tree_lock, flags);
                __delete_from_page_cache(old, NULL);
-               error = radix_tree_insert(&mapping->page_tree, offset, new);
+               error = page_cache_tree_insert(mapping, new, NULL);
                BUG_ON(error);
                mapping->nrpages++;
 
@@ -584,62 +640,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int page_cache_tree_insert(struct address_space *mapping,
-                                 struct page *page, void **shadowp)
-{
-       struct radix_tree_node *node;
-       void **slot;
-       int error;
-
-       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
-                                   &node, &slot);
-       if (error)
-               return error;
-       if (*slot) {
-               void *p;
-
-               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-               if (!radix_tree_exceptional_entry(p))
-                       return -EEXIST;
-
-               mapping->nrexceptional--;
-               if (!dax_mapping(mapping)) {
-                       if (shadowp)
-                               *shadowp = p;
-                       if (node)
-                               workingset_node_shadows_dec(node);
-               } else {
-                       /* DAX can replace empty locked entry with a hole */
-                       WARN_ON_ONCE(p !=
-                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-                                        RADIX_DAX_ENTRY_LOCK));
-                       /* DAX accounts exceptional entries as normal pages */
-                       if (node)
-                               workingset_node_pages_dec(node);
-                       /* Wakeup waiters for exceptional entry lock */
-                       dax_wake_mapping_entry_waiter(mapping, page->index,
-                                                     false);
-               }
-       }
-       radix_tree_replace_slot(slot, page);
-       mapping->nrpages++;
-       if (node) {
-               workingset_node_pages_inc(node);
-               /*
-                * Don't track node that contains actual pages.
-                *
-                * Avoid acquiring the list_lru lock if already
-                * untracked.  The list_empty() test is safe as
-                * node->private_list is protected by
-                * mapping->tree_lock.
-                */
-               if (!list_empty(&node->private_list))
-                       list_lru_del(&workingset_shadow_nodes,
-                                    &node->private_list);
-       }
-       return 0;
-}
-
 static int __add_to_page_cache_locked(struct page *page,
                                      struct address_space *mapping,
                                      pgoff_t offset, gfp_t gfp_mask,
index a6abd76..53ae6d0 100644 (file)
@@ -1138,9 +1138,6 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
        bool was_writable;
        int flags = 0;
 
-       /* A PROT_NONE fault should not end up here */
-       BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
        fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
        if (unlikely(!pmd_same(pmd, *fe->pmd)))
                goto out_unlock;
index 73d43ba..5048083 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
 {
        struct rmap_item *rmap_item;
 
-       rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+       rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
+                                               __GFP_NORETRY | __GFP_NOWARN);
        if (rmap_item)
                ksm_rmap_items++;
        return rmap_item;
index 83be99d..793fe0f 100644 (file)
@@ -3351,9 +3351,6 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
        bool was_writable = pte_write(pte);
        int flags = 0;
 
-       /* A PROT_NONE fault should not end up here */
-       BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
        /*
        * The "pte" at this point cannot be used safely without
        * validation through pte_unmap_same(). It's of NUMA type but
@@ -3458,6 +3455,11 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
        return VM_FAULT_FALLBACK;
 }
 
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+       return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3524,7 +3526,7 @@ static int handle_pte_fault(struct fault_env *fe)
        if (!pte_present(entry))
                return do_swap_page(fe, entry);
 
-       if (pte_protnone(entry))
+       if (pte_protnone(entry) && vma_is_accessible(fe->vma))
                return do_numa_page(fe, entry);
 
        fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
@@ -3590,7 +3592,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
                barrier();
                if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
-                       if (pmd_protnone(orig_pmd))
+                       if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&fe, orig_pmd);
 
                        if ((fe.flags & FAULT_FLAG_WRITE) &&
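
The two mm/memory.c hunks above, together with the huge_memory.c hunk further up, turn the old BUG_ON assertion into a guard at the call sites: a protnone PTE/PMD is only treated as a NUMA hinting fault when the VMA is accessible, so a genuine PROT_NONE mapping no longer panics the kernel. A small sketch of the predicate, assuming nothing beyond what the diff shows:

#include <assert.h>
#include <stdbool.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

struct vma { unsigned long vm_flags; };

/* mirrors the helper added above: any of read/write/exec set */
static bool vma_is_accessible(const struct vma *vma)
{
	return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
}

int main(void)
{
	struct vma prot_none = { 0 };
	struct vma normal = { VM_READ | VM_WRITE };

	/* a protnone entry only means "NUMA hinting fault" when the VMA
	 * itself is accessible; otherwise it is a genuine PROT_NONE
	 * fault and must not reach do_numa_page() */
	assert(!vma_is_accessible(&prot_none));
	assert(vma_is_accessible(&normal));
	return 0;
}
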
index b58906b..9d29ba0 100644 (file)
@@ -1555,8 +1555,8 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 {
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
        int nid = page_to_nid(page);
-       nodemask_t nmask = node_online_map;
-       struct page *new_page;
+       nodemask_t nmask = node_states[N_MEMORY];
+       struct page *new_page = NULL;
 
        /*
         * TODO: allocate a destination hugepage from a nearest neighbor node,
@@ -1567,14 +1567,14 @@ static struct page *new_node_page(struct page *page, unsigned long private,
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        next_node_in(nid, nmask));
 
-       if (nid != next_node_in(nid, nmask))
-               node_clear(nid, nmask);
+       node_clear(nid, nmask);
 
        if (PageHighMem(page)
            || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages_nodemask(gfp_mask, 0,
+       if (!nodes_empty(nmask))
+               new_page = __alloc_pages_nodemask(gfp_mask, 0,
                                        node_zonelist(nid, gfp_mask), &nmask);
        if (!new_page)
                new_page = __alloc_pages(gfp_mask, 0,
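
The memory-hotplug fix above has two parts: start from node_states[N_MEMORY] instead of node_online_map so memoryless nodes are never chosen, and clear the source node unconditionally, only calling __alloc_pages_nodemask() when the remaining mask is non-empty (otherwise fall through to the unrestricted __alloc_pages()). A toy illustration of the mask handling:

#include <stdio.h>

/* toy nodemask: bit i set means node i is a candidate target */
int main(void)
{
	unsigned long nmask = 0x1UL;	/* only node 0 has memory */
	int nid = 0;			/* the pages being offlined live there */

	nmask &= ~(1UL << nid);		/* never migrate back to the source */

	if (nmask)
		printf("allocate from mask %#lx\n", nmask);
	else
		printf("mask empty: fall back to an unrestricted allocation\n");
	return 0;
}
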
index fd8b2b5..971fc83 100644 (file)
@@ -270,7 +270,7 @@ bool shmem_charge(struct inode *inode, long pages)
                info->alloced -= pages;
                shmem_recalc_inode(inode);
                spin_unlock_irqrestore(&info->lock, flags);
-
+               shmem_unacct_blocks(info->flags, pages);
                return false;
        }
        percpu_counter_add(&sbinfo->used_blocks, pages);
@@ -291,6 +291,7 @@ void shmem_uncharge(struct inode *inode, long pages)
 
        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
+       shmem_unacct_blocks(info->flags, pages);
 }
 
 /*
@@ -1980,7 +1981,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
                                return addr;
                        sb = shm_mnt->mnt_sb;
                }
-               if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
+               if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
                        return addr;
        }
 
index b1e12a1..0fe8b71 100644 (file)
@@ -2303,23 +2303,6 @@ out:
        }
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-       /*
-        * This deliberately does not clear the cpumask as it's expensive
-        * and unnecessary. If there happens to be data in there then the
-        * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-        * then will be cleared.
-        */
-       current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2355,8 +2338,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
        scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
                         sc->priority == DEF_PRIORITY);
 
-       init_tlb_ubc();
-
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
index 69551cf..617475f 100644 (file)
@@ -418,21 +418,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */
-
-       BUG_ON(!node->count);
-       BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
+       BUG_ON(!workingset_node_shadows(node));
+       BUG_ON(workingset_node_pages(node));
 
        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[i]) {
                        BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
                        node->slots[i] = NULL;
-                       BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
-                       node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+                       workingset_node_shadows_dec(node);
                        BUG_ON(!mapping->nrexceptional);
                        mapping->nrexceptional--;
                }
        }
-       BUG_ON(node->count);
+       BUG_ON(workingset_node_shadows(node));
        inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
        if (!__radix_tree_delete_node(&mapping->page_tree, node))
                BUG();
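
Background for the workingset change: at this point in the tree, radix_tree_node->count packs two counters into one word, page references in the low RADIX_TREE_COUNT_SHIFT bits and shadow entries above them; the patch swaps the open-coded bit arithmetic for the workingset_node_pages()/workingset_node_shadows() accessors. A toy version of the packing:

#include <assert.h>

#define COUNT_SHIFT 6	/* stands in for RADIX_TREE_COUNT_SHIFT */
#define COUNT_MASK  ((1U << COUNT_SHIFT) - 1)

/* pages live in the low bits, shadow entries above them */
static unsigned int node_pages(unsigned int count)
{
	return count & COUNT_MASK;	/* workingset_node_pages() */
}

static unsigned int node_shadows(unsigned int count)
{
	return count >> COUNT_SHIFT;	/* workingset_node_shadows() */
}

int main(void)
{
	unsigned int count = 0;

	count += 1;			/* one page reference */
	count += 1U << COUNT_SHIFT;	/* one shadow entry */
	assert(node_pages(count) == 1);
	assert(node_shadows(count) == 1);
	return 0;
}
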
index a87bcd2..5f006e1 100644 (file)
@@ -2123,7 +2123,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
-                  struct rtmsg *rtm, int nowait)
+                  struct rtmsg *rtm, int nowait, u32 portid)
 {
        struct mfc_cache *cache;
        struct mr_table *mrt;
@@ -2168,6 +2168,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
                        return -ENOMEM;
                }
 
+               NETLINK_CB(skb2).portid = portid;
                skb_push(skb2, sizeof(struct iphdr));
                skb_reset_network_header(skb2);
                iph = ip_hdr(skb2);
index 654a9af..f2be689 100644 (file)
@@ -2500,7 +2500,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
                    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
                        int err = ipmr_get_route(net, skb,
                                                 fl4->saddr, fl4->daddr,
-                                                r, nowait);
+                                                r, nowait, portid);
+
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
index 8c6ad2d..a27b9c0 100644 (file)
@@ -2362,10 +2362,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
        }
 #if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                         msg,
-                        &np->daddr, ntohs(inet->inet_dport),
+                        &sk->sk_v6_daddr, ntohs(inet->inet_dport),
                         tp->snd_cwnd, tcp_left_out(tp),
                         tp->snd_ssthresh, tp->prior_ssthresh,
                         tp->packets_out);
index 7c77708..896e9df 100644 (file)
@@ -1992,12 +1992,14 @@ static int tcp_mtu_probe(struct sock *sk)
        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
                copy = min_t(int, skb->len, probe_size - len);
-               if (nskb->ip_summed)
+               if (nskb->ip_summed) {
                        skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-               else
-                       nskb->csum = skb_copy_and_csum_bits(skb, 0,
-                                                           skb_put(nskb, copy),
-                                                           copy, nskb->csum);
+               } else {
+                       __wsum csum = skb_copy_and_csum_bits(skb, 0,
+                                                            skb_put(nskb, copy),
+                                                            copy, 0);
+                       nskb->csum = csum_block_add(nskb->csum, csum, len);
+               }
 
                if (skb->len <= copy) {
                        /* We've eaten all the data from this skb.
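
Why the tcp_mtu_probe() change is needed: the Internet checksum is position-sensitive, so when several skbs are concatenated into the probe skb, each fragment's checksum has to be folded in at its byte offset, which is what csum_block_add() does (a fragment starting at an odd offset contributes with its bytes swapped). A self-contained sketch of the property, using the big-endian word convention rather than the kernel's wsum type:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* 16-bit ones-complement sum over a buffer (big-endian word convention) */
static uint32_t csum_partial(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	for (; len > 1; p += 2, len -= 2)
		sum += (uint32_t)p[0] << 8 | p[1];
	if (len)
		sum += (uint32_t)p[0] << 8;	/* pad the odd tail */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* fold a block's checksum in at byte offset "off"; odd offsets swap the
 * bytes, which is the job csum_block_add() does in the kernel */
static uint32_t csum_block_add(uint32_t sum, uint32_t part, size_t off)
{
	if (off & 1)
		part = (part & 0xff) << 8 | part >> 8;
	sum += part;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t data[7] = { 1, 2, 3, 4, 5, 6, 7 };
	uint32_t whole = csum_partial(data, 7);

	/* checksumming the two halves separately and combining them at
	 * the right offset must equal checksumming the whole buffer */
	assert(csum_block_add(csum_partial(data, 3),
			      csum_partial(data + 3, 4), 3) == whole);
	return 0;
}
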
index 4ce74f8..d7d6d3a 100644 (file)
@@ -648,7 +648,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = skb->protocol;
 
        err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
        if (err)
index fccb5dd..7f4265b 100644 (file)
@@ -2285,8 +2285,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        return 1;
 }
 
-int ip6mr_get_route(struct net *net,
-                   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+                   int nowait, u32 portid)
 {
        int err;
        struct mr6_table *mrt;
@@ -2331,6 +2331,7 @@ int ip6mr_get_route(struct net *net,
                        return -ENOMEM;
                }
 
+               NETLINK_CB(skb2).portid = portid;
                skb_reset_transport_header(skb2);
 
                skb_put(skb2, sizeof(struct ipv6hdr));
index 5a5aeb9..bdbc38e 100644 (file)
@@ -3216,7 +3216,9 @@ static int rt6_fill_node(struct net *net,
        if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
                if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-                       int err = ip6mr_get_route(net, skb, rtm, nowait);
+                       int err = ip6mr_get_route(net, skb, rtm, nowait,
+                                                 portid);
+
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
index ccf7b4b..95c463c 100644 (file)
@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
        u32 *tlv = (u32 *)(skbdata);
        u16 totlen = nla_total_size(dlen);      /*alignment + hdr */
        char *dptr = (char *)tlv + NLA_HDRLEN;
-       u32 htlv = attrtype << 16 | dlen;
+       u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
 
        *tlv = htonl(htlv);
        memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -653,7 +653,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
-       u16 ifehdrln = ifehdr->metalen;
+       int ifehdrln = (int)ifehdr->metalen;
        struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
 
        spin_lock(&ife->tcf_lock);
@@ -766,8 +766,6 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                return TC_ACT_SHOT;
        }
 
-       iethh = eth_hdr(skb);
-
        err = skb_cow_head(skb, hdrm);
        if (unlikely(err)) {
                ife->tcf_qstats.drops++;
@@ -778,6 +776,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
        if (!(at & AT_EGRESS))
                skb_push(skb, skb->dev->hard_header_len);
 
+       iethh = (struct ethhdr *)skb->data;
        __skb_push(skb, hdrm);
        memcpy(skb->data, iethh, skb->mac_len);
        skb_reset_mac_header(skb);
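
In the act_ife encode fix above, the length written into the TLV header now covers the netlink attribute header as well as the payload (dlen + NLA_HDRLEN), which is what the decode side assumes when it walks the buffer. A toy encoder/walker showing why the two must agree (real netlink TLVs are additionally padded to 4-byte alignment, which this sketch ignores):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define HDRLEN 4	/* stands in for NLA_HDRLEN */

/* encode one TLV; the length field covers header + data, as in the fix */
static size_t tlv_encode(uint8_t *buf, uint16_t type,
			 const void *data, uint16_t dlen)
{
	uint32_t hdr = (uint32_t)type << 16 | (uint32_t)(dlen + HDRLEN);

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + HDRLEN, data, dlen);
	return HDRLEN + dlen;
}

/* walk the buffer; each step advances by the full encoded length */
static int tlv_count(const uint8_t *buf, size_t total)
{
	int n = 0;

	while (total >= HDRLEN) {
		uint32_t hdr;
		uint32_t len;

		memcpy(&hdr, buf, sizeof(hdr));
		len = hdr & 0xffff;
		if (len < HDRLEN || len > total)
			break;		/* malformed or truncated */
		buf += len;
		total -= len;
		n++;
	}
	return n;
}

int main(void)
{
	uint8_t buf[64];
	size_t off = 0;

	off += tlv_encode(buf + off, 1, "abcd", 4);
	off += tlv_encode(buf + off, 2, "xy", 2);
	assert(tlv_count(buf, off) == 2);	/* lengths line up */
	return 0;
}
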
index f27ffee..ca0516e 100644 (file)
@@ -1153,6 +1153,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
        if (!skb)
                return NULL;
 
+       qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;
        qdisc_bstats_update(sch, skb);
 
@@ -1256,6 +1257,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        }
 
        bstats_update(&cl->bstats, skb);
+       qdisc_qstats_backlog_inc(sch, skb);
        ++sch->q.qlen;
 
        agg = cl->agg;
@@ -1476,6 +1478,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
                        qdisc_reset(cl->qdisc);
                }
        }
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
index add3cc7..20a350b 100644 (file)
@@ -400,6 +400,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 enqueue:
        ret = qdisc_enqueue(skb, child, to_free);
        if (likely(ret == NET_XMIT_SUCCESS)) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
                increment_qlen(skb, q);
        } else if (net_xmit_drop_count(ret)) {
@@ -428,6 +429,7 @@ static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
 
        if (skb) {
                qdisc_bstats_update(sch, skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
                decrement_qlen(skb, q);
        }
@@ -450,6 +452,7 @@ static void sfb_reset(struct Qdisc *sch)
        struct sfb_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        q->slot = 0;
        q->double_buffering = false;
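
Both scheduler patches above fix the same class of bug: sch->qstats.backlog must track the bytes queued in lockstep with sch->q.qlen on every enqueue, dequeue and reset, or `tc -s qdisc` reports a backlog that never drains. The invariant in miniature:

#include <assert.h>
#include <stddef.h>

/* toy queue keeping the two counters the patches above keep in sync */
struct qdisc {
	unsigned int qlen;	/* packets queued */
	unsigned int backlog;	/* bytes queued */
};

static void enqueue(struct qdisc *q, size_t bytes)
{
	q->backlog += bytes;	/* qdisc_qstats_backlog_inc() */
	q->qlen++;
}

static void dequeue(struct qdisc *q, size_t bytes)
{
	q->backlog -= bytes;	/* qdisc_qstats_backlog_dec() */
	q->qlen--;
}

static void reset(struct qdisc *q)
{
	q->backlog = 0;		/* the line both patches add to reset */
	q->qlen = 0;
}

int main(void)
{
	struct qdisc q = { 0, 0 };

	enqueue(&q, 1500);
	enqueue(&q, 64);
	dequeue(&q, 1500);
	assert(q.qlen == 1 && q.backlog == 64);
	reset(&q);
	assert(q.qlen == 0 && q.backlog == 0);
	return 0;
}
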
index 8afe2e9..7a1cdf4 100644 (file)
@@ -192,6 +192,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                         msg, msg->expires_at, jiffies);
        }
 
+       if (asoc->peer.prsctp_capable &&
+           SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
+               msg->expires_at =
+                       jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
+
        /* This is the biggest possible DATA chunk that can fit into
         * the packet
         */
@@ -349,7 +354,7 @@ errout:
 /* Check whether this message has expired. */
 int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 {
-       if (!chunk->asoc->prsctp_enable ||
+       if (!chunk->asoc->peer.prsctp_capable ||
            !SCTP_PR_POLICY(chunk->sinfo.sinfo_flags)) {
                struct sctp_datamsg *msg = chunk->msg;
 
@@ -363,14 +368,14 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
        }
 
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
-           time_after(jiffies, chunk->prsctp_param)) {
+           time_after(jiffies, chunk->msg->expires_at)) {
                if (chunk->sent_count)
                        chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
                else
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
-                  chunk->sent_count > chunk->prsctp_param) {
+                  chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                return 1;
        }
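
With this SCTP rework, the TTL policy stamps an absolute expiry on the whole message at creation time (jiffies + sinfo_timetolive) and abandonment compares against msg->expires_at with time_after(), while the RTX policy compares sent_count against sinfo_timetolive directly, so the per-chunk prsctp_param copy can go away. A sketch of the wraparound-safe expiry comparison:

#include <assert.h>
#include <stdint.h>

/* wraparound-safe "has a passed b" in the style of time_after() */
static int time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t jiffies = 0xfffffff0u;		/* counter close to wrap */
	uint32_t expires_at = jiffies + 0x20;	/* expiry lands past zero */

	assert(!time_after32(jiffies, expires_at));	/* not yet expired */
	jiffies += 0x40;				/* time passes */
	assert(time_after32(jiffies, expires_at));	/* now expired */
	return 0;
}
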
index 3ec6da8..5825853 100644 (file)
@@ -304,7 +304,7 @@ void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
                         "illegal chunk");
 
                sctp_outq_tail_data(q, chunk);
-               if (chunk->asoc->prsctp_enable &&
+               if (chunk->asoc->peer.prsctp_capable &&
                    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                        chunk->asoc->sent_cnt_removable++;
                if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
@@ -354,7 +354,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 
        list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
                if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->prsctp_param <= sinfo->sinfo_timetolive)
+                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
                        continue;
 
                list_del_init(&chk->transmitted_list);
@@ -389,7 +389,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 
        list_for_each_entry_safe(chk, temp, queue, list) {
                if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->prsctp_param <= sinfo->sinfo_timetolive)
+                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
                        continue;
 
                list_del_init(&chk->list);
@@ -413,7 +413,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
 {
        struct sctp_transport *transport;
 
-       if (!asoc->prsctp_enable || !asoc->sent_cnt_removable)
+       if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
                return;
 
        msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
@@ -1026,7 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 
                                /* Mark as failed send. */
                                sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
-                               if (asoc->prsctp_enable &&
+                               if (asoc->peer.prsctp_capable &&
                                    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                                        asoc->sent_cnt_removable--;
                                sctp_chunk_free(chunk);
@@ -1319,7 +1319,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
                tsn = ntohl(tchunk->subh.data_hdr->tsn);
                if (TSN_lte(tsn, ctsn)) {
                        list_del_init(&tchunk->transmitted_list);
-                       if (asoc->prsctp_enable &&
+                       if (asoc->peer.prsctp_capable &&
                            SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                                asoc->sent_cnt_removable--;
                        sctp_chunk_free(tchunk);
index 807158e..048954e 100644 (file)
@@ -276,28 +276,17 @@ out:
        return err;
 }
 
-static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sock *sk, void *p)
 {
-       struct sctp_endpoint *ep = tsp->asoc->ep;
+       struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_comm_param *commp = p;
-       struct sock *sk = ep->base.sk;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
        const struct inet_diag_req_v2 *r = commp->r;
-       struct sctp_association *assoc =
-               list_entry(ep->asocs.next, struct sctp_association, asocs);
+       struct sctp_association *assoc;
        int err = 0;
 
-       /* find the ep only once through the transports by this condition */
-       if (tsp->asoc != assoc)
-               goto out;
-
-       if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
-               goto out;
-
        lock_sock(sk);
-       if (sk != assoc->base.sk)
-               goto release;
        list_for_each_entry(assoc, &ep->asocs, asocs) {
                if (cb->args[4] < cb->args[1])
                        goto next;
@@ -317,7 +306,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
                                        NLM_F_MULTI, cb->nlh,
                                        commp->net_admin) < 0) {
                        cb->args[3] = 1;
-                       err = 2;
+                       err = 1;
                        goto release;
                }
                cb->args[3] = 1;
@@ -327,7 +316,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, 0, cb->nlh,
                                        commp->net_admin) < 0) {
-                       err = 2;
+                       err = 1;
                        goto release;
                }
 next:
@@ -339,10 +328,35 @@ next:
        cb->args[4] = 0;
 release:
        release_sock(sk);
+       sock_put(sk);
        return err;
+}
+
+static int sctp_get_sock(struct sctp_transport *tsp, void *p)
+{
+       struct sctp_endpoint *ep = tsp->asoc->ep;
+       struct sctp_comm_param *commp = p;
+       struct sock *sk = ep->base.sk;
+       struct netlink_callback *cb = commp->cb;
+       const struct inet_diag_req_v2 *r = commp->r;
+       struct sctp_association *assoc =
+               list_entry(ep->asocs.next, struct sctp_association, asocs);
+
+       /* find the ep only once through the transports by this condition */
+       if (tsp->asoc != assoc)
+               goto out;
+
+       if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+               goto out;
+
+       sock_hold(sk);
+       cb->args[5] = (long)sk;
+
+       return 1;
+
 out:
        cb->args[2]++;
-       return err;
+       return 0;
 }
 
 static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
@@ -480,10 +494,18 @@ skip:
         * 2 : to record the transport pos of this time's traversal
         * 3 : to mark if we have dumped the ep info of the current asoc
         * 4 : to work as a temporary variable to traversal list
+        * 5 : to save the sk we get from traversing the tsp list.
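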
         */
        if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
                goto done;
-       sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
+
+next:
+       cb->args[5] = 0;
+       sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);
+
+       if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))
+               goto next;
+
 done:
        cb->args[1] = cb->args[4];
        cb->args[4] = 0;
index 79dd660..9e9690b 100644 (file)
@@ -706,20 +706,6 @@ nodata:
        return retval;
 }
 
-static void sctp_set_prsctp_policy(struct sctp_chunk *chunk,
-                                  const struct sctp_sndrcvinfo *sinfo)
-{
-       if (!chunk->asoc->prsctp_enable)
-               return;
-
-       if (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
-               chunk->prsctp_param =
-                       jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
-       else if (SCTP_PR_RTX_ENABLED(sinfo->sinfo_flags) ||
-                SCTP_PR_PRIO_ENABLED(sinfo->sinfo_flags))
-               chunk->prsctp_param = sinfo->sinfo_timetolive;
-}
-
 /* Make a DATA chunk for the given association from the provided
  * parameters.  However, do not populate the data payload.
  */
@@ -753,7 +739,6 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
 
        retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
        memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
-       sctp_set_prsctp_policy(retval, sinfo);
 
 nodata:
        return retval;
index 6cdc61c..fb02c70 100644 (file)
@@ -4473,17 +4473,21 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
                                  const union sctp_addr *paddr, void *p)
 {
        struct sctp_transport *transport;
-       int err = 0;
+       int err = -ENOENT;
 
        rcu_read_lock();
        transport = sctp_addrs_lookup_transport(net, laddr, paddr);
        if (!transport || !sctp_transport_hold(transport))
                goto out;
-       err = cb(transport, p);
+
+       sctp_association_hold(transport->asoc);
        sctp_transport_put(transport);
 
-out:
        rcu_read_unlock();
+       err = cb(transport, p);
+       sctp_association_put(transport->asoc);
+
+out:
        return err;
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
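
The pattern in sctp_transport_lookup_process() above is worth spelling out: the callback may sleep, so it cannot run under rcu_read_lock(); instead a reference is taken on the association while RCU still protects the lookup, the RCU section is left, and only then is the callback invoked (with -ENOENT now returned when no transport matches). A user-space sketch of that pin-then-call shape, with invented names:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct obj { atomic_int refcnt; };

/* take a reference unless the count already dropped to zero */
static int hold(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old > 0)
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return 1;
	return 0;
}

static void put(struct obj *o)
{
	atomic_fetch_sub(&o->refcnt, 1);
}

/* pin under the (elided) read-side lock, unlock, then call back */
static int lookup_process(struct obj *o, int (*cb)(struct obj *))
{
	int err = -2;			/* stands in for -ENOENT */

	/* rcu_read_lock() would be taken here */
	if (!o || !hold(o)) {
		/* rcu_read_unlock() */
		return err;
	}
	/* rcu_read_unlock(): the reference now keeps "o" alive */

	err = cb(o);			/* may sleep, runs unlocked */
	put(o);
	return err;
}

static int cb_ok(struct obj *o) { (void)o; return 0; }

int main(void)
{
	struct obj o = { 1 };

	assert(lookup_process(&o, cb_ok) == 0);
	assert(lookup_process(NULL, cb_ok) == -2);
	return 0;
}
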
index 17dbbe6..8a398b3 100644 (file)
@@ -465,6 +465,8 @@ void vsock_pending_work(struct work_struct *work)
 
        if (vsock_is_pending(sk)) {
                vsock_remove_pending(listener, sk);
+
+               listener->sk_ack_backlog--;
        } else if (!vsk->rejected) {
                /* We are not on the pending list and accept() did not reject
                 * us, so we must have been accepted by our user process.  We
@@ -475,8 +477,6 @@ void vsock_pending_work(struct work_struct *work)
                goto out;
        }
 
-       listener->sk_ack_backlog--;
-
        /* We need to remove ourself from the global connected sockets list so
         * incoming packets can't find this socket, and to reduce the reference
         * count.
@@ -2010,5 +2010,5 @@ EXPORT_SYMBOL_GPL(vsock_core_get_transport);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Socket Family");
-MODULE_VERSION("1.0.1.0-k");
+MODULE_VERSION("1.0.2.0-k");
 MODULE_LICENSE("GPL v2");
index 42396a7..a68f031 100644 (file)
@@ -363,6 +363,7 @@ is_mcounted_section_name(char const *const txtname)
                strcmp(".sched.text",    txtname) == 0 ||
                strcmp(".spinlock.text", txtname) == 0 ||
                strcmp(".irqentry.text", txtname) == 0 ||
+               strcmp(".softirqentry.text", txtname) == 0 ||
                strcmp(".kprobes.text", txtname) == 0 ||
                strcmp(".text.unlikely", txtname) == 0;
 }
index 96e2486..2d48011 100755 (executable)
@@ -134,6 +134,7 @@ my %text_sections = (
      ".sched.text" => 1,
      ".spinlock.text" => 1,
      ".irqentry.text" => 1,
+     ".softirqentry.text" => 1,
      ".kprobes.text" => 1,
      ".text.unlikely" => 1,
 );
index 5adbfc3..17a0610 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
+#include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -478,6 +479,7 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
+       u8 iv[AES_BLOCK_SIZE];
        unsigned int padlen;
        char pad[16];
        int ret;
@@ -500,8 +502,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        sg_init_table(sg_out, 1);
        sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
 
-       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-                                  epayload->iv);
+       memcpy(iv, epayload->iv, sizeof(iv));
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
        ret = crypto_skcipher_encrypt(req);
        tfm = crypto_skcipher_reqtfm(req);
        skcipher_request_free(req);
@@ -581,6 +583,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
+       u8 iv[AES_BLOCK_SIZE];
        char pad[16];
        int ret;
 
@@ -599,8 +602,8 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
                   epayload->decrypted_datalen);
        sg_set_buf(&sg_out[1], pad, sizeof pad);
 
-       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-                                  epayload->iv);
+       memcpy(iv, epayload->iv, sizeof(iv));
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
        ret = crypto_skcipher_decrypt(req);
        tfm = crypto_skcipher_reqtfm(req);
        skcipher_request_free(req);
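
Rationale for the encrypted-keys hunks: a skcipher implementation may advance the IV buffer in place (CBC, for example, writes back the last ciphertext block for chaining), so passing epayload->iv straight into the request would let the operation clobber the IV stored in the key payload; copying it to a local buffer first keeps the stored copy intact. A toy demonstration with a made-up cipher:

#include <assert.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* made-up "cipher" that, like real CBC code, writes the next chaining
 * value back into the IV buffer it was handed */
static void toy_encrypt(unsigned char *iv, const unsigned char *in,
			unsigned char *out, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = in[i] ^ iv[i % AES_BLOCK_SIZE];
	memcpy(iv, out + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
}

int main(void)
{
	unsigned char stored_iv[AES_BLOCK_SIZE] = "0123456789abcde";
	unsigned char iv[AES_BLOCK_SIZE];
	unsigned char in[32] = "payload payload payload payload";
	unsigned char out[32];

	memcpy(iv, stored_iv, sizeof(iv));	/* the fix: use a copy */
	toy_encrypt(iv, in, out, sizeof(in));

	/* the working copy was clobbered; the stored IV survived */
	assert(memcmp(iv, stored_iv, sizeof(iv)) != 0);
	return 0;
}
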
index dd48f42..f64c57b 100644 (file)
@@ -603,7 +603,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
                        return -ENOMEM;
                sprintf(t->label[i], "label%d", i);
 
-               t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS,
+               t->flush[i] = test_alloc(t, max(PAGE_SIZE,
+                                       sizeof(u64) * NUM_HINTS),
                                &t->flush_dma[i]);
                if (!t->flush[i])
                        return -ENOMEM;
index 3b53046..9d0919e 100644 (file)
@@ -1,5 +1,5 @@
 
-CFLAGS += -I. -g -Wall -D_LGPL_SOURCE
+CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
index 39d9b95..05d7bc4 100644 (file)
@@ -124,6 +124,8 @@ static void multiorder_check(unsigned long index, int order)
        unsigned long i;
        unsigned long min = index & ~((1UL << order) - 1);
        unsigned long max = min + (1UL << order);
+       void **slot;
+       struct item *item2 = item_create(min);
        RADIX_TREE(tree, GFP_KERNEL);
 
        printf("Multiorder index %ld, order %d\n", index, order);
@@ -139,13 +141,19 @@ static void multiorder_check(unsigned long index, int order)
                item_check_absent(&tree, i);
        for (i = max; i < 2*max; i++)
                item_check_absent(&tree, i);
+       for (i = min; i < max; i++)
+               assert(radix_tree_insert(&tree, i, item2) == -EEXIST);
+
+       slot = radix_tree_lookup_slot(&tree, index);
+       free(*slot);
+       radix_tree_replace_slot(slot, item2);
        for (i = min; i < max; i++) {
-               static void *entry = (void *)
-                                       (0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY);
-               assert(radix_tree_insert(&tree, i, entry) == -EEXIST);
+               struct item *item = item_lookup(&tree, i);
+               assert(item != 0);
+               assert(item->index == min);
        }
 
-       assert(item_delete(&tree, index) != 0);
+       assert(item_delete(&tree, min) != 0);
 
        for (i = 0; i < 2*max; i++)
                item_check_absent(&tree, i);