Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 2 Oct 2016 17:53:38 +0000 (10:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 2 Oct 2016 17:53:38 +0000 (10:53 -0700)
Pull MIPS fixes from Ralf Baechle:
 "Another round of fixes:

   - CM: Fix mips_cm_max_vp_width for non-MT kernels on MT systems
   - CPS: Avoid BUG() when offlining pre-r6 CPUs
   - DEC: Avoid gas warnings due to suspicious instruction scheduling by
     manually expanding assembler macros.
   - FTLB: Fix configuration by moving it after TLB probing
   - FTLB: Clear execution hazard after changing FTLB enable
   - Highmem: Fix detection of unsupported highmem with cache aliases
   - I6400: Don't touch FTLBP chicken bits
   - microMIPS: Fix BUILD_ROLLBACK_PROLOGUE
   - Malta: Fix IOCU disable switch read for MIPS64
   - Octeon: Fix probing of devices attached to GPIO lines
   - uprobes: Misc small fixes"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: CM: Fix mips_cm_max_vp_width for non-MT kernels on MT systems
  MIPS: Fix detection of unsupported highmem with cache aliases
  MIPS: Malta: Fix IOCU disable switch read for MIPS64
  MIPS: Fix BUILD_ROLLBACK_PROLOGUE for microMIPS
  MIPS: clear execution hazard after changing FTLB enable
  MIPS: Configure FTLB after probing TLB sizes from config4
  MIPS: Stop setting I6400 FTLBP
  MIPS: DEC: Avoid la pseudo-instruction in delay slots
  MIPS: Octeon: mark GPIO controller node not populated after IRQ init.
  MIPS: uprobes: fix use of uninitialised variable
  MIPS: uprobes: remove incorrect set_orig_insn
  MIPS: fix uretprobe implementation
  MIPS: smp-cps: Avoid BUG() when offlining pre-r6 CPUs

68 files changed:
.mailmap
MAINTAINERS
arch/sparc/include/asm/page_64.h
arch/sparc/include/asm/smp_64.h
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c
arch/sparc/mm/fault_64.c
arch/sparc/mm/init_64.c
arch/sparc/mm/tlb.c
arch/sparc/mm/tsb.c
drivers/acpi/nfit/core.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/udl/udl_fb.c
drivers/input/joydev.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/omap2.c
drivers/net/can/dev.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/fec_main.c
drivers/nvdimm/core.c
drivers/nvdimm/nd.h
drivers/nvdimm/region_devs.c
drivers/scsi/hosts.c
drivers/scsi/scsi.c
drivers/scsi/scsi_priv.h
fs/ocfs2/aops.c
include/linux/can/dev.h
include/linux/dma-mapping.h
include/linux/mroute.h
include/linux/mroute6.h
include/linux/property.h
include/linux/swap.h
include/net/sctp/structs.h
include/scsi/scsi_host.h
kernel/cgroup.c
kernel/cpuset.c
mm/filemap.c
mm/ksm.c
mm/memory_hotplug.c
mm/workingset.c
net/ipv4/ipmr.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/ip6_gre.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/sched/act_ife.c
net/sched/sch_qfq.c
net/sched/sch_sfb.c
net/sctp/chunk.c
net/sctp/outqueue.c
net/sctp/sctp_diag.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/vmw_vsock/af_vsock.c
scripts/recordmcount.c
scripts/recordmcount.pl
tools/testing/nvdimm/test/nfit.c

index de22dae..1dab0a1 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -69,6 +69,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
index 01bff8e..f593300 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8745,7 +8745,7 @@ F:        drivers/oprofile/
 F:     include/linux/oprofile.h
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M:     Mark Fasheh <mfasheh@suse.com>
+M:     Mark Fasheh <mfasheh@versity.com>
 M:     Joel Becker <jlbec@evilplan.org>
 L:     ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:     http://ocfs2.wiki.kernel.org
@@ -11626,7 +11626,7 @@ F:      Documentation/devicetree/bindings/thermal/
 THERMAL/CPU_COOLING
 M:     Amit Daniel Kachhap <amit.kachhap@gmail.com>
 M:     Viresh Kumar <viresh.kumar@linaro.org>
-M:     Javi Merino <javi.merino@arm.com>
+M:     Javi Merino <javi.merino@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/thermal/cpu-cooling-api.txt
index 8c2a8c9..c1263fc 100644 (file)
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -25,6 +25,7 @@
 #define HPAGE_MASK             (~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE   (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
 #endif
 
 #ifndef __ASSEMBLY__
index 26d9e77..ce2233f 100644 (file)
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -43,6 +43,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
+void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void cpu_play_dead(void);
 
@@ -72,6 +73,7 @@ void __cpu_die(unsigned int cpu);
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
+#define smp_fill_in_cpu_possible_map() do { } while (0)
 
 #endif /* !(CONFIG_SMP) */
 
index 599f120..6b7331d 100644 (file)
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/module.h>
 #include <linux/start_kernel.h>
+#include <linux/bootmem.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -50,6 +51,8 @@
 #include <asm/elf.h>
 #include <asm/mdesc.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
 
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
@@ -590,6 +593,22 @@ static void __init init_sparc64_elf_hwcap(void)
                pause_patch();
 }
 
+void __init alloc_irqstack_bootmem(void)
+{
+       unsigned int i, node;
+
+       for_each_possible_cpu(i) {
+               node = cpu_to_node(i);
+
+               softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+                                                       THREAD_SIZE,
+                                                       THREAD_SIZE, 0);
+               hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+                                                       THREAD_SIZE,
+                                                       THREAD_SIZE, 0);
+       }
+}
+
 void __init setup_arch(char **cmdline_p)
 {
        /* Initialize PROM console and command line. */
@@ -650,6 +669,13 @@ void __init setup_arch(char **cmdline_p)
 
        paging_init();
        init_sparc64_elf_hwcap();
+       smp_fill_in_cpu_possible_map();
+       /*
+        * Once the OF device tree and MDESC have been setup and nr_cpus has
+        * been parsed, we know the list of possible cpus.  Therefore we can
+        * allocate the IRQ stacks.
+        */
+       alloc_irqstack_bootmem();
 }
 
 extern int stop_a_enabled;
index 8a6151a..d3035ba 100644 (file)
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1227,6 +1227,20 @@ void __init smp_setup_processor_id(void)
                xcall_deliver_impl = hypervisor_xcall_deliver;
 }
 
+void __init smp_fill_in_cpu_possible_map(void)
+{
+       int possible_cpus = num_possible_cpus();
+       int i;
+
+       if (possible_cpus > nr_cpu_ids)
+               possible_cpus = nr_cpu_ids;
+
+       for (i = 0; i < possible_cpus; i++)
+               set_cpu_possible(i, true);
+       for (; i < NR_CPUS; i++)
+               set_cpu_possible(i, false);
+}
+
 void smp_fill_in_sib_core_maps(void)
 {
        unsigned int i;
index e16fdd2..3f291d8 100644 (file)
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -484,6 +484,7 @@ good_area:
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+       mm_rss *= REAL_HPAGE_PER_HPAGE;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
index 65457c9..7ac6b62 100644 (file)
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1160,7 +1160,7 @@ int __node_distance(int from, int to)
        return numa_latency[from][to];
 }
 
-static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
        int i;
 
@@ -1173,8 +1173,8 @@ static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
        return i;
 }
 
-static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
-                                         int index)
+static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
+                                                u64 grp, int index)
 {
        u64 arc;
 
@@ -2081,7 +2081,6 @@ void __init paging_init(void)
 {
        unsigned long end_pfn, shift, phys_base;
        unsigned long real_end, i;
-       int node;
 
        setup_page_offset();
 
@@ -2250,21 +2249,6 @@ void __init paging_init(void)
        /* Setup bootmem... */
        last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
-       /* Once the OF device tree and MDESC have been setup, we know
-        * the list of possible cpus.  Therefore we can allocate the
-        * IRQ stacks.
-        */
-       for_each_possible_cpu(i) {
-               node = cpu_to_node(i);
-
-               softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-                                                       THREAD_SIZE,
-                                                       THREAD_SIZE, 0);
-               hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-                                                       THREAD_SIZE,
-                                                       THREAD_SIZE, 0);
-       }
-
        kernel_physical_mapping_init();
 
        {
index 3659d37..c56a195 100644 (file)
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                return;
 
        if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
-               if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-                       mm->context.thp_pte_count++;
-               else
-                       mm->context.thp_pte_count--;
+               /*
+                * Note that this routine only sets pmds for THP pages.
+                * Hugetlb pages are handled elsewhere.  We need to check
+                * for huge zero page.  Huge zero pages are like hugetlb
+                * pages in that there is no RSS, but there is the need
+                * for TSB entries.  So, huge zero page counts go into
+                * hugetlb_pte_count.
+                */
+               if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+                       if (is_huge_zero_page(pmd_page(pmd)))
+                               mm->context.hugetlb_pte_count++;
+                       else
+                               mm->context.thp_pte_count++;
+               } else {
+                       if (is_huge_zero_page(pmd_page(orig)))
+                               mm->context.hugetlb_pte_count--;
+                       else
+                               mm->context.thp_pte_count--;
+               }
 
                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        }
 }
 
+/*
+ * This routine is only called when splitting a THP
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
 {
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 
        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+       /*
+        * set_pmd_at() will not be called in a way to decrement
+        * thp_pte_count when splitting a THP, so do it now.
+        * Sanity check pmd before doing the actual decrement.
+        */
+       if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+           !is_huge_zero_page(pmd_page(entry)))
+               (vma->vm_mm)->context.thp_pte_count--;
 }
 
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
index 6725ed4..f2b7711 100644 (file)
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -469,8 +469,10 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+       unsigned long mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       unsigned long total_huge_pte_count;
+       unsigned long saved_hugetlb_pte_count;
+       unsigned long saved_thp_pte_count;
 #endif
        unsigned int i;
 
@@ -483,10 +485,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
-       total_huge_pte_count = mm->context.hugetlb_pte_count +
-                        mm->context.thp_pte_count;
+       saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+       saved_thp_pte_count = mm->context.thp_pte_count;
        mm->context.hugetlb_pte_count = 0;
        mm->context.thp_pte_count = 0;
+
+       mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
 #endif
 
        /* copy_mm() copies over the parent's mm_struct before calling
@@ -499,11 +503,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        /* If this is fork, inherit the parent's TSB size.  We would
         * grow it to that size on the first page fault anyways.
         */
-       tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+       tsb_grow(mm, MM_TSB_BASE, mm_rss);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       if (unlikely(total_huge_pte_count))
-               tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+       if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+               tsb_grow(mm, MM_TSB_HUGE,
+                        (saved_hugetlb_pte_count + saved_thp_pte_count) *
+                        REAL_HPAGE_PER_HPAGE);
 #endif
 
        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
index 80cc7c0..e1d5ea6 100644 (file)
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -94,54 +94,50 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
        return to_acpi_device(acpi_desc->dev);
 }
 
-static int xlat_status(void *buf, unsigned int cmd)
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
 {
        struct nd_cmd_clear_error *clear_err;
        struct nd_cmd_ars_status *ars_status;
-       struct nd_cmd_ars_start *ars_start;
-       struct nd_cmd_ars_cap *ars_cap;
        u16 flags;
 
        switch (cmd) {
        case ND_CMD_ARS_CAP:
-               ars_cap = buf;
-               if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+               if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
                        return -ENOTTY;
 
                /* Command failed */
-               if (ars_cap->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
 
                /* No supported scan types for this range */
                flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
-               if ((ars_cap->status >> 16 & flags) == 0)
+               if ((status >> 16 & flags) == 0)
                        return -ENOTTY;
                break;
        case ND_CMD_ARS_START:
-               ars_start = buf;
                /* ARS is in progress */
-               if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+               if ((status & 0xffff) == NFIT_ARS_START_BUSY)
                        return -EBUSY;
 
                /* Command failed */
-               if (ars_start->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                break;
        case ND_CMD_ARS_STATUS:
                ars_status = buf;
                /* Command failed */
-               if (ars_status->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                /* Check extended status (Upper two bytes) */
-               if (ars_status->status == NFIT_ARS_STATUS_DONE)
+               if (status == NFIT_ARS_STATUS_DONE)
                        return 0;
 
                /* ARS is in progress */
-               if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+               if (status == NFIT_ARS_STATUS_BUSY)
                        return -EBUSY;
 
                /* No ARS performed for the current boot */
-               if (ars_status->status == NFIT_ARS_STATUS_NONE)
+               if (status == NFIT_ARS_STATUS_NONE)
                        return -EAGAIN;
 
                /*
@@ -149,19 +145,19 @@ static int xlat_status(void *buf, unsigned int cmd)
                 * agent wants the scan to stop.  If we didn't overflow
                 * then just continue with the returned results.
                 */
-               if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+               if (status == NFIT_ARS_STATUS_INTR) {
                        if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
                                return -ENOSPC;
                        return 0;
                }
 
                /* Unknown status */
-               if (ars_status->status >> 16)
+               if (status >> 16)
                        return -EIO;
                break;
        case ND_CMD_CLEAR_ERROR:
                clear_err = buf;
-               if (clear_err->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                if (!clear_err->cleared)
                        return -EIO;
@@ -172,6 +168,9 @@ static int xlat_status(void *buf, unsigned int cmd)
                break;
        }
 
+       /* all other non-zero status results in an error */
+       if (status)
+               return -EIO;
        return 0;
 }
 
@@ -186,10 +185,10 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
        struct nd_cmd_pkg *call_pkg = NULL;
        const char *cmd_name, *dimm_name;
        unsigned long cmd_mask, dsm_mask;
+       u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
        const u8 *uuid;
-       u32 offset;
        int rc, i;
 
        func = cmd;
@@ -317,6 +316,15 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                out_obj->buffer.pointer + offset, out_size);
                offset += out_size;
        }
+
+       /*
+        * Set fw_status for all the commands with a known format to be
+        * later interpreted by xlat_status().
+        */
+       if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+                       || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+               fw_status = *(u32 *) out_obj->buffer.pointer;
+
        if (offset + in_buf.buffer.length < buf_len) {
                if (i >= 1) {
                        /*
@@ -325,7 +333,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                         */
                        rc = buf_len - offset - in_buf.buffer.length;
                        if (cmd_rc)
-                               *cmd_rc = xlat_status(buf, cmd);
+                               *cmd_rc = xlat_status(buf, cmd, fw_status);
                } else {
                        dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                                        __func__, dimm_name, cmd_name, buf_len,
@@ -335,7 +343,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
        } else {
                rc = 0;
                if (cmd_rc)
-                       *cmd_rc = xlat_status(buf, cmd);
+                       *cmd_rc = xlat_status(buf, cmd, fw_status);
        }
 
  out:
index df7ab24..39c01b9 100644 (file)
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1708,11 +1708,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
        DRM_INFO("amdgpu: finishing device.\n");
        adev->shutdown = true;
+       drm_crtc_force_disable_all(adev->ddev);
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
        amdgpu_fence_driver_fini(adev);
-       drm_crtc_force_disable_all(adev->ddev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
        kfree(adev->ip_block_status);
index 7ea8aa7..6bc712f 100644 (file)
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -175,6 +175,7 @@ struct nvkm_device_func {
        void (*fini)(struct nvkm_device *, bool suspend);
        resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
        resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+       bool cpu_coherent;
 };
 
 struct nvkm_device_quirk {
index 6190035..864323b 100644 (file)
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -209,7 +209,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
 
-       nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+       if (!nvxx_device(&drm->device)->func->cpu_coherent)
+               nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
        nvbo->page_shift = 12;
        if (drm->client.vm) {
index b1b6932..62ad030 100644 (file)
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,6 +1614,7 @@ nvkm_device_pci_func = {
        .fini = nvkm_device_pci_fini,
        .resource_addr = nvkm_device_pci_resource_addr,
        .resource_size = nvkm_device_pci_resource_size,
+       .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int
index 939682f..9b638bd 100644 (file)
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -245,6 +245,7 @@ nvkm_device_tegra_func = {
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
+       .cpu_coherent = false,
 };
 
 int
index edec30f..0a7b6ed 100644 (file)
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -37,7 +37,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
 {
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+       mutex_lock(&chan->fifo->base.engine.subdev.mutex);
        nvkm_ramht_remove(imem->ramht, cookie);
+       mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
 }
 
 static int
index e6abc09..1f78ec2 100644 (file)
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3015,6 +3015,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        if (rdev->pdev->device == 0x6811 &&
            rdev->pdev->revision == 0x81)
                max_mclk = 120000;
+       /* limit sclk/mclk on Jet parts for stability */
+       if (rdev->pdev->device == 0x6665 &&
+           rdev->pdev->revision == 0xc3) {
+               max_sclk = 75000;
+               max_mclk = 80000;
+       }
 
        if (rps->vce_active) {
                rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
index 9688bfa..611b6b9 100644 (file)
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -122,7 +122,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                return 0;
        cmd = urb->transfer_buffer;
 
-       for (i = y; i < height ; i++) {
+       for (i = y; i < y + height ; i++) {
                const int line_offset = fb->base.pitches[0] * i;
                const int byte_offset = line_offset + (x * bpp);
                const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
index 5d11fea..f3135ae 100644 (file)
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -946,6 +946,12 @@ static const struct input_device_id joydev_ids[] = {
                .evbit = { BIT_MASK(EV_ABS) },
                .absbit = { BIT_MASK(ABS_X) },
        },
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                               INPUT_DEVICE_ID_MATCH_ABSBIT,
+               .evbit = { BIT_MASK(EV_ABS) },
+               .absbit = { BIT_MASK(ABS_Z) },
+       },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                                INPUT_DEVICE_ID_MATCH_ABSBIT,
index cc07ba0..27fa8b8 100644 (file)
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
        unsigned long flags;
        u32 val;
 
+       /* Reset ECC hardware */
+       davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
        spin_lock_irqsave(&davinci_nand_lock, flags);
 
        /* Start 4-bit ECC calculation for read/write */
index a59361c..5513bfd 100644 (file)
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -2169,7 +2169,7 @@ scan_tail:
        return 0;
 
 return_error:
-       if (info->dma)
+       if (!IS_ERR_OR_NULL(info->dma))
                dma_release_channel(info->dma);
        if (nand_chip->ecc.priv) {
                nand_bch_free(nand_chip->ecc.priv);
index e21f7cc..8d6208c 100644 (file)
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <linux/workqueue.h>
 #include <linux/can.h>
 #include <linux/can/dev.h>
 #include <linux/can/skb.h>
@@ -501,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
 /*
  * CAN device restart for bus-off recovery
  */
-static void can_restart(unsigned long data)
+static void can_restart(struct net_device *dev)
 {
-       struct net_device *dev = (struct net_device *)data;
        struct can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
@@ -543,6 +543,14 @@ restart:
                netdev_err(dev, "Error %d during restart", err);
 }
 
+static void can_restart_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
+
+       can_restart(priv->dev);
+}
+
 int can_restart_now(struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
@@ -556,8 +564,8 @@ int can_restart_now(struct net_device *dev)
        if (priv->state != CAN_STATE_BUS_OFF)
                return -EBUSY;
 
-       /* Runs as soon as possible in the timer context */
-       mod_timer(&priv->restart_timer, jiffies);
+       cancel_delayed_work_sync(&priv->restart_work);
+       can_restart(dev);
 
        return 0;
 }
@@ -578,8 +586,8 @@ void can_bus_off(struct net_device *dev)
        netif_carrier_off(dev);
 
        if (priv->restart_ms)
-               mod_timer(&priv->restart_timer,
-                         jiffies + (priv->restart_ms * HZ) / 1000);
+               schedule_delayed_work(&priv->restart_work,
+                                     msecs_to_jiffies(priv->restart_ms));
 }
 EXPORT_SYMBOL_GPL(can_bus_off);
 
@@ -688,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
                return NULL;
 
        priv = netdev_priv(dev);
+       priv->dev = dev;
 
        if (echo_skb_max) {
                priv->echo_skb_max = echo_skb_max;
@@ -697,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
 
        priv->state = CAN_STATE_STOPPED;
 
-       init_timer(&priv->restart_timer);
+       INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
 
        return dev;
 }
@@ -778,8 +787,6 @@ int open_candev(struct net_device *dev)
        if (!netif_carrier_ok(dev))
                netif_carrier_on(dev);
 
-       setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(open_candev);
@@ -794,7 +801,7 @@ void close_candev(struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
 
-       del_timer_sync(&priv->restart_timer);
+       cancel_delayed_work_sync(&priv->restart_work);
        can_flush_echo_skb(dev);
 }
 EXPORT_SYMBOL_GPL(close_candev);
index 8d4f849..5414563 100644 (file)
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -453,25 +453,29 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
 static int bcmgenet_get_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
 {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
        if (!netif_running(dev))
                return -EINVAL;
 
-       if (!dev->phydev)
+       if (!priv->phydev)
                return -ENODEV;
 
-       return phy_ethtool_gset(dev->phydev, cmd);
+       return phy_ethtool_gset(priv->phydev, cmd);
 }
 
 static int bcmgenet_set_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
 {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
        if (!netif_running(dev))
                return -EINVAL;
 
-       if (!dev->phydev)
+       if (!priv->phydev)
                return -ENODEV;
 
-       return phy_ethtool_sset(dev->phydev, cmd);
+       return phy_ethtool_sset(priv->phydev, cmd);
 }
 
 static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -937,7 +941,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
        e->eee_active = p->eee_active;
        e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
 
-       return phy_ethtool_get_eee(dev->phydev, e);
+       return phy_ethtool_get_eee(priv->phydev, e);
 }
 
 static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -954,7 +958,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
        if (!p->eee_enabled) {
                bcmgenet_eee_enable_set(dev, false);
        } else {
-               ret = phy_init_eee(dev->phydev, 0);
+               ret = phy_init_eee(priv->phydev, 0);
                if (ret) {
                        netif_err(priv, hw, dev, "EEE initialization failed\n");
                        return ret;
@@ -964,12 +968,14 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
                bcmgenet_eee_enable_set(dev, true);
        }
 
-       return phy_ethtool_set_eee(dev->phydev, e);
+       return phy_ethtool_set_eee(priv->phydev, e);
 }
 
 static int bcmgenet_nway_reset(struct net_device *dev)
 {
-       return genphy_restart_aneg(dev->phydev);
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       return genphy_restart_aneg(priv->phydev);
 }
 
 /* standard ethtool support functions. */
@@ -996,13 +1002,12 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
 static int bcmgenet_power_down(struct bcmgenet_priv *priv,
                                enum bcmgenet_power_mode mode)
 {
-       struct net_device *ndev = priv->dev;
        int ret = 0;
        u32 reg;
 
        switch (mode) {
        case GENET_POWER_CABLE_SENSE:
-               phy_detach(ndev->phydev);
+               phy_detach(priv->phydev);
                break;
 
        case GENET_POWER_WOL_MAGIC:
@@ -1063,6 +1068,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 /* ioctl handle special commands that are not present in ethtool. */
 static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
        int val = 0;
 
        if (!netif_running(dev))
@@ -1072,10 +1078,10 @@ static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
-               if (!dev->phydev)
+               if (!priv->phydev)
                        val = -ENODEV;
                else
-                       val = phy_mii_ioctl(dev->phydev, rq, cmd);
+                       val = phy_mii_ioctl(priv->phydev, rq, cmd);
                break;
 
        default:
@@ -2458,7 +2464,6 @@ static void bcmgenet_irq_task(struct work_struct *work)
 {
        struct bcmgenet_priv *priv = container_of(
                        work, struct bcmgenet_priv, bcmgenet_irq_work);
-       struct net_device *ndev = priv->dev;
 
        netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
@@ -2471,7 +2476,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
 
        /* Link UP/DOWN event */
        if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
-               phy_mac_interrupt(ndev->phydev,
+               phy_mac_interrupt(priv->phydev,
                                  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
                priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
        }
@@ -2833,7 +2838,7 @@ static void bcmgenet_netif_start(struct net_device *dev)
        /* Monitor link interrupts now */
        bcmgenet_link_intr_enable(priv);
 
-       phy_start(dev->phydev);
+       phy_start(priv->phydev);
 }
 
 static int bcmgenet_open(struct net_device *dev)
@@ -2932,7 +2937,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
        netif_tx_stop_all_queues(dev);
-       phy_stop(dev->phydev);
+       phy_stop(priv->phydev);
        bcmgenet_intr_disable(priv);
        bcmgenet_disable_rx_napi(priv);
        bcmgenet_disable_tx_napi(priv);
@@ -2958,7 +2963,7 @@ static int bcmgenet_close(struct net_device *dev)
        bcmgenet_netif_stop(dev);
 
        /* Really kill the PHY state machine and disconnect from it */
-       phy_disconnect(dev->phydev);
+       phy_disconnect(priv->phydev);
 
        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);
@@ -3517,7 +3522,7 @@ static int bcmgenet_suspend(struct device *d)
 
        bcmgenet_netif_stop(dev);
 
-       phy_suspend(dev->phydev);
+       phy_suspend(priv->phydev);
 
        netif_device_detach(dev);
 
@@ -3581,7 +3586,7 @@ static int bcmgenet_resume(struct device *d)
        if (priv->wolopts)
                clk_disable_unprepare(priv->clk_wol);
 
-       phy_init_hw(dev->phydev);
+       phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
        bcmgenet_mii_config(priv->dev);
 
@@ -3614,7 +3619,7 @@ static int bcmgenet_resume(struct device *d)
 
        netif_device_attach(dev);
 
-       phy_resume(dev->phydev);
+       phy_resume(priv->phydev);
 
        if (priv->eee.eee_enabled)
                bcmgenet_eee_enable_set(dev, true);
index 0f0868c..1e2dc34 100644 (file)
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -597,6 +597,7 @@ struct bcmgenet_priv {
 
        /* MDIO bus variables */
        wait_queue_head_t wq;
+       struct phy_device *phydev;
        bool internal_phy;
        struct device_node *phy_dn;
        struct device_node *mdio_dn;
index e907acd..457c3bc 100644 (file)
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -86,7 +86,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
 void bcmgenet_mii_setup(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       struct phy_device *phydev = dev->phydev;
+       struct phy_device *phydev = priv->phydev;
        u32 reg, cmd_bits = 0;
        bool status_changed = false;
 
@@ -183,9 +183,9 @@ void bcmgenet_mii_reset(struct net_device *dev)
        if (GENET_IS_V4(priv))
                return;
 
-       if (dev->phydev) {
-               phy_init_hw(dev->phydev);
-               phy_start_aneg(dev->phydev);
+       if (priv->phydev) {
+               phy_init_hw(priv->phydev);
+               phy_start_aneg(priv->phydev);
        }
 }
 
@@ -236,7 +236,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 {
-       struct net_device *ndev = priv->dev;
        u32 reg;
 
        /* Speed settings are set in bcmgenet_mii_setup() */
@@ -245,14 +244,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
        bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
 
        if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
-               fixed_phy_set_link_update(ndev->phydev,
+               fixed_phy_set_link_update(priv->phydev,
                                          bcmgenet_fixed_phy_link_update);
 }
 
 int bcmgenet_mii_config(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       struct phy_device *phydev = dev->phydev;
+       struct phy_device *phydev = priv->phydev;
        struct device *kdev = &priv->pdev->dev;
        const char *phy_name = NULL;
        u32 id_mode_dis = 0;
@@ -303,7 +302,7 @@ int bcmgenet_mii_config(struct net_device *dev)
                 * capabilities, use that knowledge to also configure the
                 * Reverse MII interface correctly.
                 */
-               if ((phydev->supported & PHY_BASIC_FEATURES) ==
+               if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
                                PHY_BASIC_FEATURES)
                        port_ctrl = PORT_MODE_EXT_RVMII_25;
                else
@@ -372,7 +371,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
                        return -ENODEV;
                }
        } else {
-               phydev = dev->phydev;
+               phydev = priv->phydev;
                phydev->dev_flags = phy_flags;
 
                ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -383,6 +382,8 @@ int bcmgenet_mii_probe(struct net_device *dev)
                }
        }
 
+       priv->phydev = phydev;
+
        /* Configure port multiplexer based on what the probed PHY device since
         * reading the 'max-speed' property determines the maximum supported
         * PHY speed which is needed for bcmgenet_mii_config() to configure
@@ -390,7 +391,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
         */
        ret = bcmgenet_mii_config(dev);
        if (ret) {
-               phy_disconnect(phydev);
+               phy_disconnect(priv->phydev);
                return ret;
        }
 
@@ -400,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
         * Ethernet MAC ISRs
         */
        if (priv->internal_phy)
-               phydev->irq = PHY_IGNORE_INTERRUPT;
+               priv->phydev->irq = PHY_IGNORE_INTERRUPT;
 
        return 0;
 }
@@ -605,6 +606,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
 
        }
 
+       priv->phydev = phydev;
        priv->phy_interface = pd->phy_interface;
 
        return 0;
index a2551bc..ea967df 100644 (file)
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18122,14 +18122,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
-       /* We needn't recover from permanent error */
-       if (state == pci_channel_io_frozen)
-               tp->pcierr_recovery = true;
-
        /* We probably don't have netdev yet */
        if (!netdev || !netif_running(netdev))
                goto done;
 
+       /* We needn't recover from permanent error */
+       if (state == pci_channel_io_frozen)
+               tp->pcierr_recovery = true;
+
        tg3_phy_stop(tp);
 
        tg3_netif_stop(tp);
@@ -18226,7 +18226,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 
        rtnl_lock();
 
-       if (!netif_running(netdev))
+       if (!netdev || !netif_running(netdev))
                goto done;
 
        tg3_full_lock(tp, 0);
index 01f7e81..692ee24 100644 (file)
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
-               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+               .driver_data = FEC_QUIRK_USE_GASKET,
        }, {
                .name = "imx27-fec",
-               .driver_data = FEC_QUIRK_HAS_RACC,
+               .driver_data = 0,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -180,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 /* FEC receive acceleration */
 #define FEC_RACC_IPDIS         (1 << 1)
 #define FEC_RACC_PRODIS                (1 << 2)
+#define FEC_RACC_SHIFT16       BIT(7)
 #define FEC_RACC_OPTIONS       (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
 
 /*
@@ -945,9 +946,11 @@ fec_restart(struct net_device *ndev)
 
 #if !defined(CONFIG_M5272)
        if (fep->quirks & FEC_QUIRK_HAS_RACC) {
-               /* set RX checksum */
                val = readl(fep->hwp + FEC_RACC);
+               /* align IP header */
+               val |= FEC_RACC_SHIFT16;
                if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+                       /* set RX checksum */
                        val |= FEC_RACC_OPTIONS;
                else
                        val &= ~FEC_RACC_OPTIONS;
@@ -1428,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                prefetch(skb->data - NET_IP_ALIGN);
                skb_put(skb, pkt_len - 4);
                data = skb->data;
+
+#if !defined(CONFIG_M5272)
+               if (fep->quirks & FEC_QUIRK_HAS_RACC)
+                       data = skb_pull_inline(skb, 2);
+#endif
+
                if (!is_copybreak && need_swap)
                        swap_buffer(data, pkt_len);
 
index 715583f..4d7bbd2 100644 (file)
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -99,8 +99,11 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);
 
-       if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
+       if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
+               dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
+                               &offset, size, dev_name(dev));
                goto err_request_region;
+       }
 
        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
@@ -171,6 +174,9 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);
 
+       if (!nvdimm_map)
+               return NULL;
+
        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;
 
index 8024a0e..0b78a82 100644 (file)
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -52,10 +52,28 @@ struct nvdimm_drvdata {
 struct nd_region_data {
        int ns_count;
        int ns_active;
-       unsigned int flush_mask;
-       void __iomem *flush_wpq[0][0];
+       unsigned int hints_shift;
+       void __iomem *flush_wpq[0];
 };
 
+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
+               int dimm, int hint)
+{
+       unsigned int num = 1 << ndrd->hints_shift;
+       unsigned int mask = num - 1;
+
+       return ndrd->flush_wpq[dimm * num + (hint & mask)];
+}
+
+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
+               int hint, void __iomem *flush)
+{
+       unsigned int num = 1 << ndrd->hints_shift;
+       unsigned int mask = num - 1;
+
+       ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
+}
+
 static inline struct nd_namespace_index *to_namespace_index(
                struct nvdimm_drvdata *ndd, int i)
 {
index e8d5ba7..4c0ac4a 100644 (file)
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -38,7 +38,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
-       for (i = 0; i < nvdimm->num_flush; i++) {
+       for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;
@@ -54,14 +54,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
-                                       ndrd->flush_wpq[dimm][j] & PAGE_MASK);
+                                       ndrd_get_flush_wpq(ndrd, dimm, j)
+                                       & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
-                                       PHYS_PFN(pfn), PAGE_SIZE);
+                                       PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
-               ndrd->flush_wpq[dimm][i] = flush_page
-                       + (res->start & ~PAGE_MASK);
+               ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+                               + (res->start & ~PAGE_MASK));
        }
 
        return 0;
@@ -93,7 +94,10 @@ int nd_region_activate(struct nd_region *nd_region)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);
 
-       ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
+       if (!num_flush)
+               return 0;
+
+       ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -900,8 +904,8 @@ void nvdimm_flush(struct nd_region *nd_region)
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
-               if (ndrd->flush_wpq[i][0])
-                       writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
+               if (ndrd_get_flush_wpq(ndrd, i, 0))
+                       writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();
 }
 EXPORT_SYMBOL_GPL(nvdimm_flush);
@@ -925,7 +929,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 
        for (i = 0; i < nd_region->ndr_mappings; i++)
                /* flush hints present, flushing required */
-               if (ndrd->flush_wpq[i][0])
+               if (ndrd_get_flush_wpq(ndrd, i, 0))
                        return 1;
 
        /*
index ba9af4a..ec6381e 100644 (file)
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -486,6 +486,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        else
                shost->dma_boundary = 0xffffffff;
 
+       shost->use_blk_mq = scsi_use_blk_mq;
+
        device_initialize(&shost->shost_gendev);
        dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
        shost->shost_gendev.bus = &scsi_bus_type;
index 1f36aca..1deb6ad 100644 (file)
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1160,7 +1160,6 @@ bool scsi_use_blk_mq = true;
 bool scsi_use_blk_mq = false;
 #endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-EXPORT_SYMBOL_GPL(scsi_use_blk_mq);
 
 static int __init init_scsi(void)
 {
index 57a4b99..85c8a51 100644 (file)
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -29,6 +29,7 @@ extern int scsi_init_hosts(void);
 extern void scsi_exit_hosts(void);
 
 /* scsi.c */
+extern bool scsi_use_blk_mq;
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
 #ifdef CONFIG_SCSI_LOGGING
index 98d3654..bbb4b3e 100644 (file)
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1842,6 +1842,16 @@ out_commit:
        ocfs2_commit_trans(osb, handle);
 
 out:
+       /*
+        * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
+        * even in case of error here like ENOSPC and ENOMEM. So, we need
+        * to unlock the target page manually to prevent deadlocks when
+        * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
+        * to VM code.
+        */
+       if (wc->w_target_locked)
+               unlock_page(mmap_page);
+
        ocfs2_free_write_ctxt(inode, wc);
 
        if (data_ac) {
index 5261751..5f52709 100644 (file)
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
  * CAN common private data
  */
 struct can_priv {
+       struct net_device *dev;
        struct can_device_stats can_stats;
 
        struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
        u32 ctrlmode_static;    /* static enabled options for driver/hardware */
 
        int restart_ms;
-       struct timer_list restart_timer;
+       struct delayed_work restart_work;
 
        int (*do_set_bittiming)(struct net_device *dev);
        int (*do_set_data_bittiming)(struct net_device *dev);
index 66533e1..dc69df0 100644 (file)
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
 #define dma_mmap_writecombine dma_mmap_wc
 #endif
 
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
index d351fd3..e5fb813 100644 (file)
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -120,5 +120,5 @@ struct mfc_cache {
 struct rtmsg;
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
-                  struct rtmsg *rtm, int nowait);
+                  struct rtmsg *rtm, int nowait, u32 portid);
 #endif
index 3987b64..19a1c0c 100644 (file)
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
 
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-                          struct rtmsg *rtm, int nowait);
+                          struct rtmsg *rtm, int nowait, u32 portid);
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
index 3a2f9ae..856e50b 100644 (file)
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -190,7 +190,7 @@ struct property_entry {
        .length = ARRAY_SIZE(_val_) * sizeof(_type_),           \
        .is_array = true,                                       \
        .is_string = false,                                     \
-       { .pointer = { _type_##_data = _val_ } },               \
+       { .pointer = { ._type_##_data = _val_ } },              \
 }
 
 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_)                 \
index b17cc48..4a529c9 100644 (file)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_pages_dec(struct radix_tree_node *node)
 {
+       VM_BUG_ON(!workingset_node_pages(node));
        node->count--;
 }
 
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 {
+       VM_BUG_ON(!workingset_node_shadows(node));
        node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
 }
 
index ce93c4b..ced0df3 100644 (file)
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -554,6 +554,9 @@ struct sctp_chunk {
 
        atomic_t refcnt;
 
+       /* How many times this chunk have been sent, for prsctp RTX policy */
+       int sent_count;
+
        /* This is our link to the per-transport transmitted list.  */
        struct list_head transmitted_list;
 
@@ -603,16 +606,6 @@ struct sctp_chunk {
        /* This needs to be recoverable for SCTP_SEND_FAILED events. */
        struct sctp_sndrcvinfo sinfo;
 
-       /* We use this field to record param for prsctp policies,
-        * for TTL policy, it is the time_to_drop of this chunk,
-        * for RTX policy, it is the max_sent_count of this chunk,
-        * for PRIO policy, it is the priority of this chunk.
-        */
-       unsigned long prsctp_param;
-
-       /* How many times this chunk have been sent, for prsctp RTX policy */
-       int sent_count;
-
        /* Which association does this belong to?  */
        struct sctp_association *asoc;
 
index 0dee7af..7e4cd53 100644 (file)
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -771,12 +771,9 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
                shost->tmf_in_progress;
 }
 
-extern bool scsi_use_blk_mq;
-
 static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
 {
-       return scsi_use_blk_mq;
-
+       return shost->use_blk_mq;
 }
 
 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
index 5e8dab5..d6b729b 100644 (file)
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3446,9 +3446,28 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
         * Except for the root, subtree_control must be zero for a cgroup
         * with tasks so that child cgroups don't compete against tasks.
         */
-       if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
-               ret = -EBUSY;
-               goto out_unlock;
+       if (enable && cgroup_parent(cgrp)) {
+               struct cgrp_cset_link *link;
+
+               /*
+                * Because namespaces pin csets too, @cgrp->cset_links
+                * might not be empty even when @cgrp is empty.  Walk and
+                * verify each cset.
+                */
+               spin_lock_irq(&css_set_lock);
+
+               ret = 0;
+               list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+                       if (css_set_populated(link->cset)) {
+                               ret = -EBUSY;
+                               break;
+                       }
+               }
+
+               spin_unlock_irq(&css_set_lock);
+
+               if (ret)
+                       goto out_unlock;
        }
 
        /* save and update control masks and prepare csses */
@@ -3899,7 +3918,9 @@ void cgroup_file_notify(struct cgroup_file *cfile)
  * cgroup_task_count - count the number of tasks in a cgroup.
  * @cgrp: the cgroup in question
  *
- * Return the number of tasks in the cgroup.
+ * Return the number of tasks in the cgroup.  The returned number can be
+ * higher than the actual number of tasks due to css_set references from
+ * namespace roots and temporary usages.
  */
 static int cgroup_task_count(const struct cgroup *cgrp)
 {
index c27e533..2b4c20a 100644 (file)
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -325,8 +325,7 @@ static struct file_system_type cpuset_fs_type = {
 /*
  * Return in pmask the portion of a cpusets's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.  The top
- * cpuset always has some cpus online.
+ * until we find one that does have some online cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
@@ -335,8 +334,20 @@ static struct file_system_type cpuset_fs_type = {
  */
 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 {
-       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
+       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
                cs = parent_cs(cs);
+               if (unlikely(!cs)) {
+                       /*
+                        * The top cpuset doesn't have any online cpu as a
+                        * consequence of a race between cpuset_hotplug_work
+                        * and cpu hotplug notifier.  But we know the top
+                        * cpuset's effective_cpus is on its way to be
+                        * identical to cpu_online_mask.
+                        */
+                       cpumask_copy(pmask, cpu_online_mask);
+                       return;
+               }
+       }
        cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
 }
 
@@ -2074,7 +2085,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
  * which could have been changed by cpuset just after it inherits the
  * state from the parent and before it sits on the cgroup's task list.
  */
-void cpuset_fork(struct task_struct *task)
+static void cpuset_fork(struct task_struct *task)
 {
        if (task_css_is_root(task, cpuset_cgrp_id))
                return;
index 8a287df..2d0986a 100644 (file)
--- a/mm/filemap.c
+++ b/mm/filemap.c
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
+static int page_cache_tree_insert(struct address_space *mapping,
+                                 struct page *page, void **shadowp)
+{
+       struct radix_tree_node *node;
+       void **slot;
+       int error;
+
+       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+                                   &node, &slot);
+       if (error)
+               return error;
+       if (*slot) {
+               void *p;
+
+               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+               if (!radix_tree_exceptional_entry(p))
+                       return -EEXIST;
+
+               mapping->nrexceptional--;
+               if (!dax_mapping(mapping)) {
+                       if (shadowp)
+                               *shadowp = p;
+                       if (node)
+                               workingset_node_shadows_dec(node);
+               } else {
+                       /* DAX can replace empty locked entry with a hole */
+                       WARN_ON_ONCE(p !=
+                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+                                        RADIX_DAX_ENTRY_LOCK));
+                       /* DAX accounts exceptional entries as normal pages */
+                       if (node)
+                               workingset_node_pages_dec(node);
+                       /* Wakeup waiters for exceptional entry lock */
+                       dax_wake_mapping_entry_waiter(mapping, page->index,
+                                                     false);
+               }
+       }
+       radix_tree_replace_slot(slot, page);
+       mapping->nrpages++;
+       if (node) {
+               workingset_node_pages_inc(node);
+               /*
+                * Don't track node that contains actual pages.
+                *
+                * Avoid acquiring the list_lru lock if already
+                * untracked.  The list_empty() test is safe as
+                * node->private_list is protected by
+                * mapping->tree_lock.
+                */
+               if (!list_empty(&node->private_list))
+                       list_lru_del(&workingset_shadow_nodes,
+                                    &node->private_list);
+       }
+       return 0;
+}
+
 static void page_cache_tree_delete(struct address_space *mapping,
                                   struct page *page, void *shadow)
 {
@@ -561,7 +617,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 
                spin_lock_irqsave(&mapping->tree_lock, flags);
                __delete_from_page_cache(old, NULL);
-               error = radix_tree_insert(&mapping->page_tree, offset, new);
+               error = page_cache_tree_insert(mapping, new, NULL);
                BUG_ON(error);
                mapping->nrpages++;
 
@@ -584,62 +640,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int page_cache_tree_insert(struct address_space *mapping,
-                                 struct page *page, void **shadowp)
-{
-       struct radix_tree_node *node;
-       void **slot;
-       int error;
-
-       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
-                                   &node, &slot);
-       if (error)
-               return error;
-       if (*slot) {
-               void *p;
-
-               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-               if (!radix_tree_exceptional_entry(p))
-                       return -EEXIST;
-
-               mapping->nrexceptional--;
-               if (!dax_mapping(mapping)) {
-                       if (shadowp)
-                               *shadowp = p;
-                       if (node)
-                               workingset_node_shadows_dec(node);
-               } else {
-                       /* DAX can replace empty locked entry with a hole */
-                       WARN_ON_ONCE(p !=
-                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-                                        RADIX_DAX_ENTRY_LOCK));
-                       /* DAX accounts exceptional entries as normal pages */
-                       if (node)
-                               workingset_node_pages_dec(node);
-                       /* Wakeup waiters for exceptional entry lock */
-                       dax_wake_mapping_entry_waiter(mapping, page->index,
-                                                     false);
-               }
-       }
-       radix_tree_replace_slot(slot, page);
-       mapping->nrpages++;
-       if (node) {
-               workingset_node_pages_inc(node);
-               /*
-                * Don't track node that contains actual pages.
-                *
-                * Avoid acquiring the list_lru lock if already
-                * untracked.  The list_empty() test is safe as
-                * node->private_list is protected by
-                * mapping->tree_lock.
-                */
-               if (!list_empty(&node->private_list))
-                       list_lru_del(&workingset_shadow_nodes,
-                                    &node->private_list);
-       }
-       return 0;
-}
-
 static int __add_to_page_cache_locked(struct page *page,
                                      struct address_space *mapping,
                                      pgoff_t offset, gfp_t gfp_mask,
index 73d43ba..5048083 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
 {
        struct rmap_item *rmap_item;
 
-       rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+       rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
+                                               __GFP_NORETRY | __GFP_NOWARN);
        if (rmap_item)
                ksm_rmap_items++;
        return rmap_item;
index b58906b..9d29ba0 100644 (file)
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1555,8 +1555,8 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 {
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
        int nid = page_to_nid(page);
-       nodemask_t nmask = node_online_map;
-       struct page *new_page;
+       nodemask_t nmask = node_states[N_MEMORY];
+       struct page *new_page = NULL;
 
        /*
         * TODO: allocate a destination hugepage from a nearest neighbor node,
@@ -1567,14 +1567,14 @@ static struct page *new_node_page(struct page *page, unsigned long private,
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        next_node_in(nid, nmask));
 
-       if (nid != next_node_in(nid, nmask))
-               node_clear(nid, nmask);
+       node_clear(nid, nmask);
 
        if (PageHighMem(page)
            || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages_nodemask(gfp_mask, 0,
+       if (!nodes_empty(nmask))
+               new_page = __alloc_pages_nodemask(gfp_mask, 0,
                                        node_zonelist(nid, gfp_mask), &nmask);
        if (!new_page)
                new_page = __alloc_pages(gfp_mask, 0,
index 69551cf..617475f 100644 (file)
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -418,21 +418,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */
-
-       BUG_ON(!node->count);
-       BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
+       BUG_ON(!workingset_node_shadows(node));
+       BUG_ON(workingset_node_pages(node));
 
        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[i]) {
                        BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
                        node->slots[i] = NULL;
-                       BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
-                       node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+                       workingset_node_shadows_dec(node);
                        BUG_ON(!mapping->nrexceptional);
                        mapping->nrexceptional--;
                }
        }
-       BUG_ON(node->count);
+       BUG_ON(workingset_node_shadows(node));
        inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
        if (!__radix_tree_delete_node(&mapping->page_tree, node))
                BUG();
index a87bcd2..5f006e1 100644 (file)
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2123,7 +2123,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
-                  struct rtmsg *rtm, int nowait)
+                  struct rtmsg *rtm, int nowait, u32 portid)
 {
        struct mfc_cache *cache;
        struct mr_table *mrt;
@@ -2168,6 +2168,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
                        return -ENOMEM;
                }
 
+               NETLINK_CB(skb2).portid = portid;
                skb_push(skb2, sizeof(struct iphdr));
                skb_reset_network_header(skb2);
                iph = ip_hdr(skb2);
index b5b47a2..62c3ed0 100644 (file)
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2503,7 +2503,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
                    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
                        int err = ipmr_get_route(net, skb,
                                                 fl4->saddr, fl4->daddr,
-                                                r, nowait);
+                                                r, nowait, portid);
+
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
index 08323bd..a756b87 100644 (file)
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2329,10 +2329,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
        }
 #if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                         msg,
-                        &np->daddr, ntohs(inet->inet_dport),
+                        &sk->sk_v6_daddr, ntohs(inet->inet_dport),
                         tp->snd_cwnd, tcp_left_out(tp),
                         tp->snd_ssthresh, tp->prior_ssthresh,
                         tp->packets_out);
index 5288cec..d48d557 100644 (file)
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1966,12 +1966,14 @@ static int tcp_mtu_probe(struct sock *sk)
        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
                copy = min_t(int, skb->len, probe_size - len);
-               if (nskb->ip_summed)
+               if (nskb->ip_summed) {
                        skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-               else
-                       nskb->csum = skb_copy_and_csum_bits(skb, 0,
-                                                           skb_put(nskb, copy),
-                                                           copy, nskb->csum);
+               } else {
+                       __wsum csum = skb_copy_and_csum_bits(skb, 0,
+                                                            skb_put(nskb, copy),
+                                                            copy, 0);
+                       nskb->csum = csum_block_add(nskb->csum, csum, len);
+               }
 
                if (skb->len <= copy) {
                        /* We've eaten all the data from this skb.
index 704274c..edc3daa 100644 (file)
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -648,7 +648,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = skb->protocol;
 
        err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
        if (err)
index fccb5dd..7f4265b 100644 (file)
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2285,8 +2285,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        return 1;
 }
 
-int ip6mr_get_route(struct net *net,
-                   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+                   int nowait, u32 portid)
 {
        int err;
        struct mr6_table *mrt;
@@ -2331,6 +2331,7 @@ int ip6mr_get_route(struct net *net,
                        return -ENOMEM;
                }
 
+               NETLINK_CB(skb2).portid = portid;
                skb_reset_transport_header(skb2);
 
                skb_put(skb2, sizeof(struct ipv6hdr));
index e3a224b..269218a 100644 (file)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3202,7 +3202,9 @@ static int rt6_fill_node(struct net *net,
        if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
                if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-                       int err = ip6mr_get_route(net, skb, rtm, nowait);
+                       int err = ip6mr_get_route(net, skb, rtm, nowait,
+                                                 portid);
+
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
index e87cd81..4a60cd5 100644 (file)
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
        u32 *tlv = (u32 *)(skbdata);
        u16 totlen = nla_total_size(dlen);      /*alignment + hdr */
        char *dptr = (char *)tlv + NLA_HDRLEN;
-       u32 htlv = attrtype << 16 | dlen;
+       u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
 
        *tlv = htonl(htlv);
        memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -627,7 +627,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
-       u16 ifehdrln = ifehdr->metalen;
+       int ifehdrln = (int)ifehdr->metalen;
        struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
 
        spin_lock(&ife->tcf_lock);
@@ -740,8 +740,6 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                return TC_ACT_SHOT;
        }
 
-       iethh = eth_hdr(skb);
-
        err = skb_cow_head(skb, hdrm);
        if (unlikely(err)) {
                ife->tcf_qstats.drops++;
@@ -752,6 +750,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
        if (!(at & AT_EGRESS))
                skb_push(skb, skb->dev->hard_header_len);
 
+       iethh = (struct ethhdr *)skb->data;
        __skb_push(skb, hdrm);
        memcpy(skb->data, iethh, skb->mac_len);
        skb_reset_mac_header(skb);
index f27ffee..ca0516e 100644 (file)
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1153,6 +1153,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
        if (!skb)
                return NULL;
 
+       qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;
        qdisc_bstats_update(sch, skb);
 
@@ -1256,6 +1257,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        }
 
        bstats_update(&cl->bstats, skb);
+       qdisc_qstats_backlog_inc(sch, skb);
        ++sch->q.qlen;
 
        agg = cl->agg;
@@ -1476,6 +1478,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
                        qdisc_reset(cl->qdisc);
                }
        }
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
index add3cc7..20a350b 100644 (file)
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -400,6 +400,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 enqueue:
        ret = qdisc_enqueue(skb, child, to_free);
        if (likely(ret == NET_XMIT_SUCCESS)) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
                increment_qlen(skb, q);
        } else if (net_xmit_drop_count(ret)) {
@@ -428,6 +429,7 @@ static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
 
        if (skb) {
                qdisc_bstats_update(sch, skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
                decrement_qlen(skb, q);
        }
@@ -450,6 +452,7 @@ static void sfb_reset(struct Qdisc *sch)
        struct sfb_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        q->slot = 0;
        q->double_buffering = false;
index a55e547..0a3dbec 100644 (file)
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -179,6 +179,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                         msg, msg->expires_at, jiffies);
        }
 
+       if (asoc->peer.prsctp_capable &&
+           SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
+               msg->expires_at =
+                       jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
+
        /* This is the biggest possible DATA chunk that can fit into
         * the packet
         */
@@ -335,7 +340,7 @@ errout:
 /* Check whether this message has expired. */
 int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 {
-       if (!chunk->asoc->prsctp_enable ||
+       if (!chunk->asoc->peer.prsctp_capable ||
            !SCTP_PR_POLICY(chunk->sinfo.sinfo_flags)) {
                struct sctp_datamsg *msg = chunk->msg;
 
@@ -349,14 +354,14 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
        }
 
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
-           time_after(jiffies, chunk->prsctp_param)) {
+           time_after(jiffies, chunk->msg->expires_at)) {
                if (chunk->sent_count)
                        chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
                else
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
-                  chunk->sent_count > chunk->prsctp_param) {
+                  chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                return 1;
        }
index 72e54a4..107233d 100644 (file)
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -326,7 +326,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
 
                        sctp_chunk_hold(chunk);
                        sctp_outq_tail_data(q, chunk);
-                       if (chunk->asoc->prsctp_enable &&
+                       if (chunk->asoc->peer.prsctp_capable &&
                            SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                                chunk->asoc->sent_cnt_removable++;
                        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
@@ -383,7 +383,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 
        list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
                if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->prsctp_param <= sinfo->sinfo_timetolive)
+                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
                        continue;
 
                list_del_init(&chk->transmitted_list);
@@ -418,7 +418,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 
        list_for_each_entry_safe(chk, temp, queue, list) {
                if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->prsctp_param <= sinfo->sinfo_timetolive)
+                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
                        continue;
 
                list_del_init(&chk->list);
@@ -442,7 +442,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
 {
        struct sctp_transport *transport;
 
-       if (!asoc->prsctp_enable || !asoc->sent_cnt_removable)
+       if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
                return;
 
        msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
@@ -1055,7 +1055,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 
                                /* Mark as failed send. */
                                sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
-                               if (asoc->prsctp_enable &&
+                               if (asoc->peer.prsctp_capable &&
                                    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                                        asoc->sent_cnt_removable--;
                                sctp_chunk_free(chunk);
@@ -1347,7 +1347,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
                tsn = ntohl(tchunk->subh.data_hdr->tsn);
                if (TSN_lte(tsn, ctsn)) {
                        list_del_init(&tchunk->transmitted_list);
-                       if (asoc->prsctp_enable &&
+                       if (asoc->peer.prsctp_capable &&
                            SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
                                asoc->sent_cnt_removable--;
                        sctp_chunk_free(tchunk);
index f3508aa..cef0cee 100644 (file)
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -272,28 +272,17 @@ out:
        return err;
 }
 
-static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sock *sk, void *p)
 {
-       struct sctp_endpoint *ep = tsp->asoc->ep;
+       struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_comm_param *commp = p;
-       struct sock *sk = ep->base.sk;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
        const struct inet_diag_req_v2 *r = commp->r;
-       struct sctp_association *assoc =
-               list_entry(ep->asocs.next, struct sctp_association, asocs);
+       struct sctp_association *assoc;
        int err = 0;
 
-       /* find the ep only once through the transports by this condition */
-       if (tsp->asoc != assoc)
-               goto out;
-
-       if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
-               goto out;
-
        lock_sock(sk);
-       if (sk != assoc->base.sk)
-               goto release;
        list_for_each_entry(assoc, &ep->asocs, asocs) {
                if (cb->args[4] < cb->args[1])
                        goto next;
@@ -312,7 +301,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
                                        cb->nlh->nlmsg_seq,
                                        NLM_F_MULTI, cb->nlh) < 0) {
                        cb->args[3] = 1;
-                       err = 2;
+                       err = 1;
                        goto release;
                }
                cb->args[3] = 1;
@@ -321,7 +310,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
                                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
-                       err = 2;
+                       err = 1;
                        goto release;
                }
 next:
@@ -333,10 +322,35 @@ next:
        cb->args[4] = 0;
 release:
        release_sock(sk);
+       sock_put(sk);
        return err;
+}
+
+static int sctp_get_sock(struct sctp_transport *tsp, void *p)
+{
+       struct sctp_endpoint *ep = tsp->asoc->ep;
+       struct sctp_comm_param *commp = p;
+       struct sock *sk = ep->base.sk;
+       struct netlink_callback *cb = commp->cb;
+       const struct inet_diag_req_v2 *r = commp->r;
+       struct sctp_association *assoc =
+               list_entry(ep->asocs.next, struct sctp_association, asocs);
+
+       /* find the ep only once through the transports by this condition */
+       if (tsp->asoc != assoc)
+               goto out;
+
+       if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+               goto out;
+
+       sock_hold(sk);
+       cb->args[5] = (long)sk;
+
+       return 1;
+
 out:
        cb->args[2]++;
-       return err;
+       return 0;
 }
 
 static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
@@ -472,10 +486,18 @@ skip:
         * 2 : to record the transport pos of this time's traversal
         * 3 : to mark if we have dumped the ep info of the current asoc
         * 4 : to work as a temporary variable to traversal list
+        * 5 : to save the sk we get from traversing the tsp list.
         */
        if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
                goto done;
-       sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
+
+next:
+       cb->args[5] = 0;
+       sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);
+
+       if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))
+               goto next;
+
 done:
        cb->args[1] = cb->args[4];
        cb->args[4] = 0;
index 8c77b87..46ffecc 100644 (file)
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -706,20 +706,6 @@ nodata:
        return retval;
 }
 
-static void sctp_set_prsctp_policy(struct sctp_chunk *chunk,
-                                  const struct sctp_sndrcvinfo *sinfo)
-{
-       if (!chunk->asoc->prsctp_enable)
-               return;
-
-       if (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
-               chunk->prsctp_param =
-                       jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
-       else if (SCTP_PR_RTX_ENABLED(sinfo->sinfo_flags) ||
-                SCTP_PR_PRIO_ENABLED(sinfo->sinfo_flags))
-               chunk->prsctp_param = sinfo->sinfo_timetolive;
-}
-
 /* Make a DATA chunk for the given association from the provided
  * parameters.  However, do not populate the data payload.
  */
@@ -753,7 +739,6 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
 
        retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
        memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
-       sctp_set_prsctp_policy(retval, sinfo);
 
 nodata:
        return retval;
index 9fc417a..8ed2d99 100644 (file)
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4469,17 +4469,21 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
                                  const union sctp_addr *paddr, void *p)
 {
        struct sctp_transport *transport;
-       int err = 0;
+       int err = -ENOENT;
 
        rcu_read_lock();
        transport = sctp_addrs_lookup_transport(net, laddr, paddr);
        if (!transport || !sctp_transport_hold(transport))
                goto out;
-       err = cb(transport, p);
+
+       sctp_association_hold(transport->asoc);
        sctp_transport_put(transport);
 
-out:
        rcu_read_unlock();
+       err = cb(transport, p);
+       sctp_association_put(transport->asoc);
+
+out:
        return err;
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
index 17dbbe6..8a398b3 100644 (file)
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -465,6 +465,8 @@ void vsock_pending_work(struct work_struct *work)
 
        if (vsock_is_pending(sk)) {
                vsock_remove_pending(listener, sk);
+
+               listener->sk_ack_backlog--;
        } else if (!vsk->rejected) {
                /* We are not on the pending list and accept() did not reject
                 * us, so we must have been accepted by our user process.  We
@@ -475,8 +477,6 @@ void vsock_pending_work(struct work_struct *work)
                goto out;
        }
 
-       listener->sk_ack_backlog--;
-
        /* We need to remove ourself from the global connected sockets list so
         * incoming packets can't find this socket, and to reduce the reference
         * count.
@@ -2010,5 +2010,5 @@ EXPORT_SYMBOL_GPL(vsock_core_get_transport);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Socket Family");
-MODULE_VERSION("1.0.1.0-k");
+MODULE_VERSION("1.0.2.0-k");
 MODULE_LICENSE("GPL v2");
index 42396a7..a68f031 100644 (file)
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -363,6 +363,7 @@ is_mcounted_section_name(char const *const txtname)
                strcmp(".sched.text",    txtname) == 0 ||
                strcmp(".spinlock.text", txtname) == 0 ||
                strcmp(".irqentry.text", txtname) == 0 ||
+               strcmp(".softirqentry.text", txtname) == 0 ||
                strcmp(".kprobes.text", txtname) == 0 ||
                strcmp(".text.unlikely", txtname) == 0;
 }
index 96e2486..2d48011 100755 (executable)
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -134,6 +134,7 @@ my %text_sections = (
      ".sched.text" => 1,
      ".spinlock.text" => 1,
      ".irqentry.text" => 1,
+     ".softirqentry.text" => 1,
      ".kprobes.text" => 1,
      ".text.unlikely" => 1,
 );
index dd48f42..f64c57b 100644 (file)
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -603,7 +603,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
                        return -ENOMEM;
                sprintf(t->label[i], "label%d", i);
 
-               t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS,
+               t->flush[i] = test_alloc(t, max(PAGE_SIZE,
+                                       sizeof(u64) * NUM_HINTS),
                                &t->flush_dma[i]);
                if (!t->flush[i])
                        return -ENOMEM;