Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jul 2013 01:16:34 +0000 (18:16 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jul 2013 01:16:34 +0000 (18:16 -0700)
Pull Sparc bugfixes from David Miller:
 "Four bug fixes:

   1) Enable snoop tags properly on Sparc32/LEON, from Andreas Larsson

   2) strcpy() length check fix from Chen Gang.

   3) Forgotten unregister_netdev() in sunvnet driver, from Dave
      Kleikamp.

   4) Fix broken assembler offsets used in vm_area_struct accesses on
      sparc32, from Olivier DANET."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  [PATCH] sparc32: vm_area_struct access for old Sun SPARCs.
  sunvnet: vnet_port_remove must call unregister_netdev
  sparc32, leon: Require separate snoop tags set to regard snooping to be enabled
  arch: sparc: kernel: check the memory length before use strcpy().

134 files changed:
Documentation/ABI/stable/sysfs-module
Documentation/coccinelle.txt
Documentation/devicetree/bindings/iommu/arm,smmu.txt [new file with mode: 0644]
Documentation/kbuild/kconfig.txt
Documentation/vfio.txt
Documentation/vm/zswap.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arm/mm/mmap.c
arch/arm64/mm/mmap.c
arch/mips/mm/mmap.c
arch/powerpc/mm/mmap.c
arch/s390/mm/mmap.c
arch/sparc/kernel/sys_sparc_64.c
arch/tile/mm/mmap.c
arch/x86/ia32/ia32_aout.c
arch/x86/kernel/cpu/perf_event_amd_iommu.c
arch/x86/mm/mmap.c
drivers/gpu/drm/drm_edid_load.c
drivers/ide/delkin_cb.c
drivers/ide/gayle.c
drivers/ide/ide-taskfile.c
drivers/ide/tx4938ide.c
drivers/ide/tx4939ide.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu.c [new file with mode: 0644]
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/iommu.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iopgtable.h
drivers/iommu/omap-iovmm.c
drivers/net/virtio_net.c
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/Kconfig
drivers/vhost/Makefile
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/test.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_pci.c
fs/binfmt_aout.c
fs/binfmt_elf.c
include/linux/mm_types.h
include/linux/moduleparam.h
include/linux/sched.h
include/linux/virtio_ring.h
include/linux/zbud.h [new file with mode: 0644]
include/uapi/linux/vfio.h
include/uapi/linux/virtio_config.h
kernel/events/core.c
kernel/fork.c
kernel/module.c
kernel/params.c
kernel/printk.c
mm/Kconfig
mm/Makefile
mm/mmap.c
mm/nommu.c
mm/util.c
mm/zbud.c [new file with mode: 0644]
mm/zswap.c [new file with mode: 0644]
scripts/Makefile.headersinst
scripts/Makefile.lib
scripts/coccicheck
scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci
scripts/coccinelle/api/alloc/kzalloc-simple.cocci
scripts/coccinelle/api/d_find_alias.cocci
scripts/coccinelle/api/devm_request_and_ioremap.cocci
scripts/coccinelle/api/kstrdup.cocci
scripts/coccinelle/api/memdup.cocci
scripts/coccinelle/api/memdup_user.cocci
scripts/coccinelle/api/ptr_ret.cocci
scripts/coccinelle/api/simple_open.cocci
scripts/coccinelle/free/devm_free.cocci
scripts/coccinelle/free/kfree.cocci
scripts/coccinelle/free/kfreeaddr.cocci [new file with mode: 0644]
scripts/coccinelle/free/pci_free_consistent.cocci [new file with mode: 0644]
scripts/coccinelle/iterators/fen.cocci
scripts/coccinelle/iterators/itnull.cocci
scripts/coccinelle/iterators/list_entry_update.cocci
scripts/coccinelle/iterators/use_after_iter.cocci
scripts/coccinelle/locks/call_kern.cocci
scripts/coccinelle/locks/double_lock.cocci
scripts/coccinelle/locks/flags.cocci
scripts/coccinelle/locks/mini_lock.cocci
scripts/coccinelle/misc/boolinit.cocci
scripts/coccinelle/misc/cstptr.cocci
scripts/coccinelle/misc/doubleinit.cocci
scripts/coccinelle/misc/ifaddr.cocci
scripts/coccinelle/misc/ifcol.cocci
scripts/coccinelle/misc/noderef.cocci
scripts/coccinelle/misc/orplus.cocci
scripts/coccinelle/misc/warn.cocci
scripts/coccinelle/null/eno.cocci
scripts/coccinelle/null/kmerr.cocci
scripts/coccinelle/tests/doublebitand.cocci
scripts/coccinelle/tests/doubletest.cocci
scripts/coccinelle/tests/odd_ptr_err.cocci
scripts/config
scripts/headers_install.sh
scripts/kconfig/conf.c
scripts/kconfig/confdata.c
scripts/kconfig/expr.h
scripts/kconfig/lkc.h
scripts/kconfig/lkc_proto.h
scripts/kconfig/lxdialog/checklist.c
scripts/kconfig/lxdialog/dialog.h
scripts/kconfig/lxdialog/inputbox.c
scripts/kconfig/lxdialog/menubox.c
scripts/kconfig/lxdialog/textbox.c
scripts/kconfig/lxdialog/util.c
scripts/kconfig/lxdialog/yesno.c
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
scripts/kconfig/nconf.c
scripts/kconfig/nconf.gui.c
scripts/kconfig/symbol.c
scripts/mod/Makefile
scripts/mod/file2alias.c
scripts/package/mkspec
scripts/setlocalversion
tools/include/tools/be_byteshift.h
tools/include/tools/le_byteshift.h
tools/lguest/Makefile
tools/lguest/lguest.c
tools/virtio/linux/module.h
tools/virtio/linux/virtio.h

index a0dd21c..6272ae5 100644 (file)
@@ -4,9 +4,13 @@ Description:
 
        /sys/module/MODULENAME
                The name of the module that is in the kernel.  This
-               module name will show up either if the module is built
-               directly into the kernel, or if it is loaded as a
-               dynamic module.
+               module name will always show up if the module is loaded as a
+               dynamic module.  If it is built directly into the kernel, it
+               will only show up if it has a version or at least one
+               parameter.
+
+               Note: The conditions of creation in the built-in case are not
+               by design and may be removed in the future.
 
        /sys/module/MODULENAME/parameters
                This directory contains individual files that are each
index 18de785..7f773d5 100644 (file)
@@ -6,15 +6,17 @@ Copyright 2010 Gilles Muller <Gilles.Muller@lip6.fr>
  Getting Coccinelle
 ~~~~~~~~~~~~~~~~~~~~
 
-The semantic patches included in the kernel use the 'virtual rule'
-feature which was introduced in Coccinelle version 0.1.11.
+The semantic patches included in the kernel use features and options
+which are provided by Coccinelle version 1.0.0-rc11 and above.
+Using earlier versions will fail as the option names used by
+the Coccinelle files and coccicheck have been updated.
 
-Coccinelle (>=0.2.0) is available through the package manager
+Coccinelle is available through the package manager
 of many distributions, e.g. :
 
- - Debian (>=squeeze)
- - Fedora (>=13)
- - Ubuntu (>=10.04 Lucid Lynx)
+ - Debian
+ - Fedora
+ - Ubuntu
  - OpenSUSE
  - Arch Linux
  - NetBSD
@@ -36,11 +38,6 @@ as a regular user, and install it with
 
         sudo make install
 
-The semantic patches in the kernel will work best with Coccinelle version
-0.2.4 or later.  Using earlier versions may incur some parse errors in the
-semantic patch code, but any results that are obtained should still be
-correct.
-
  Using Coccinelle on the Linux kernel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -48,7 +45,7 @@ A Coccinelle-specific target is defined in the top level
 Makefile. This target is named 'coccicheck' and calls the 'coccicheck'
 front-end in the 'scripts' directory.
 
-Four modes are defined: patch, report, context, and org. The mode to
+Four basic modes are defined: patch, report, context, and org. The mode to
 use is specified by setting the MODE variable with 'MODE=<mode>'.
 
 'patch' proposes a fix, when possible.
@@ -62,18 +59,24 @@ diff-like style.Lines of interest are indicated with '-'.
 'org' generates a report in the Org mode format of Emacs.
 
 Note that not all semantic patches implement all modes. For easy use
-of Coccinelle, the default mode is "chain" which tries the previous
-modes in the order above until one succeeds.
+of Coccinelle, the default mode is "report".
+
+Two other modes provide some common combinations of these modes.
 
-To make a report for every semantic patch, run the following command:
+'chain' tries the previous modes in the order above until one succeeds.
 
-       make coccicheck MODE=report
+'rep+ctxt' runs successively the report mode and the context mode.
+          It should be used with the C option (described later)
+          which checks the code on a file basis.
 
-NB: The 'report' mode is the default one.
+Examples:
+       To make a report for every semantic patch, run the following command:
 
-To produce patches, run:
+               make coccicheck MODE=report
 
-       make coccicheck MODE=patch
+       To produce patches, run:
+
+               make coccicheck MODE=patch
 
 
 The coccicheck target applies every semantic patch available in the
@@ -91,6 +94,11 @@ To enable verbose messages set the V= variable, for example:
 
    make coccicheck MODE=report V=1
 
+By default, coccicheck tries to run in parallel as much as possible. To change
+the parallelism, set the J= variable. For example, to run across 4 CPUs:
+
+   make coccicheck MODE=report J=4
+
 
  Using Coccinelle with a single semantic patch
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -124,26 +132,33 @@ To check only newly edited code, use the value 2 for the C flag, i.e.
 
     make C=2 CHECK="scripts/coccicheck"
 
+In these modes, which work on a file basis, there is no information
+about semantic patches displayed, and no commit message proposed.
+
 This runs every semantic patch in scripts/coccinelle by default. The
 COCCI variable may additionally be used to only apply a single
 semantic patch as shown in the previous section.
 
-The "chain" mode is the default. You can select another one with the
+The "report" mode is the default. You can select another one with the
 MODE variable explained above.
 
-In this mode, there is no information about semantic patches
-displayed, and no commit message proposed.
-
  Additional flags
 ~~~~~~~~~~~~~~~~~~
 
 Additional flags can be passed to spatch through the SPFLAGS
 variable.
 
-    make SPFLAGS=--use_glimpse coccicheck
+    make SPFLAGS=--use-glimpse coccicheck
+    make SPFLAGS=--use-idutils coccicheck
 
 See spatch --help to learn more about spatch options.
 
+Note that the '--use-glimpse' and '--use-idutils' options
+require external tools for indexing the code; neither is
+therefore active by default. However, by indexing the code with
+one of these tools, and depending on the cocci file used,
+spatch can process the entire code base more quickly.
+
  Proposing new semantic patches
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
new file mode 100644 (file)
index 0000000..e34c6cd
--- /dev/null
@@ -0,0 +1,70 @@
+* ARM System MMU Architecture Implementation
+
+ARM SoCs may contain an implementation of the ARM System Memory
+Management Unit Architecture, which can be used to provide 1 or 2 stages
+of address translation to bus masters external to the CPU.
+
+The SMMU may also raise interrupts in response to various fault
+conditions.
+
+** System MMU required properties:
+
+- compatible    : Should be one of:
+
+                        "arm,smmu-v1"
+                        "arm,smmu-v2"
+                        "arm,mmu-400"
+                        "arm,mmu-500"
+
+                  depending on the particular implementation and/or the
+                  version of the architecture implemented.
+
+- reg           : Base address and size of the SMMU.
+
+- #global-interrupts : The number of global interrupts exposed by the
+                       device.
+
+- interrupts    : Interrupt list, with the first #global-interrupts entries
+                  corresponding to the global interrupts and any
+                  following entries corresponding to context interrupts,
+                  specified in order of their indexing by the SMMU.
+
+                  For SMMUv2 implementations, there must be exactly one
+                  interrupt per context bank. In the case of a single,
+                  combined interrupt, it must be listed multiple times.
+
+- mmu-masters   : A list of phandles to device nodes representing bus
+                  masters for which the SMMU can provide a translation
+                  and their corresponding StreamIDs (see example below).
+                  Each device node linked from this list must have a
+                  "#stream-id-cells" property, indicating the number of
+                  StreamIDs associated with it.
+
+** System MMU optional properties:
+
+- smmu-parent   : When multiple SMMUs are chained together, this
+                  property can be used to provide a phandle to the
+                  parent SMMU (that is the next SMMU on the path going
+                  from the mmu-masters towards memory) node for this
+                  SMMU.
+
+Example:
+
+        smmu {
+                compatible = "arm,smmu-v1";
+                reg = <0xba5e0000 0x10000>;
+                #global-interrupts = <2>;
+                interrupts = <0 32 4>,
+                             <0 33 4>,
+                             <0 34 4>, /* This is the first context interrupt */
+                             <0 35 4>,
+                             <0 36 4>,
+                             <0 37 4>;
+
+                /*
+                 * Two DMA controllers, the first with two StreamIDs (0xd01d
+                 * and 0xd01e) and the second with only one (0xd11c).
+                 */
+                mmu-masters = <&dma0 0xd01d 0xd01e>,
+                              <&dma1 0xd11c>;
+        };
index 213859e..e349f29 100644 (file)
@@ -174,6 +174,19 @@ Searching in menuconfig:
 
                /^hotplug
 
+       When searching, symbols are sorted thus:
+         - exact match first: an exact match is when the search matches
+           the complete symbol name;
+         - alphabetical order: when two symbols do not match exactly,
+           they are sorted in alphabetical order (in the user's current
+           locale).
+       For example: ^ATH.K matches:
+           ATH5K ATH9K ATH5K_AHB ATH5K_DEBUG [...] ATH6KL ATH6KL_DEBUG
+           [...] ATH9K_AHB ATH9K_BTCOEX_SUPPORT ATH9K_COMMON [...]
+       of which only ATH5K and ATH9K match exactly and so are sorted
+       first (and in alphabetical order), then come all other symbols,
+       sorted in alphabetical order.
+
 ______________________________________________________________________
 User interface options for 'menuconfig'
 
index c55533c..d7993dc 100644 (file)
@@ -172,12 +172,12 @@ group and can access them as follows:
        struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
 
        /* Create a new container */
-       container = open("/dev/vfio/vfio, O_RDWR);
+       container = open("/dev/vfio/vfio", O_RDWR);
 
        if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
                /* Unknown API version */
 
-       if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_X86_IOMMU))
+       if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
                /* Doesn't support the IOMMU driver we want. */
 
        /* Open the group */
@@ -193,7 +193,7 @@ group and can access them as follows:
        ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 
        /* Enable the IOMMU model we want */
-       ioctl(container, VFIO_SET_IOMMU, VFIO_X86_IOMMU)
+       ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU)
 
        /* Get addition IOMMU info */
        ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info);
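
        The sequence above typically continues by mapping process memory into
        the container's IOMMU domain.  The following is a minimal sketch of
        that step, assuming the type1 backend and the uapi definitions in
        include/uapi/linux/vfio.h; it is illustrative and not part of the
        patch above.

	/* Sketch: map 1 MB of anonymous memory at IOVA 0 for the device */
	struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };

	dma_map.vaddr = (__u64)(unsigned long)mmap(NULL, 1024 * 1024,
						   PROT_READ | PROT_WRITE,
						   MAP_PRIVATE | MAP_ANONYMOUS,
						   -1, 0);
	dma_map.size  = 1024 * 1024;
	dma_map.iova  = 0;		/* device-visible address */
	dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

	ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);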
diff --git a/Documentation/vm/zswap.txt b/Documentation/vm/zswap.txt
new file mode 100644 (file)
index 0000000..7e492d8
--- /dev/null
@@ -0,0 +1,68 @@
+Overview:
+
+Zswap is a lightweight compressed cache for swap pages. It takes pages that are
+in the process of being swapped out and attempts to compress them into a
+dynamically allocated RAM-based memory pool.  zswap basically trades CPU cycles
+for potentially reduced swap I/O.  This trade-off can also result in a
+significant performance improvement if reads from the compressed cache are
+faster than reads from a swap device.
+
+NOTE: Zswap is a new feature as of v3.11 and interacts heavily with memory
+reclaim.  This interaction has not been fully explored on the large set of
+potential configurations and workloads that exist.  For this reason, zswap
+is a work in progress and should be considered experimental.
+
+Some potential benefits:
+* Desktop/laptop users with limited RAM capacities can mitigate the
+    performance impact of swapping.
+* Overcommitted guests that share a common I/O resource can
+    dramatically reduce their swap I/O pressure, avoiding heavy-handed I/O
+    throttling by the hypervisor. This allows more work to get done with less
+    impact on the guest workload and guests sharing the I/O subsystem.
+* Users with SSDs as swap devices can extend the life of the device by
+    drastically reducing life-shortening writes.
+
+Zswap evicts pages from compressed cache on an LRU basis to the backing swap
+device when the compressed pool reaches its size limit.  This requirement had
+been identified in prior community discussions.
+
+To enable zswap, the "enabled" attribute must be set to 1 at boot time, e.g.
+zswap.enabled=1
+
+Design:
+
+Zswap receives pages for compression through the Frontswap API and is able to
+evict pages from its own compressed pool on an LRU basis and write them back to
+the backing swap device in the case that the compressed pool is full.
+
+Zswap makes use of zbud for managing the compressed memory pool.  Each
+allocation in zbud is not directly accessible by address.  Rather, a handle is
+returned by the allocation routine and that handle must be mapped before being
+accessed.  The compressed memory pool grows on demand and shrinks as compressed
+pages are freed.  The pool is not preallocated.
+
+When a swap page is passed from frontswap to zswap, zswap maintains a mapping
+of the swap entry, a combination of the swap type and swap offset, to the zbud
+handle that references that compressed swap page.  This mapping is achieved
+with a red-black tree per swap type.  The swap offset is the search key for the
+tree nodes.
+
+During a page fault on a PTE that is a swap entry, frontswap calls the zswap
+load function to decompress the page into the page allocated by the page fault
+handler.
+
+Once there are no PTEs referencing a swap page stored in zswap (i.e. the count
+in the swap_map goes to 0) the swap code calls the zswap invalidate function,
+via frontswap, to free the compressed entry.
+
+Zswap seeks to be simple in its policies.  Sysfs attributes allow for one
+user-controlled policy:
+* max_pool_percent - The maximum percentage of memory that the compressed
+    pool can occupy.
+
+Zswap allows the compressor to be selected at kernel boot time by setting the
+"compressor" attribute, e.g. zswap.compressor=deflate.  The default
+compressor is lzo.
+
+A debugfs interface is provided for various statistics about pool size, number
+of pages stored, and various counters for the reasons pages are rejected.
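
        To make the rbtree design described above concrete, here is a minimal
        kernel-style sketch of the per-swap-type lookup and insert, keyed by
        swap offset.  This is not the mm/zswap.c code added by this merge; the
        zswap_entry structure and function names are illustrative, and only the
        standard <linux/rbtree.h> helpers are assumed (the same pattern appears
        in the arm-smmu.c master lookup further down).

	#include <linux/errno.h>
	#include <linux/rbtree.h>
	#include <linux/types.h>

	/* Hypothetical per-page bookkeeping: one node per stored swap page. */
	struct zswap_entry {
		struct rb_node rbnode;
		pgoff_t offset;		/* swap offset, the search key */
		unsigned long handle;	/* opaque zbud handle */
	};

	/* Find the entry for a swap offset in one swap type's tree. */
	static struct zswap_entry *zswap_rb_search(struct rb_root *root,
						   pgoff_t offset)
	{
		struct rb_node *node = root->rb_node;

		while (node) {
			struct zswap_entry *entry;

			entry = rb_entry(node, struct zswap_entry, rbnode);
			if (offset < entry->offset)
				node = node->rb_left;
			else if (offset > entry->offset)
				node = node->rb_right;
			else
				return entry;
		}
		return NULL;
	}

	/* Insert a new entry; returns -EEXIST if the offset is already present. */
	static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct zswap_entry *this;

			this = rb_entry(*link, struct zswap_entry, rbnode);
			parent = *link;
			if (entry->offset < this->offset)
				link = &(*link)->rb_left;
			else if (entry->offset > this->offset)
				link = &(*link)->rb_right;
			else
				return -EEXIST;
		}

		rb_link_node(&entry->rbnode, parent, link);
		rb_insert_color(&entry->rbnode, root);
		return 0;
	}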
index 37f9a71..cbd4f66 100644 (file)
@@ -1333,6 +1333,12 @@ S:       Supported
 F:     arch/arm/mach-zynq/
 F:     drivers/cpuidle/cpuidle-zynq.c
 
+ARM SMMU DRIVER
+M:     Will Deacon <will.deacon@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     drivers/iommu/arm-smmu.c
+
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
 M:     Will Deacon <will.deacon@arm.com>
@@ -2129,9 +2135,12 @@ COCCINELLE/Semantic Patches (SmPL)
 M:     Julia Lawall <Julia.Lawall@lip6.fr>
 M:     Gilles Muller <Gilles.Muller@lip6.fr>
 M:     Nicolas Palix <nicolas.palix@imag.fr>
+M:     Michal Marek <mmarek@suse.cz>
 L:     cocci@systeme.lip6.fr (moderated for non-subscribers)
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
 W:     http://coccinelle.lip6.fr/
 S:     Supported
+F:     Documentation/coccinelle.txt
 F:     scripts/coccinelle/
 F:     scripts/coccicheck
 
@@ -8884,6 +8893,7 @@ M:        "Michael S. Tsirkin" <mst@redhat.com>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
 F:     drivers/virtio/
+F:     tools/virtio/
 F:     drivers/net/virtio_net.c
 F:     drivers/block/virtio_blk.c
 F:     include/linux/virtio_*.h
index 170ed7c..e631e6c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -981,7 +981,7 @@ _modinst_:
 # boot a modules.dep even before / is mounted read-write.  However the
 # boot script depmod is the master version.
 PHONY += _modinst_post
-_modinst_post: _modinst_
+_modinst_post: include/config/kernel.release _modinst_
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst
        $(call cmd,depmod)
 
@@ -1116,6 +1116,7 @@ help:
        @echo  '  gtags           - Generate GNU GLOBAL index'
        @echo  '  kernelrelease   - Output the release version string'
        @echo  '  kernelversion   - Output the version stored in Makefile'
+       @echo  '  image_name      - Output the image name'
        @echo  '  headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
         echo  '                    (default: $(INSTALL_HDR_PATH))'; \
         echo  ''
@@ -1310,7 +1311,7 @@ export_report:
 endif #ifeq ($(config-targets),1)
 endif #ifeq ($(mixed-targets),1)
 
-PHONY += checkstack kernelrelease kernelversion
+PHONY += checkstack kernelrelease kernelversion image_name
 
 # UML needs a little special treatment here.  It wants to use the host
 # toolchain, so needs $(SUBARCH) passed to checkstack.pl.  Everyone
@@ -1331,6 +1332,9 @@ kernelrelease:
 kernelversion:
        @echo $(KERNELVERSION)
 
+image_name:
+       @echo $(KBUILD_IMAGE)
+
 # Clear a bunch of variables before executing the submake
 tools/: FORCE
        $(Q)mkdir -p $(objtree)/tools
index 10062ce..0c63562 100644 (file)
@@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
 
index 7c7be78..8ed6cb1 100644 (file)
@@ -90,11 +90,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
 EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
index 7e5fe27..f1baadd 100644 (file)
@@ -158,11 +158,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
 
index 67a42ed..cb8bdbe 100644 (file)
@@ -92,10 +92,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
index 06bafec..4002329 100644 (file)
@@ -91,11 +91,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
 
@@ -176,11 +174,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = s390_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = s390_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
 
index 2daaaa6..51561b8 100644 (file)
@@ -290,7 +290,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                /* We know it's 32-bit */
                unsigned long task_size = STACK_TOP32;
@@ -302,7 +301,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
 
index f96f4ce..d67d91e 100644 (file)
@@ -66,10 +66,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(mm);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
index 52ff81c..bae3aba 100644 (file)
@@ -308,8 +308,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
                (current->mm->start_data = N_DATADDR(ex));
        current->mm->brk = ex.a_bss +
                (current->mm->start_brk = N_BSSADDR(ex));
-       current->mm->free_area_cache = TASK_UNMAPPED_BASE;
-       current->mm->cached_hole_size = 0;
 
        retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
        if (retval < 0) {
index 0db655e..639d128 100644 (file)
@@ -491,10 +491,8 @@ static struct perf_amd_iommu __perf_iommu = {
 static __init int amd_iommu_pc_init(void)
 {
        /* Make sure the IOMMU PC resource is available */
-       if (!amd_iommu_pc_supported()) {
-               pr_err("perf: amd_iommu PMU not installed. No support!\n");
+       if (!amd_iommu_pc_supported())
                return -ENODEV;
-       }
 
        _init_perf_amd_iommu(&__perf_iommu, "amd_iommu");
 
index 845df68..62c29a5 100644 (file)
@@ -115,10 +115,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        if (mmap_is_legacy()) {
                mm->mmap_base = mmap_legacy_base();
                mm->get_unmapped_area = arch_get_unmapped_area;
-               mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-               mm->unmap_area = arch_unmap_area_topdown;
        }
 }
index a4f5ce1..271b42b 100644 (file)
@@ -133,8 +133,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
        },
 };
 
-static u8 *edid_load(struct drm_connector *connector, char *name,
-                       char *connector_name)
+static u8 *edid_load(struct drm_connector *connector, const char *name,
+                       const char *connector_name)
 {
        const struct firmware *fw;
        struct platform_device *pdev;
@@ -242,7 +242,7 @@ out:
 
 int drm_load_edid_firmware(struct drm_connector *connector)
 {
-       char *connector_name = drm_get_connector_name(connector);
+       const char *connector_name = drm_get_connector_name(connector);
        char *edidname = edid_firmware, *last, *colon;
        int ret;
        struct edid *edid;
index 7e27d32..300daab 100644 (file)
@@ -173,18 +173,7 @@ static struct pci_driver delkin_cb_pci_driver = {
        .resume         = delkin_cb_resume,
 };
 
-static int __init delkin_cb_init(void)
-{
-       return pci_register_driver(&delkin_cb_pci_driver);
-}
-
-static void __exit delkin_cb_exit(void)
-{
-       pci_unregister_driver(&delkin_cb_pci_driver);
-}
-
-module_init(delkin_cb_init);
-module_exit(delkin_cb_exit);
+module_pci_driver(delkin_cb_pci_driver);
 
 MODULE_AUTHOR("Mark Lord");
 MODULE_DESCRIPTION("Basic support for Delkin/ASKA/Workbit Cardbus IDE");
index 51beb85..0a8440a 100644 (file)
@@ -183,20 +183,7 @@ static struct platform_driver amiga_gayle_ide_driver = {
        },
 };
 
-static int __init amiga_gayle_ide_init(void)
-{
-       return platform_driver_probe(&amiga_gayle_ide_driver,
-                                    amiga_gayle_ide_probe);
-}
-
-module_init(amiga_gayle_ide_init);
-
-static void __exit amiga_gayle_ide_exit(void)
-{
-       platform_driver_unregister(&amiga_gayle_ide_driver);
-}
-
-module_exit(amiga_gayle_ide_exit);
+module_platform_driver_probe(amiga_gayle_ide_driver, amiga_gayle_ide_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:amiga-gayle-ide");
index 729428e..dabb88b 100644 (file)
@@ -239,9 +239,6 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
                unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);
                int page_is_high;
 
-               if (nr_bytes > PAGE_SIZE)
-                       nr_bytes = PAGE_SIZE;
-
                page = sg_page(cursg);
                offset = cursg->offset + cmd->cursg_ofs;
 
@@ -249,6 +246,8 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
                page = nth_page(page, (offset >> PAGE_SHIFT));
                offset %= PAGE_SIZE;
 
+               nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset));
+
                page_is_high = PageHighMem(page);
                if (page_is_high)
                        local_irq_save(flags);
index 91d49dd..ede8575 100644 (file)
@@ -203,18 +203,7 @@ static struct platform_driver tx4938ide_driver = {
        .remove = __exit_p(tx4938ide_remove),
 };
 
-static int __init tx4938ide_init(void)
-{
-       return platform_driver_probe(&tx4938ide_driver, tx4938ide_probe);
-}
-
-static void __exit tx4938ide_exit(void)
-{
-       platform_driver_unregister(&tx4938ide_driver);
-}
-
-module_init(tx4938ide_init);
-module_exit(tx4938ide_exit);
+module_platform_driver_probe(tx4938ide_driver, tx4938ide_probe);
 
 MODULE_DESCRIPTION("TX4938 internal IDE driver");
 MODULE_LICENSE("GPL");
index c0ab800..4ecdee5 100644 (file)
@@ -624,18 +624,7 @@ static struct platform_driver tx4939ide_driver = {
        .resume = tx4939ide_resume,
 };
 
-static int __init tx4939ide_init(void)
-{
-       return platform_driver_probe(&tx4939ide_driver, tx4939ide_probe);
-}
-
-static void __exit tx4939ide_exit(void)
-{
-       platform_driver_unregister(&tx4939ide_driver);
-}
-
-module_init(tx4939ide_init);
-module_exit(tx4939ide_exit);
+module_platform_driver_probe(tx4939ide_driver, tx4939ide_probe);
 
 MODULE_DESCRIPTION("TX4939 internal IDE driver");
 MODULE_LICENSE("GPL");
index 01730b2..820d85c 100644 (file)
@@ -269,4 +269,17 @@ config SPAPR_TCE_IOMMU
          Enables bits of IOMMU API required by VFIO. The iommu_ops
          is not implemented as it is not necessary for VFIO.
 
+config ARM_SMMU
+       bool "ARM Ltd. System MMU (SMMU) Support"
+       depends on ARM64 || (ARM_LPAE && OF)
+       select IOMMU_API
+       select ARM_DMA_USE_IOMMU if ARM
+       help
+         Support for implementations of the ARM System MMU architecture
+         versions 1 and 2. The driver supports both v7l and v8l table
+         formats with 4k and 64k page sizes.
+
+         Say Y here if your SoC includes an IOMMU device implementing
+         the ARM SMMU architecture.
+
 endif # IOMMU_SUPPORT
index ef0e520..bbe7041 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_OF_IOMMU)  += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
index 21d02b0..6dc6594 100644 (file)
@@ -287,14 +287,27 @@ static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
 
        /*
         * If it's a multifunction device that does not support our
-        * required ACS flags, add to the same group as function 0.
+        * required ACS flags, add to the same group as the lowest numbered
+        * function that also does not support the required ACS flags.
         */
        if (dma_pdev->multifunction &&
-           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-               swap_pci_ref(&dma_pdev,
-                            pci_get_slot(dma_pdev->bus,
-                                         PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-                                         0)));
+           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+               u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+               for (i = 0; i < 8; i++) {
+                       struct pci_dev *tmp;
+
+                       tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+                       if (!tmp)
+                               continue;
+
+                       if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+                               swap_pci_ref(&dma_pdev, tmp);
+                               break;
+                       }
+                       pci_dev_put(tmp);
+               }
+       }
 
        /*
         * Devices on the root bus go through the iommu.  If that's not us,
@@ -1484,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 
                        /* Large PTE found which maps this address */
                        unmap_size = PTE_PAGE_SIZE(*pte);
+
+                       /* Only unmap from the first pte in the page */
+                       if ((unmap_size - 1) & bus_addr)
+                               break;
                        count      = PAGE_SIZE_PTE_COUNT(unmap_size);
                        for (i = 0; i < count; i++)
                                pte[i] = 0ULL;
@@ -1493,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
                unmapped += unmap_size;
        }
 
-       BUG_ON(!is_power_of_2(unmapped));
+       BUG_ON(unmapped && !is_power_of_2(unmapped));
 
        return unmapped;
 }
@@ -1893,34 +1910,59 @@ static void domain_id_free(int id)
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
+#define DEFINE_FREE_PT_FN(LVL, FN)                             \
+static void free_pt_##LVL (unsigned long __pt)                 \
+{                                                              \
+       unsigned long p;                                        \
+       u64 *pt;                                                \
+       int i;                                                  \
+                                                               \
+       pt = (u64 *)__pt;                                       \
+                                                               \
+       for (i = 0; i < 512; ++i) {                             \
+               if (!IOMMU_PTE_PRESENT(pt[i]))                  \
+                       continue;                               \
+                                                               \
+               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);       \
+               FN(p);                                          \
+       }                                                       \
+       free_page((unsigned long)pt);                           \
+}
+
+DEFINE_FREE_PT_FN(l2, free_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
 static void free_pagetable(struct protection_domain *domain)
 {
-       int i, j;
-       u64 *p1, *p2, *p3;
-
-       p1 = domain->pt_root;
-
-       if (!p1)
-               return;
-
-       for (i = 0; i < 512; ++i) {
-               if (!IOMMU_PTE_PRESENT(p1[i]))
-                       continue;
-
-               p2 = IOMMU_PTE_PAGE(p1[i]);
-               for (j = 0; j < 512; ++j) {
-                       if (!IOMMU_PTE_PRESENT(p2[j]))
-                               continue;
-                       p3 = IOMMU_PTE_PAGE(p2[j]);
-                       free_page((unsigned long)p3);
-               }
+       unsigned long root = (unsigned long)domain->pt_root;
 
-               free_page((unsigned long)p2);
+       switch (domain->mode) {
+       case PAGE_MODE_NONE:
+               break;
+       case PAGE_MODE_1_LEVEL:
+               free_page(root);
+               break;
+       case PAGE_MODE_2_LEVEL:
+               free_pt_l2(root);
+               break;
+       case PAGE_MODE_3_LEVEL:
+               free_pt_l3(root);
+               break;
+       case PAGE_MODE_4_LEVEL:
+               free_pt_l4(root);
+               break;
+       case PAGE_MODE_5_LEVEL:
+               free_pt_l5(root);
+               break;
+       case PAGE_MODE_6_LEVEL:
+               free_pt_l6(root);
+               break;
+       default:
+               BUG();
        }
-
-       free_page((unsigned long)p1);
-
-       domain->pt_root = NULL;
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
new file mode 100644 (file)
index 0000000..ebd0a4c
--- /dev/null
@@ -0,0 +1,1969 @@
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver currently supports:
+ *     - SMMUv1 and v2 implementations
+ *     - Stream-matching and stream-indexing
+ *     - v7/v8 long-descriptor format
+ *     - Non-secure access to the SMMU
+ *     - 4k and 64k pages, with contiguous pte hints.
+ *     - Up to 39-bit addressing
+ *     - Context fault reporting
+ */
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/amba/bus.h>
+
+#include <asm/pgalloc.h>
+
+/* Maximum number of stream IDs assigned to a single device */
+#define MAX_MASTER_STREAMIDS           8
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS               128
+
+/* Maximum number of mapping groups per SMMU */
+#define ARM_SMMU_MAX_SMRS              128
+
+/* Number of VMIDs per SMMU */
+#define ARM_SMMU_NUM_VMIDS             256
+
+/* SMMU global address space */
+#define ARM_SMMU_GR0(smmu)             ((smmu)->base)
+#define ARM_SMMU_GR1(smmu)             ((smmu)->base + (smmu)->pagesize)
+
+/* Page table bits */
+#define ARM_SMMU_PTE_PAGE              (((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_CONT              (((pteval_t)1) << 52)
+#define ARM_SMMU_PTE_AF                        (((pteval_t)1) << 10)
+#define ARM_SMMU_PTE_SH_NS             (((pteval_t)0) << 8)
+#define ARM_SMMU_PTE_SH_OS             (((pteval_t)2) << 8)
+#define ARM_SMMU_PTE_SH_IS             (((pteval_t)3) << 8)
+
+#if PAGE_SIZE == SZ_4K
+#define ARM_SMMU_PTE_CONT_ENTRIES      16
+#elif PAGE_SIZE == SZ_64K
+#define ARM_SMMU_PTE_CONT_ENTRIES      32
+#else
+#define ARM_SMMU_PTE_CONT_ENTRIES      1
+#endif
+
+#define ARM_SMMU_PTE_CONT_SIZE         (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
+#define ARM_SMMU_PTE_CONT_MASK         (~(ARM_SMMU_PTE_CONT_SIZE - 1))
+#define ARM_SMMU_PTE_HWTABLE_SIZE      (PTRS_PER_PTE * sizeof(pte_t))
+
+/* Stage-1 PTE */
+#define ARM_SMMU_PTE_AP_UNPRIV         (((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_AP_RDONLY         (((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_ATTRINDX_SHIFT    2
+
+/* Stage-2 PTE */
+#define ARM_SMMU_PTE_HAP_FAULT         (((pteval_t)0) << 6)
+#define ARM_SMMU_PTE_HAP_READ          (((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_HAP_WRITE         (((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_MEMATTR_OIWB      (((pteval_t)0xf) << 2)
+#define ARM_SMMU_PTE_MEMATTR_NC                (((pteval_t)0x5) << 2)
+#define ARM_SMMU_PTE_MEMATTR_DEV       (((pteval_t)0x1) << 2)
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0              0x0
+#define sCR0_CLIENTPD                  (1 << 0)
+#define sCR0_GFRE                      (1 << 1)
+#define sCR0_GFIE                      (1 << 2)
+#define sCR0_GCFGFRE                   (1 << 4)
+#define sCR0_GCFGFIE                   (1 << 5)
+#define sCR0_USFCFG                    (1 << 10)
+#define sCR0_VMIDPNE                   (1 << 11)
+#define sCR0_PTM                       (1 << 12)
+#define sCR0_FB                                (1 << 13)
+#define sCR0_BSU_SHIFT                 14
+#define sCR0_BSU_MASK                  0x3
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0               0x20
+#define ARM_SMMU_GR0_ID1               0x24
+#define ARM_SMMU_GR0_ID2               0x28
+#define ARM_SMMU_GR0_ID3               0x2c
+#define ARM_SMMU_GR0_ID4               0x30
+#define ARM_SMMU_GR0_ID5               0x34
+#define ARM_SMMU_GR0_ID6               0x38
+#define ARM_SMMU_GR0_ID7               0x3c
+#define ARM_SMMU_GR0_sGFSR             0x48
+#define ARM_SMMU_GR0_sGFSYNR0          0x50
+#define ARM_SMMU_GR0_sGFSYNR1          0x54
+#define ARM_SMMU_GR0_sGFSYNR2          0x58
+#define ARM_SMMU_GR0_PIDR0             0xfe0
+#define ARM_SMMU_GR0_PIDR1             0xfe4
+#define ARM_SMMU_GR0_PIDR2             0xfe8
+
+#define ID0_S1TS                       (1 << 30)
+#define ID0_S2TS                       (1 << 29)
+#define ID0_NTS                                (1 << 28)
+#define ID0_SMS                                (1 << 27)
+#define ID0_PTFS_SHIFT                 24
+#define ID0_PTFS_MASK                  0x2
+#define ID0_PTFS_V8_ONLY               0x2
+#define ID0_CTTW                       (1 << 14)
+#define ID0_NUMIRPT_SHIFT              16
+#define ID0_NUMIRPT_MASK               0xff
+#define ID0_NUMSMRG_SHIFT              0
+#define ID0_NUMSMRG_MASK               0xff
+
+#define ID1_PAGESIZE                   (1 << 31)
+#define ID1_NUMPAGENDXB_SHIFT          28
+#define ID1_NUMPAGENDXB_MASK           7
+#define ID1_NUMS2CB_SHIFT              16
+#define ID1_NUMS2CB_MASK               0xff
+#define ID1_NUMCB_SHIFT                        0
+#define ID1_NUMCB_MASK                 0xff
+
+#define ID2_OAS_SHIFT                  4
+#define ID2_OAS_MASK                   0xf
+#define ID2_IAS_SHIFT                  0
+#define ID2_IAS_MASK                   0xf
+#define ID2_UBS_SHIFT                  8
+#define ID2_UBS_MASK                   0xf
+#define ID2_PTFS_4K                    (1 << 12)
+#define ID2_PTFS_16K                   (1 << 13)
+#define ID2_PTFS_64K                   (1 << 14)
+
+#define PIDR2_ARCH_SHIFT               4
+#define PIDR2_ARCH_MASK                        0xf
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_STLBIALL          0x60
+#define ARM_SMMU_GR0_TLBIVMID          0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH       0x68
+#define ARM_SMMU_GR0_TLBIALLH          0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC         0x70
+#define ARM_SMMU_GR0_sTLBGSTATUS       0x74
+#define sTLBGSTATUS_GSACTIVE           (1 << 0)
+#define TLB_LOOP_TIMEOUT               1000000 /* 1s! */
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n)            (0x800 + ((n) << 2))
+#define SMR_VALID                      (1 << 31)
+#define SMR_MASK_SHIFT                 16
+#define SMR_MASK_MASK                  0x7fff
+#define SMR_ID_SHIFT                   0
+#define SMR_ID_MASK                    0x7fff
+
+#define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
+#define S2CR_CBNDX_SHIFT               0
+#define S2CR_CBNDX_MASK                        0xff
+#define S2CR_TYPE_SHIFT                        16
+#define S2CR_TYPE_MASK                 0x3
+#define S2CR_TYPE_TRANS                        (0 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_BYPASS               (1 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_FAULT                        (2 << S2CR_TYPE_SHIFT)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n)           (0x0 + ((n) << 2))
+#define CBAR_VMID_SHIFT                        0
+#define CBAR_VMID_MASK                 0xff
+#define CBAR_S1_MEMATTR_SHIFT          12
+#define CBAR_S1_MEMATTR_MASK           0xf
+#define CBAR_S1_MEMATTR_WB             0xf
+#define CBAR_TYPE_SHIFT                        16
+#define CBAR_TYPE_MASK                 0x3
+#define CBAR_TYPE_S2_TRANS             (0 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_BYPASS   (1 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_FAULT    (2 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_TRANS    (3 << CBAR_TYPE_SHIFT)
+#define CBAR_IRPTNDX_SHIFT             24
+#define CBAR_IRPTNDX_MASK              0xff
+
+#define ARM_SMMU_GR1_CBA2R(n)          (0x800 + ((n) << 2))
+#define CBA2R_RW64_32BIT               (0 << 0)
+#define CBA2R_RW64_64BIT               (1 << 0)
+
+/* Translation context bank */
+#define ARM_SMMU_CB_BASE(smmu)         ((smmu)->base + ((smmu)->size >> 1))
+#define ARM_SMMU_CB(smmu, n)           ((n) * (smmu)->pagesize)
+
+#define ARM_SMMU_CB_SCTLR              0x0
+#define ARM_SMMU_CB_RESUME             0x8
+#define ARM_SMMU_CB_TTBCR2             0x10
+#define ARM_SMMU_CB_TTBR0_LO           0x20
+#define ARM_SMMU_CB_TTBR0_HI           0x24
+#define ARM_SMMU_CB_TTBCR              0x30
+#define ARM_SMMU_CB_S1_MAIR0           0x38
+#define ARM_SMMU_CB_FSR                        0x58
+#define ARM_SMMU_CB_FAR_LO             0x60
+#define ARM_SMMU_CB_FAR_HI             0x64
+#define ARM_SMMU_CB_FSYNR0             0x68
+
+#define SCTLR_S1_ASIDPNE               (1 << 12)
+#define SCTLR_CFCFG                    (1 << 7)
+#define SCTLR_CFIE                     (1 << 6)
+#define SCTLR_CFRE                     (1 << 5)
+#define SCTLR_E                                (1 << 4)
+#define SCTLR_AFE                      (1 << 2)
+#define SCTLR_TRE                      (1 << 1)
+#define SCTLR_M                                (1 << 0)
+#define SCTLR_EAE_SBOP                 (SCTLR_AFE | SCTLR_TRE)
+
+#define RESUME_RETRY                   (0 << 0)
+#define RESUME_TERMINATE               (1 << 0)
+
+#define TTBCR_EAE                      (1 << 31)
+
+#define TTBCR_PASIZE_SHIFT             16
+#define TTBCR_PASIZE_MASK              0x7
+
+#define TTBCR_TG0_4K                   (0 << 14)
+#define TTBCR_TG0_64K                  (1 << 14)
+
+#define TTBCR_SH0_SHIFT                        12
+#define TTBCR_SH0_MASK                 0x3
+#define TTBCR_SH_NS                    0
+#define TTBCR_SH_OS                    2
+#define TTBCR_SH_IS                    3
+
+#define TTBCR_ORGN0_SHIFT              10
+#define TTBCR_IRGN0_SHIFT              8
+#define TTBCR_RGN_MASK                 0x3
+#define TTBCR_RGN_NC                   0
+#define TTBCR_RGN_WBWA                 1
+#define TTBCR_RGN_WT                   2
+#define TTBCR_RGN_WB                   3
+
+#define TTBCR_SL0_SHIFT                        6
+#define TTBCR_SL0_MASK                 0x3
+#define TTBCR_SL0_LVL_2                        0
+#define TTBCR_SL0_LVL_1                        1
+
+#define TTBCR_T1SZ_SHIFT               16
+#define TTBCR_T0SZ_SHIFT               0
+#define TTBCR_SZ_MASK                  0xf
+
+#define TTBCR2_SEP_SHIFT               15
+#define TTBCR2_SEP_MASK                        0x7
+
+#define TTBCR2_PASIZE_SHIFT            0
+#define TTBCR2_PASIZE_MASK             0x7
+
+/* Common definitions for PASize and SEP fields */
+#define TTBCR2_ADDR_32                 0
+#define TTBCR2_ADDR_36                 1
+#define TTBCR2_ADDR_40                 2
+#define TTBCR2_ADDR_42                 3
+#define TTBCR2_ADDR_44                 4
+#define TTBCR2_ADDR_48                 5
+
+#define MAIR_ATTR_SHIFT(n)             ((n) << 3)
+#define MAIR_ATTR_MASK                 0xff
+#define MAIR_ATTR_DEVICE               0x04
+#define MAIR_ATTR_NC                   0x44
+#define MAIR_ATTR_WBRWA                        0xff
+#define MAIR_ATTR_IDX_NC               0
+#define MAIR_ATTR_IDX_CACHE            1
+#define MAIR_ATTR_IDX_DEV              2
+
+#define FSR_MULTI                      (1 << 31)
+#define FSR_SS                         (1 << 30)
+#define FSR_UUT                                (1 << 8)
+#define FSR_ASF                                (1 << 7)
+#define FSR_TLBLKF                     (1 << 6)
+#define FSR_TLBMCF                     (1 << 5)
+#define FSR_EF                         (1 << 4)
+#define FSR_PF                         (1 << 3)
+#define FSR_AFF                                (1 << 2)
+#define FSR_TF                         (1 << 1)
+
+#define FSR_IGN                                (FSR_AFF | FSR_ASF | FSR_TLBMCF |       \
+                                        FSR_TLBLKF)
+#define FSR_FAULT                      (FSR_MULTI | FSR_SS | FSR_UUT |         \
+                                        FSR_EF | FSR_PF | FSR_TF)
+
+#define FSYNR0_WNR                     (1 << 4)
+
+struct arm_smmu_smr {
+       u8                              idx;
+       u16                             mask;
+       u16                             id;
+};
+
+struct arm_smmu_master {
+       struct device_node              *of_node;
+
+       /*
+        * The following is specific to the master's position in the
+        * SMMU chain.
+        */
+       struct rb_node                  node;
+       int                             num_streamids;
+       u16                             streamids[MAX_MASTER_STREAMIDS];
+
+       /*
+        * We only need to allocate these on the root SMMU, as we
+        * configure unmatched streams to bypass translation.
+        */
+       struct arm_smmu_smr             *smrs;
+};
+
+struct arm_smmu_device {
+       struct device                   *dev;
+       struct device_node              *parent_of_node;
+
+       void __iomem                    *base;
+       unsigned long                   size;
+       unsigned long                   pagesize;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK    (1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH     (1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1         (1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2         (1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED     (1 << 4)
+       u32                             features;
+       int                             version;
+
+       u32                             num_context_banks;
+       u32                             num_s2_context_banks;
+       DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+       atomic_t                        irptndx;
+
+       u32                             num_mapping_groups;
+       DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+
+       unsigned long                   input_size;
+       unsigned long                   s1_output_size;
+       unsigned long                   s2_output_size;
+
+       u32                             num_global_irqs;
+       u32                             num_context_irqs;
+       unsigned int                    *irqs;
+
+       DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);
+
+       struct list_head                list;
+       struct rb_root                  masters;
+};
+
+struct arm_smmu_cfg {
+       struct arm_smmu_device          *smmu;
+       u8                              vmid;
+       u8                              cbndx;
+       u8                              irptndx;
+       u32                             cbar;
+       pgd_t                           *pgd;
+};
+
+struct arm_smmu_domain {
+       /*
+        * A domain can span across multiple, chained SMMUs and requires
+        * all devices within the domain to follow the same translation
+        * path.
+        */
+       struct arm_smmu_device          *leaf_smmu;
+       struct arm_smmu_cfg             root_cfg;
+       phys_addr_t                     output_mask;
+
+       spinlock_t                      lock;
+};
+
+static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+static LIST_HEAD(arm_smmu_devices);
+
+static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
+                                               struct device_node *dev_node)
+{
+       struct rb_node *node = smmu->masters.rb_node;
+
+       while (node) {
+               struct arm_smmu_master *master;
+               master = container_of(node, struct arm_smmu_master, node);
+
+               if (dev_node < master->of_node)
+                       node = node->rb_left;
+               else if (dev_node > master->of_node)
+                       node = node->rb_right;
+               else
+                       return master;
+       }
+
+       return NULL;
+}
+
+static int insert_smmu_master(struct arm_smmu_device *smmu,
+                             struct arm_smmu_master *master)
+{
+       struct rb_node **new, *parent;
+
+       new = &smmu->masters.rb_node;
+       parent = NULL;
+       while (*new) {
+               struct arm_smmu_master *this;
+               this = container_of(*new, struct arm_smmu_master, node);
+
+               parent = *new;
+               if (master->of_node < this->of_node)
+                       new = &((*new)->rb_left);
+               else if (master->of_node > this->of_node)
+                       new = &((*new)->rb_right);
+               else
+                       return -EEXIST;
+       }
+
+       rb_link_node(&master->node, parent, new);
+       rb_insert_color(&master->node, &smmu->masters);
+       return 0;
+}
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+                               struct device *dev,
+                               struct of_phandle_args *masterspec)
+{
+       int i;
+       struct arm_smmu_master *master;
+
+       master = find_smmu_master(smmu, masterspec->np);
+       if (master) {
+               dev_err(dev,
+                       "rejecting multiple registrations for master device %s\n",
+                       masterspec->np->name);
+               return -EBUSY;
+       }
+
+       if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+               dev_err(dev,
+                       "reached maximum number (%d) of stream IDs for master device %s\n",
+                       MAX_MASTER_STREAMIDS, masterspec->np->name);
+               return -ENOSPC;
+       }
+
+       master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return -ENOMEM;
+
+       master->of_node         = masterspec->np;
+       master->num_streamids   = masterspec->args_count;
+
+       for (i = 0; i < master->num_streamids; ++i)
+               master->streamids[i] = masterspec->args[i];
+
+       return insert_smmu_master(smmu, master);
+}
+
+static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
+{
+       struct arm_smmu_device *parent;
+
+       if (!smmu->parent_of_node)
+               return NULL;
+
+       spin_lock(&arm_smmu_devices_lock);
+       list_for_each_entry(parent, &arm_smmu_devices, list)
+               if (parent->dev->of_node == smmu->parent_of_node)
+                       goto out_unlock;
+
+       parent = NULL;
+       dev_warn(smmu->dev,
+                "Failed to find SMMU parent despite parent in DT\n");
+out_unlock:
+       spin_unlock(&arm_smmu_devices_lock);
+       return parent;
+}
+
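+/* Atomically claim a free index in the range [start, end) or return -ENOSPC */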
+static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+{
+       int idx;
+
+       do {
+               idx = find_next_zero_bit(map, end, start);
+               if (idx == end)
+                       return -ENOSPC;
+       } while (test_and_set_bit(idx, map));
+
+       return idx;
+}
+
+static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
+{
+       clear_bit(idx, map);
+}
+
+/* Wait for any pending TLB invalidations to complete */
+static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+{
+       int count = 0;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
+       while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
+              & sTLBGSTATUS_GSACTIVE) {
+               cpu_relax();
+               if (++count == TLB_LOOP_TIMEOUT) {
+                       dev_err_ratelimited(smmu->dev,
+                       "TLB sync timed out -- SMMU may be deadlocked\n");
+                       return;
+               }
+               udelay(1);
+       }
+}
+
+static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
+{
+       int flags, ret;
+       u32 fsr, far, fsynr, resume;
+       unsigned long iova;
+       struct iommu_domain *domain = dev;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       void __iomem *cb_base;
+
+       cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+       fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+
+       if (!(fsr & FSR_FAULT))
+               return IRQ_NONE;
+
+       if (fsr & FSR_IGN)
+               dev_err_ratelimited(smmu->dev,
+                                   "Unexpected context fault (fsr 0x%x)\n",
+                                   fsr);
+
+       fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+       flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+       far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
+       iova = far;
+#ifdef CONFIG_64BIT
+       far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
+       iova |= ((unsigned long)far << 32);
+#endif
+
+       if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
+               ret = IRQ_HANDLED;
+               resume = RESUME_RETRY;
+       } else {
+               ret = IRQ_NONE;
+               resume = RESUME_TERMINATE;
+       }
+
+       /* Clear the faulting FSR */
+       writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+       /* Retry or terminate any stalled transactions */
+       if (fsr & FSR_SS)
+               writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+
+       return ret;
+}
+
+static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
+{
+       u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+       struct arm_smmu_device *smmu = dev;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+       gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+       gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+       gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+       dev_err_ratelimited(smmu->dev,
+               "Unexpected global fault, this could be serious\n");
+       dev_err_ratelimited(smmu->dev,
+               "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+               gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+       writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+       return IRQ_NONE;
+}
+
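+/*
+ * Program the context bank registers (CBAR/CBA2R, TTBCR/TTBCR2, TTBR0,
+ * MAIR0 and SCTLR) for a freshly initialised domain and invalidate its TLB.
+ */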
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+{
+       u32 reg;
+       bool stage1;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       void __iomem *cb_base, *gr0_base, *gr1_base;
+
+       gr0_base = ARM_SMMU_GR0(smmu);
+       gr1_base = ARM_SMMU_GR1(smmu);
+       stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
+       cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+
+       /* CBAR */
+       reg = root_cfg->cbar |
+             (root_cfg->vmid << CBAR_VMID_SHIFT);
+       if (smmu->version == 1)
+             reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+
+       /* Use the weakest memory type, so it is overridden by the pte */
+       if (stage1)
+               reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+       writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
+
+       if (smmu->version > 1) {
+               /* CBA2R */
+#ifdef CONFIG_64BIT
+               reg = CBA2R_RW64_64BIT;
+#else
+               reg = CBA2R_RW64_32BIT;
+#endif
+               writel_relaxed(reg,
+                              gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));
+
+               /* TTBCR2 */
+               switch (smmu->input_size) {
+               case 32:
+                       reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 36:
+                       reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 39:
+                       reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 42:
+                       reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 44:
+                       reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 48:
+                       reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+                       break;
+               }
+
+               switch (smmu->s1_output_size) {
+               case 32:
+                       reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 36:
+                       reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 39:
+                       reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 42:
+                       reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 44:
+                       reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 48:
+                       reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               }
+
+               if (stage1)
+                       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+       }
+
+       /* TTBR0 */
+       reg = __pa(root_cfg->pgd);
+#ifndef __BIG_ENDIAN
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+       reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+#else
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+       reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+#endif
+
+       /*
+        * TTBCR
+        * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
+        */
+       if (smmu->version > 1) {
+               if (PAGE_SIZE == SZ_4K)
+                       reg = TTBCR_TG0_4K;
+               else
+                       reg = TTBCR_TG0_64K;
+
+               if (!stage1) {
+                       switch (smmu->s2_output_size) {
+                       case 32:
+                               reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 36:
+                               reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 40:
+                               reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 42:
+                               reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 44:
+                               reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 48:
+                               reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       }
+               } else {
+                       reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+               }
+       } else {
+               reg = 0;
+       }
+
+       reg |= TTBCR_EAE |
+             (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
+             (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
+             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
+             (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+
+       /* MAIR0 (stage-1 only) */
+       if (stage1) {
+               reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
+                     (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
+                     (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+               writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+       }
+
+       /* Nuke the TLB */
+       writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+       arm_smmu_tlb_sync(smmu);
+
+       /* SCTLR */
+       reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+       if (stage1)
+               reg |= SCTLR_S1_ASIDPNE;
+#ifdef __BIG_ENDIAN
+       reg |= SCTLR_E;
+#endif
+       writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+}
+
+static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+                                       struct device *dev)
+{
+       int irq, ret, start;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu, *parent;
+
+       /*
+        * Walk the SMMU chain to find the root device for this chain.
+        * We assume that no masters have translations which terminate
+        * early, and therefore check that the root SMMU does indeed have
+        * a StreamID for the master in question.
+        */
+       parent = dev->archdata.iommu;
+       smmu_domain->output_mask = -1;
+       do {
+               smmu = parent;
+               smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
+       } while ((parent = find_parent_smmu(smmu)));
+
+       if (!find_smmu_master(smmu, dev->of_node)) {
+               dev_err(dev, "unable to find root SMMU for device\n");
+               return -ENODEV;
+       }
+
+       ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
+       if (IS_ERR_VALUE(ret))
+               return ret;
+
+       root_cfg->vmid = ret;
+       if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+               /*
+                * We will likely want to change this if/when KVM gets
+                * involved.
+                */
+               root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+               start = smmu->num_s2_context_banks;
+       } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
+               root_cfg->cbar = CBAR_TYPE_S2_TRANS;
+               start = 0;
+       } else {
+               root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+               start = smmu->num_s2_context_banks;
+       }
+
+       ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+                                     smmu->num_context_banks);
+       if (IS_ERR_VALUE(ret))
+               goto out_free_vmid;
+
+       root_cfg->cbndx = ret;
+
+       if (smmu->version == 1) {
+               root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+               root_cfg->irptndx %= smmu->num_context_irqs;
+       } else {
+               root_cfg->irptndx = root_cfg->cbndx;
+       }
+
+       irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+       ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
+                         "arm-smmu-context-fault", domain);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+                       root_cfg->irptndx, irq);
+               root_cfg->irptndx = -1;
+               goto out_free_context;
+       }
+
+       root_cfg->smmu = smmu;
+       arm_smmu_init_context_bank(smmu_domain);
+       return ret;
+
+out_free_context:
+       __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+out_free_vmid:
+       __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+       return ret;
+}
+
+static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       int irq;
+
+       if (!smmu)
+               return;
+
+       if (root_cfg->irptndx != -1) {
+               irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+               free_irq(irq, domain);
+       }
+
+       __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+       __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+}
+
+static int arm_smmu_domain_init(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain;
+       pgd_t *pgd;
+
+       /*
+        * Allocate the domain and initialise some of its data structures.
+        * We can't really do anything meaningful until we've added a
+        * master.
+        */
+       smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+       if (!smmu_domain)
+               return -ENOMEM;
+
+       pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       if (!pgd)
+               goto out_free_domain;
+       smmu_domain->root_cfg.pgd = pgd;
+
+       spin_lock_init(&smmu_domain->lock);
+       domain->priv = smmu_domain;
+       return 0;
+
+out_free_domain:
+       kfree(smmu_domain);
+       return -ENOMEM;
+}
+
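+/* Free the pte page hanging off a pmd entry */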
+static void arm_smmu_free_ptes(pmd_t *pmd)
+{
+       pgtable_t table = pmd_pgtable(*pmd);
+       pgtable_page_dtor(table);
+       __free_page(table);
+}
+
+static void arm_smmu_free_pmds(pud_t *pud)
+{
+       int i;
+       pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
+
+       pmd = pmd_base;
+       for (i = 0; i < PTRS_PER_PMD; ++i) {
+               if (pmd_none(*pmd))
+                       continue;
+
+               arm_smmu_free_ptes(pmd);
+               pmd++;
+       }
+
+       pmd_free(NULL, pmd_base);
+}
+
+static void arm_smmu_free_puds(pgd_t *pgd)
+{
+       int i;
+       pud_t *pud, *pud_base = pud_offset(pgd, 0);
+
+       pud = pud_base;
+       for (i = 0; i < PTRS_PER_PUD; ++i) {
+               if (pud_none(*pud))
+                       continue;
+
+               arm_smmu_free_pmds(pud);
+               pud++;
+       }
+
+       pud_free(NULL, pud_base);
+}
+
+static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
+{
+       int i;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       pgd_t *pgd, *pgd_base = root_cfg->pgd;
+
+       /*
+        * Recursively free the page tables for this domain. We don't
+        * care about speculative TLB filling, because the TLB will be
+        * nuked next time this context bank is re-allocated and no devices
+        * currently map to these tables.
+        */
+       pgd = pgd_base;
+       for (i = 0; i < PTRS_PER_PGD; ++i) {
+               if (pgd_none(*pgd))
+                       continue;
+               arm_smmu_free_puds(pgd);
+               pgd++;
+       }
+
+       kfree(pgd_base);
+}
+
+static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       arm_smmu_destroy_domain_context(domain);
+       arm_smmu_free_pgtables(smmu_domain);
+       kfree(smmu_domain);
+}
+
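+/*
+ * For stream-matching SMMUs, allocate an SMR for each of the master's
+ * StreamIDs and program it into the hardware. Stream-indexing SMMUs
+ * return 0 immediately.
+ */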
+static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
+                                         struct arm_smmu_master *master)
+{
+       int i;
+       struct arm_smmu_smr *smrs;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
+               return 0;
+
+       if (master->smrs)
+               return -EEXIST;
+
+       smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL);
+       if (!smrs) {
+               dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n",
+                       master->num_streamids, master->of_node->name);
+               return -ENOMEM;
+       }
+
+       /* Allocate the SMRs on the root SMMU */
+       for (i = 0; i < master->num_streamids; ++i) {
+               int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+                                                 smmu->num_mapping_groups);
+               if (IS_ERR_VALUE(idx)) {
+                       dev_err(smmu->dev, "failed to allocate free SMR\n");
+                       goto err_free_smrs;
+               }
+
+               smrs[i] = (struct arm_smmu_smr) {
+                       .idx    = idx,
+                       .mask   = 0, /* We don't currently share SMRs */
+                       .id     = master->streamids[i],
+               };
+       }
+
+       /* It worked! Now, poke the actual hardware */
+       for (i = 0; i < master->num_streamids; ++i) {
+               u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
+                         smrs[i].mask << SMR_MASK_SHIFT;
+               writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
+       }
+
+       master->smrs = smrs;
+       return 0;
+
+err_free_smrs:
+       while (--i >= 0)
+               __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
+       kfree(smrs);
+       return -ENOSPC;
+}
+
+static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
+                                     struct arm_smmu_master *master)
+{
+       int i;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       struct arm_smmu_smr *smrs = master->smrs;
+
+       /* Invalidate the SMRs before freeing back to the allocator */
+       for (i = 0; i < master->num_streamids; ++i) {
+               u8 idx = smrs[i].idx;
+               writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
+               __arm_smmu_free_bitmap(smmu->smr_map, idx);
+       }
+
+       master->smrs = NULL;
+       kfree(smrs);
+}
+
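+/* Set the S2CR indexed by each of the master's StreamIDs to bypass */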
+static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
+                                          struct arm_smmu_master *master)
+{
+       int i;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       for (i = 0; i < master->num_streamids; ++i) {
+               u16 sid = master->streamids[i];
+               writel_relaxed(S2CR_TYPE_BYPASS,
+                              gr0_base + ARM_SMMU_GR0_S2CR(sid));
+       }
+}
+
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+                                     struct arm_smmu_master *master)
+{
+       int i, ret;
+       struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       ret = arm_smmu_master_configure_smrs(smmu, master);
+       if (ret)
+               return ret;
+
+       /* Bypass the leaves */
+       smmu = smmu_domain->leaf_smmu;
+       while ((parent = find_parent_smmu(smmu))) {
+               /*
+                * We won't have a StreamID match for anything but the root
+                * smmu, so we only need to worry about StreamID indexing,
+                * where we must install bypass entries in the S2CRs.
+                */
+               if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)
+                       continue;
+
+               arm_smmu_bypass_stream_mapping(smmu, master);
+               smmu = parent;
+       }
+
+       /* Now we're at the root, time to point at our context bank */
+       for (i = 0; i < master->num_streamids; ++i) {
+               u32 idx, s2cr;
+               idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
+               s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+                      (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
+               writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+       }
+
+       return 0;
+}
+
+static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
+                                         struct arm_smmu_master *master)
+{
+       struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;
+
+       /*
+        * We *must* clear the S2CR first, because freeing the SMR means
+        * that it can be re-allocated immediately.
+        */
+       arm_smmu_bypass_stream_mapping(smmu, master);
+       arm_smmu_master_free_smrs(smmu, master);
+}
+
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+       int ret = -EINVAL;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_device *device_smmu = dev->archdata.iommu;
+       struct arm_smmu_master *master;
+
+       if (!device_smmu) {
+               dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
+               return -ENXIO;
+       }
+
+       /*
+        * Sanity check the domain. We don't currently support domains
+        * that cross between different SMMU chains.
+        */
+       spin_lock(&smmu_domain->lock);
+       if (!smmu_domain->leaf_smmu) {
+               /* Now that we have a master, we can finalise the domain */
+               ret = arm_smmu_init_domain_context(domain, dev);
+               if (IS_ERR_VALUE(ret))
+                       goto err_unlock;
+
+               smmu_domain->leaf_smmu = device_smmu;
+       } else if (smmu_domain->leaf_smmu != device_smmu) {
+               dev_err(dev,
+                       "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
+                       dev_name(smmu_domain->leaf_smmu->dev),
+                       dev_name(device_smmu->dev));
+               goto err_unlock;
+       }
+       spin_unlock(&smmu_domain->lock);
+
+       /* Looks ok, so add the device to the domain */
+       master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+       if (!master)
+               return -ENODEV;
+
+       return arm_smmu_domain_add_master(smmu_domain, master);
+
+err_unlock:
+       spin_unlock(&smmu_domain->lock);
+       return ret;
+}
+
+static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_master *master;
+
+       master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+       if (master)
+               arm_smmu_domain_remove_master(smmu_domain, master);
+}
+
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+                                  size_t size)
+{
+       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+       /*
+        * If the SMMU can't walk tables in the CPU caches, treat them
+        * like non-coherent DMA since we need to flush the new entries
+        * all the way out to memory. There's no possibility of recursion
+        * here as the SMMU table walker will not be wired through another
+        * SMMU.
+        */
+       if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
+               dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+                            DMA_TO_DEVICE);
+}
+
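+/*
+ * True if addr is aligned to ARM_SMMU_PTE_CONT_SIZE and [addr, end) covers
+ * at least one full contiguous-hint region.
+ */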
+static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
+                                            unsigned long end)
+{
+       return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
+               (addr + ARM_SMMU_PTE_CONT_SIZE <= end);
+}
+
+static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long pfn, int flags, int stage)
+{
+       pte_t *pte, *start;
+       pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+
+       if (pmd_none(*pmd)) {
+               /* Allocate a new set of tables */
+               pgtable_t table = alloc_page(PGALLOC_GFP);
+               if (!table)
+                       return -ENOMEM;
+
+               arm_smmu_flush_pgtable(smmu, page_address(table),
+                                      ARM_SMMU_PTE_HWTABLE_SIZE);
+               pgtable_page_ctor(table);
+               pmd_populate(NULL, pmd, table);
+               arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
+       }
+
+       if (stage == 1) {
+               pteval |= ARM_SMMU_PTE_AP_UNPRIV;
+               if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+                       pteval |= ARM_SMMU_PTE_AP_RDONLY;
+
+               if (flags & IOMMU_CACHE)
+                       pteval |= (MAIR_ATTR_IDX_CACHE <<
+                                  ARM_SMMU_PTE_ATTRINDX_SHIFT);
+       } else {
+               pteval |= ARM_SMMU_PTE_HAP_FAULT;
+               if (flags & IOMMU_READ)
+                       pteval |= ARM_SMMU_PTE_HAP_READ;
+               if (flags & IOMMU_WRITE)
+                       pteval |= ARM_SMMU_PTE_HAP_WRITE;
+               if (flags & IOMMU_CACHE)
+                       pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
+               else
+                       pteval |= ARM_SMMU_PTE_MEMATTR_NC;
+       }
+
+       /* If no access, create a faulting entry to avoid TLB fills */
+       if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+               pteval &= ~ARM_SMMU_PTE_PAGE;
+
+       pteval |= ARM_SMMU_PTE_SH_IS;
+       start = pmd_page_vaddr(*pmd) + pte_index(addr);
+       pte = start;
+
+       /*
+        * Install the page table entries. This is fairly complicated
+        * since we attempt to make use of the contiguous hint in the
+        * ptes where possible. The contiguous hint indicates a series
+        * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
+        * contiguous region with the following constraints:
+        *
+        *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
+        *   - Each pte in the region has the contiguous hint bit set
+        *
+        * This complicates unmapping (also handled by this code, when
+        * neither IOMMU_READ nor IOMMU_WRITE is set) because it is
+        * possible, yet highly unlikely, that a client may unmap only
+        * part of a contiguous range. This requires clearing of the
+        * contiguous hint bits in the range before installing the new
+        * faulting entries.
+        *
+        * Note that re-mapping an address range without first unmapping
+        * it is not supported, so TLB invalidation is not required here
+        * and is instead performed at unmap and domain-init time.
+        */
+       do {
+               int i = 1;
+               pteval &= ~ARM_SMMU_PTE_CONT;
+
+               if (arm_smmu_pte_is_contiguous_range(addr, end)) {
+                       i = ARM_SMMU_PTE_CONT_ENTRIES;
+                       pteval |= ARM_SMMU_PTE_CONT;
+               } else if (pte_val(*pte) &
+                          (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
+                       int j;
+                       pte_t *cont_start;
+                       unsigned long idx = pte_index(addr);
+
+                       idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
+                       cont_start = pmd_page_vaddr(*pmd) + idx;
+                       for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
+                               pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;
+
+                       arm_smmu_flush_pgtable(smmu, cont_start,
+                                              sizeof(*pte) *
+                                              ARM_SMMU_PTE_CONT_ENTRIES);
+               }
+
+               do {
+                       *pte = pfn_pte(pfn, __pgprot(pteval));
+               } while (pte++, pfn++, addr += PAGE_SIZE, --i);
+       } while (addr != end);
+
+       arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
+       return 0;
+}
+
+static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
+                                  unsigned long addr, unsigned long end,
+                                  phys_addr_t phys, int flags, int stage)
+{
+       int ret;
+       pmd_t *pmd;
+       unsigned long next, pfn = __phys_to_pfn(phys);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+       if (pud_none(*pud)) {
+               pmd = pmd_alloc_one(NULL, addr);
+               if (!pmd)
+                       return -ENOMEM;
+       } else
+#endif
+               pmd = pmd_offset(pud, addr);
+
+       do {
+               next = pmd_addr_end(addr, end);
+               ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+                                             flags, stage);
+               pud_populate(NULL, pud, pmd);
+               arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+               phys += next - addr;
+       } while (pmd++, addr = next, addr < end);
+
+       return ret;
+}
+
+static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
+                                  unsigned long addr, unsigned long end,
+                                  phys_addr_t phys, int flags, int stage)
+{
+       int ret = 0;
+       pud_t *pud;
+       unsigned long next;
+
+#ifndef __PAGETABLE_PUD_FOLDED
+       if (pgd_none(*pgd)) {
+               pud = pud_alloc_one(NULL, addr);
+               if (!pud)
+                       return -ENOMEM;
+       } else
+#endif
+               pud = pud_offset(pgd, addr);
+
+       do {
+               next = pud_addr_end(addr, end);
+               ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
+                                             flags, stage);
+               pgd_populate(NULL, pud, pgd);
+               arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+               phys += next - addr;
+       } while (pud++, addr = next, addr < end);
+
+       return ret;
+}
+
+static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+                                  unsigned long iova, phys_addr_t paddr,
+                                  size_t size, int flags)
+{
+       int ret, stage;
+       unsigned long end;
+       phys_addr_t input_mask, output_mask;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       pgd_t *pgd = root_cfg->pgd;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+
+       if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
+               stage = 2;
+               output_mask = (1ULL << smmu->s2_output_size) - 1;
+       } else {
+               stage = 1;
+               output_mask = (1ULL << smmu->s1_output_size) - 1;
+       }
+
+       if (!pgd)
+               return -EINVAL;
+
+       if (size & ~PAGE_MASK)
+               return -EINVAL;
+
+       input_mask = (1ULL << smmu->input_size) - 1;
+       if ((phys_addr_t)iova & ~input_mask)
+               return -ERANGE;
+
+       if (paddr & ~output_mask)
+               return -ERANGE;
+
+       spin_lock(&smmu_domain->lock);
+       pgd += pgd_index(iova);
+       end = iova + size;
+       do {
+               unsigned long next = pgd_addr_end(iova, end);
+
+               ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
+                                             flags, stage);
+               if (ret)
+                       goto out_unlock;
+
+               paddr += next - iova;
+               iova = next;
+       } while (pgd++, iova != end);
+
+out_unlock:
+       spin_unlock(&smmu_domain->lock);
+
+       /* Ensure new page tables are visible to the hardware walker */
+       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+               dsb();
+
+       return ret;
+}
+
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+                       phys_addr_t paddr, size_t size, int flags)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
+
+       if (!smmu_domain || !smmu)
+               return -ENODEV;
+
+       /* Check for silent address truncation up the SMMU chain. */
+       if ((phys_addr_t)iova & ~smmu_domain->output_mask)
+               return -ERANGE;
+
+       return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+}
+
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+                            size_t size)
+{
+       int ret;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
+       writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+       arm_smmu_tlb_sync(smmu);
+       return ret ? ret : size;
+}
+
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+                                        dma_addr_t iova)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+
+       spin_lock(&smmu_domain->lock);
+       pgd = root_cfg->pgd;
+       if (!pgd)
+               goto err_unlock;
+
+       pgd += pgd_index(iova);
+       if (pgd_none_or_clear_bad(pgd))
+               goto err_unlock;
+
+       pud = pud_offset(pgd, iova);
+       if (pud_none_or_clear_bad(pud))
+               goto err_unlock;
+
+       pmd = pmd_offset(pud, iova);
+       if (pmd_none_or_clear_bad(pmd))
+               goto err_unlock;
+
+       pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+       if (pte_none(*pte))
+               goto err_unlock;
+
+       spin_unlock(&smmu_domain->lock);
+       return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+
+err_unlock:
+       spin_unlock(&smmu_domain->lock);
+       dev_warn(smmu->dev,
+                "invalid (corrupt?) page tables detected for iova 0x%llx\n",
+                (unsigned long long)iova);
+       return -EINVAL;
+}
+
+static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
+                                  unsigned long cap)
+{
+       unsigned long caps = 0;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+
+       if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+               caps |= IOMMU_CAP_CACHE_COHERENCY;
+
+       return !!(cap & caps);
+}
+
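+/*
+ * Find the SMMU closest to the device: one that lists it as a master and has
+ * no child SMMU also listing it. Record the result in dev->archdata.iommu.
+ */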
+static int arm_smmu_add_device(struct device *dev)
+{
+       struct arm_smmu_device *child, *parent, *smmu;
+       struct arm_smmu_master *master = NULL;
+
+       spin_lock(&arm_smmu_devices_lock);
+       list_for_each_entry(parent, &arm_smmu_devices, list) {
+               smmu = parent;
+
+               /* Try to find a child of the current SMMU. */
+               list_for_each_entry(child, &arm_smmu_devices, list) {
+                       if (child->parent_of_node == parent->dev->of_node) {
+                               /* Does the child sit above our master? */
+                               master = find_smmu_master(child, dev->of_node);
+                               if (master) {
+                                       smmu = NULL;
+                                       break;
+                               }
+                       }
+               }
+
+               /* A child SMMU masters the device; keep looking for the leaf. */
+               if (!smmu) {
+                       master = NULL;
+                       continue;
+               }
+
+               master = find_smmu_master(smmu, dev->of_node);
+               if (master)
+                       break;
+       }
+       spin_unlock(&arm_smmu_devices_lock);
+
+       if (!master)
+               return -ENODEV;
+
+       dev->archdata.iommu = smmu;
+       return 0;
+}
+
+static void arm_smmu_remove_device(struct device *dev)
+{
+       dev->archdata.iommu = NULL;
+}
+
+static struct iommu_ops arm_smmu_ops = {
+       .domain_init    = arm_smmu_domain_init,
+       .domain_destroy = arm_smmu_domain_destroy,
+       .attach_dev     = arm_smmu_attach_dev,
+       .detach_dev     = arm_smmu_detach_dev,
+       .map            = arm_smmu_map,
+       .unmap          = arm_smmu_unmap,
+       .iova_to_phys   = arm_smmu_iova_to_phys,
+       .domain_has_cap = arm_smmu_domain_has_cap,
+       .add_device     = arm_smmu_add_device,
+       .remove_device  = arm_smmu_remove_device,
+       .pgsize_bitmap  = (SECTION_SIZE |
+                          ARM_SMMU_PTE_CONT_SIZE |
+                          PAGE_SIZE),
+};
+
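+/*
+ * Put the SMMU into a known state: mark every SMR invalid, set every S2CR to
+ * bypass, invalidate the TLBs and enable global fault reporting.
+ */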
+static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+{
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       int i = 0;
+       u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
+       /* Mark all SMRn as invalid and all S2CRn as bypass */
+       for (i = 0; i < smmu->num_mapping_groups; ++i) {
+               writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+               writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
+       }
+
+       /* Invalidate the TLB, just in case */
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+
+       /* Enable fault reporting */
+       scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+
+       /* Disable TLB broadcasting. */
+       scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+
+       /* Enable client access, but bypass when no mapping is found */
+       scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+
+       /* Disable forced broadcasting */
+       scr0 &= ~sCR0_FB;
+
+       /* Don't upgrade barriers */
+       scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+
+       /* Push the button */
+       arm_smmu_tlb_sync(smmu);
+       writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+}
+
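+/* Decode the ID register address-size field encoding into a number of bits */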
+static int arm_smmu_id_size_to_bits(int size)
+{
+       switch (size) {
+       case 0:
+               return 32;
+       case 1:
+               return 36;
+       case 2:
+               return 40;
+       case 3:
+               return 42;
+       case 4:
+               return 44;
+       case 5:
+       default:
+               return 48;
+       }
+}
+
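+/* Probe the ID registers to discover what this SMMU implementation supports */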
+static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+{
+       unsigned long size;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       u32 id;
+
+       dev_notice(smmu->dev, "probing hardware configuration...\n");
+
+       /* Primecell ID */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
+       smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
+       dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
+
+       /* ID0 */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+#ifndef CONFIG_64BIT
+       if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
+               dev_err(smmu->dev, "\tno v7 descriptor support!\n");
+               return -ENODEV;
+       }
+#endif
+       if (id & ID0_S1TS) {
+               smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
+               dev_notice(smmu->dev, "\tstage 1 translation\n");
+       }
+
+       if (id & ID0_S2TS) {
+               smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
+               dev_notice(smmu->dev, "\tstage 2 translation\n");
+       }
+
+       if (id & ID0_NTS) {
+               smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
+               dev_notice(smmu->dev, "\tnested translation\n");
+       }
+
+       if (!(smmu->features &
+               (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
+                ARM_SMMU_FEAT_TRANS_NESTED))) {
+               dev_err(smmu->dev, "\tno translation support!\n");
+               return -ENODEV;
+       }
+
+       if (id & ID0_CTTW) {
+               smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+               dev_notice(smmu->dev, "\tcoherent table walk\n");
+       }
+
+       if (id & ID0_SMS) {
+               u32 smr, sid, mask;
+
+               smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
+               smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
+                                          ID0_NUMSMRG_MASK;
+               if (smmu->num_mapping_groups == 0) {
+                       dev_err(smmu->dev,
+                               "stream-matching supported, but no SMRs present!\n");
+                       return -ENODEV;
+               }
+
+               smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
+               smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+               writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+               smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+
+               mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+               sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
+               if ((mask & sid) != sid) {
+                       dev_err(smmu->dev,
+                               "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
+                               mask, sid);
+                       return -ENODEV;
+               }
+
+               dev_notice(smmu->dev,
+                          "\tstream matching with %u register groups, mask 0x%x",
+                          smmu->num_mapping_groups, mask);
+       }
+
+       /* ID1 */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
+       smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
+
+       /* Check that we ioremapped enough */
+       size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+       size *= (smmu->pagesize << 1);
+       if (smmu->size < size)
+               dev_warn(smmu->dev,
+                        "device is 0x%lx bytes but only mapped 0x%lx!\n",
+                        size, smmu->size);
+
+       smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
+                                     ID1_NUMS2CB_MASK;
+       smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
+       if (smmu->num_s2_context_banks > smmu->num_context_banks) {
+               dev_err(smmu->dev, "impossible number of S2 context banks!\n");
+               return -ENODEV;
+       }
+       dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
+                  smmu->num_context_banks, smmu->num_s2_context_banks);
+
+       /* ID2 */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
+       size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+
+       /*
+        * Stage-1 output limited by stage-2 input size due to pgd
+        * allocation (PTRS_PER_PGD).
+        */
+#ifdef CONFIG_64BIT
+       /* Current maximum output size of 39 bits */
+       smmu->s1_output_size = min(39UL, size);
+#else
+       smmu->s1_output_size = min(32UL, size);
+#endif
+
+       /* The stage-2 output mask is also applied for bypass */
+       size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
+       smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);
+
+       if (smmu->version == 1) {
+               smmu->input_size = 32;
+       } else {
+#ifdef CONFIG_64BIT
+               size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+               size = min(39, arm_smmu_id_size_to_bits(size));
+#else
+               size = 32;
+#endif
+               smmu->input_size = size;
+
+               if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
+                   (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
+                   (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
+                       dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
+                               PAGE_SIZE);
+                       return -ENODEV;
+               }
+       }
+
+       dev_notice(smmu->dev,
+                  "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
+                  smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
+       return 0;
+}
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct arm_smmu_device *smmu;
+       struct device_node *dev_node;
+       struct device *dev = &pdev->dev;
+       struct rb_node *node;
+       struct of_phandle_args masterspec;
+       int num_irqs, i, err;
+
+       smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+       if (!smmu) {
+               dev_err(dev, "failed to allocate arm_smmu_device\n");
+               return -ENOMEM;
+       }
+       smmu->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "missing base address/size\n");
+               return -ENODEV;
+       }
+
+       smmu->size = resource_size(res);
+       smmu->base = devm_request_and_ioremap(dev, res);
+       if (!smmu->base)
+               return -EADDRNOTAVAIL;
+
+       if (of_property_read_u32(dev->of_node, "#global-interrupts",
+                                &smmu->num_global_irqs)) {
+               dev_err(dev, "missing #global-interrupts property\n");
+               return -ENODEV;
+       }
+
+       num_irqs = 0;
+       while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
+               num_irqs++;
+               if (num_irqs > smmu->num_global_irqs)
+                       smmu->num_context_irqs++;
+       }
+
+       if (num_irqs < smmu->num_global_irqs) {
+               dev_warn(dev, "found %d interrupts but expected at least %d\n",
+                        num_irqs, smmu->num_global_irqs);
+               smmu->num_global_irqs = num_irqs;
+       }
+       smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
+
+       smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
+                                 GFP_KERNEL);
+       if (!smmu->irqs) {
+               dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_irqs; ++i) {
+               int irq = platform_get_irq(pdev, i);
+               if (irq < 0) {
+                       dev_err(dev, "failed to get irq index %d\n", i);
+                       return -ENODEV;
+               }
+               smmu->irqs[i] = irq;
+       }
+
+       i = 0;
+       smmu->masters = RB_ROOT;
+       while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
+                                          "#stream-id-cells", i,
+                                          &masterspec)) {
+               err = register_smmu_master(smmu, dev, &masterspec);
+               if (err) {
+                       dev_err(dev, "failed to add master %s\n",
+                               masterspec.np->name);
+                       goto out_put_masters;
+               }
+
+               i++;
+       }
+       dev_notice(dev, "registered %d master devices\n", i);
+
+       if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
+               smmu->parent_of_node = dev_node;
+
+       err = arm_smmu_device_cfg_probe(smmu);
+       if (err)
+               goto out_put_parent;
+
+       if (smmu->version > 1 &&
+           smmu->num_context_banks != smmu->num_context_irqs) {
+               dev_err(dev,
+                       "found only %d context interrupt(s) but %d required\n",
+                       smmu->num_context_irqs, smmu->num_context_banks);
+               err = -ENODEV;
+               goto out_put_parent;
+       }
+
+       arm_smmu_device_reset(smmu);
+
+       for (i = 0; i < smmu->num_global_irqs; ++i) {
+               err = request_irq(smmu->irqs[i],
+                                 arm_smmu_global_fault,
+                                 IRQF_SHARED,
+                                 "arm-smmu global fault",
+                                 smmu);
+               if (err) {
+                       dev_err(dev, "failed to request global IRQ %d (%u)\n",
+                               i, smmu->irqs[i]);
+                       goto out_free_irqs;
+               }
+       }
+
+       INIT_LIST_HEAD(&smmu->list);
+       spin_lock(&arm_smmu_devices_lock);
+       list_add(&smmu->list, &arm_smmu_devices);
+       spin_unlock(&arm_smmu_devices_lock);
+       return 0;
+
+out_free_irqs:
+       while (i--)
+               free_irq(smmu->irqs[i], smmu);
+
+out_put_parent:
+       if (smmu->parent_of_node)
+               of_node_put(smmu->parent_of_node);
+
+out_put_masters:
+       for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+               struct arm_smmu_master *master;
+               master = container_of(node, struct arm_smmu_master, node);
+               of_node_put(master->of_node);
+       }
+
+       return err;
+}
+
+static int arm_smmu_device_remove(struct platform_device *pdev)
+{
+       int i;
+       struct device *dev = &pdev->dev;
+       struct arm_smmu_device *curr, *smmu = NULL;
+       struct rb_node *node;
+
+       spin_lock(&arm_smmu_devices_lock);
+       list_for_each_entry(curr, &arm_smmu_devices, list) {
+               if (curr->dev == dev) {
+                       smmu = curr;
+                       list_del(&smmu->list);
+                       break;
+               }
+       }
+       spin_unlock(&arm_smmu_devices_lock);
+
+       if (!smmu)
+               return -ENODEV;
+
+       if (smmu->parent_of_node)
+               of_node_put(smmu->parent_of_node);
+
+       for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+               struct arm_smmu_master *master;
+               master = container_of(node, struct arm_smmu_master, node);
+               of_node_put(master->of_node);
+       }
+
+       if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
+               dev_err(dev, "removing device with active domains!\n");
+
+       for (i = 0; i < smmu->num_global_irqs; ++i)
+               free_irq(smmu->irqs[i], smmu);
+
+       /* Turn the thing off */
+       writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id arm_smmu_of_match[] = {
+       { .compatible = "arm,smmu-v1", },
+       { .compatible = "arm,smmu-v2", },
+       { .compatible = "arm,mmu-400", },
+       { .compatible = "arm,mmu-500", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+#endif
+
+static struct platform_driver arm_smmu_driver = {
+       .driver = {
+               .owner          = THIS_MODULE,
+               .name           = "arm-smmu",
+               .of_match_table = of_match_ptr(arm_smmu_of_match),
+       },
+       .probe  = arm_smmu_device_dt_probe,
+       .remove = arm_smmu_device_remove,
+};
+
+static int __init arm_smmu_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&arm_smmu_driver);
+       if (ret)
+               return ret;
+
+       /* Oh, for a proper bus abstraction */
+       if (!iommu_present(&platform_bus_type))
+               bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+
+       if (!iommu_present(&amba_bustype))
+               bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+
+       return 0;
+}
+
+static void __exit arm_smmu_exit(void)
+{
+       return platform_driver_unregister(&arm_smmu_driver);
+}
+
+module_init(arm_smmu_init);
+module_exit(arm_smmu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
index a7967ce..785675a 100644 (file)
@@ -309,6 +309,7 @@ parse_dmar_table(void)
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
+       int drhd_count = 0;
 
        /*
         * Do it again, earlier dmar_tbl mapping could be mapped with
@@ -347,6 +348,7 @@ parse_dmar_table(void)
 
                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+                       drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
@@ -371,6 +373,8 @@ parse_dmar_table(void)
 
                entry_header = ((void *)entry_header + entry_header->length);
        }
+       if (drhd_count == 0)
+               pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
 }
 
index b4f0e28..eec0d3e 100644 (file)
@@ -4182,14 +4182,27 @@ static int intel_iommu_add_device(struct device *dev)
 
        /*
         * If it's a multifunction device that does not support our
-        * required ACS flags, add to the same group as function 0.
+        * required ACS flags, add to the same group as the lowest numbered
+        * function that also does not support the required ACS flags.
         */
        if (dma_pdev->multifunction &&
-           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-               swap_pci_ref(&dma_pdev,
-                            pci_get_slot(dma_pdev->bus,
-                                         PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-                                         0)));
+           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+               u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+               for (i = 0; i < 8; i++) {
+                       struct pci_dev *tmp;
+
+                       tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+                       if (!tmp)
+                               continue;
+
+                       if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+                               swap_pci_ref(&dma_pdev, tmp);
+                               break;
+                       }
+                       pci_dev_put(tmp);
+               }
+       }
 
        /*
         * Devices on the root bus go through the iommu.  If that's not us,
index 5b19b2d..f71673d 100644 (file)
@@ -664,8 +664,7 @@ error:
         */
 
        if (x2apic_present)
-               WARN(1, KERN_WARNING
-                       "Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
+               pr_warn("Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
 
        return -1;
 }
index d8f98b1..fbe9ca7 100644 (file)
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+                          unsigned long addr_merge, size_t size)
+{
+       unsigned int pgsize_idx;
+       size_t pgsize;
+
+       /* Max page size that still fits into 'size' */
+       pgsize_idx = __fls(size);
+
+       /* need to consider alignment requirements ? */
+       if (likely(addr_merge)) {
+               /* Max page size allowed by address */
+               unsigned int align_pgsize_idx = __ffs(addr_merge);
+               pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+       }
+
+       /* build a mask of acceptable page sizes */
+       pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+       /* throw away page sizes not supported by the hardware */
+       pgsize &= domain->ops->pgsize_bitmap;
+
+       /* make sure we're still sane */
+       BUG_ON(!pgsize);
+
+       /* pick the biggest page */
+       pgsize_idx = __fls(pgsize);
+       pgsize = 1UL << pgsize_idx;
+
+       return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
 {
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-               pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-                       "0x%x\n", iova, (unsigned long)paddr,
-                       (unsigned long)size, min_pagesz);
+               pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+                      iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }
 
-       pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-                               (unsigned long)paddr, (unsigned long)size);
+       pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
 
        while (size) {
-               unsigned long pgsize, addr_merge = iova | paddr;
-               unsigned int pgsize_idx;
-
-               /* Max page size that still fits into 'size' */
-               pgsize_idx = __fls(size);
-
-               /* need to consider alignment requirements ? */
-               if (likely(addr_merge)) {
-                       /* Max page size allowed by both iova and paddr */
-                       unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-                       pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-               }
-
-               /* build a mask of acceptable page sizes */
-               pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-               /* throw away page sizes not supported by the hardware */
-               pgsize &= domain->ops->pgsize_bitmap;
-
-               /* make sure we're still sane */
-               BUG_ON(!pgsize);
-
-               /* pick the biggest page */
-               pgsize_idx = __fls(pgsize);
-               pgsize = 1UL << pgsize_idx;
+               size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-               pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-                                       (unsigned long)paddr, pgsize);
+               pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+                        iova, &paddr, pgsize);
 
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
-               pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
-                                       iova, (unsigned long)size, min_pagesz);
+               pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+                      iova, size, min_pagesz);
                return -EINVAL;
        }
 
-       pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
-                                                       (unsigned long)size);
+       pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
 
        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
-               size_t left = size - unmapped;
+               size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-               unmapped_page = domain->ops->unmap(domain, iova, left);
+               unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;
 
-               pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
-                                       (unsigned long)unmapped_page);
+               pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+                        iova, unmapped_page);
 
                iova += unmapped_page;
                unmapped += unmapped_page;
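The new iommu_pgsize() helper factors out the page-size arithmetic that iommu_map() previously carried inline, so iommu_unmap() can reuse it: pick the largest hardware-supported page that both fits in the remaining size and matches the alignment of iova | paddr. A minimal userspace sketch of the same selection, using GCC builtins in place of __fls()/__ffs() and an invented pgsize_bitmap (4K/2M/1G), might look like this:

#include <stdio.h>
#include <stddef.h>

static size_t pick_pgsize(unsigned long pgsize_bitmap,
			  unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Largest page order that still fits into 'size' (__fls analogue) */
	pgsize_idx = 8 * sizeof(size) - 1 - __builtin_clzl(size);

	/* Address alignment caps the usable order (__ffs analogue) */
	if (addr_merge) {
		unsigned int align_idx = __builtin_ctzl(addr_merge);
		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	/* Mask of all page sizes up to that order, then keep supported ones */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;
	pgsize &= pgsize_bitmap;
	if (!pgsize)
		return 0;	/* the kernel helper uses BUG_ON(!pgsize) here */

	/* Pick the biggest remaining page size */
	pgsize_idx = 8 * sizeof(pgsize) - 1 - __builtin_clzl(pgsize);
	return (size_t)1 << pgsize_idx;
}

int main(void)
{
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* 6M to map at a 2M-aligned address: a 2M page comes first */
	printf("%#zx\n", pick_pgsize(bitmap, 0x40200000UL, 6UL << 20));
	/* 8K at a 4K-aligned address: only a 4K page is possible */
	printf("%#zx\n", pick_pgsize(bitmap, 0x1000UL, 0x2000));
	return 0;
}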
index e02e5d7..0ba3766 100644 (file)
@@ -833,16 +833,15 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
        iopgd = iopgd_offset(obj, da);
 
        if (!iopgd_is_table(*iopgd)) {
-               dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
-                       "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
+               dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
+                               obj->name, errs, da, iopgd, *iopgd);
                return IRQ_NONE;
        }
 
        iopte = iopte_offset(iopgd, da);
 
-       dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
-               "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
-               iopte, *iopte);
+       dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
+                       obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 
        return IRQ_NONE;
 }
@@ -1235,14 +1234,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                else if (iopte_is_large(*pte))
                        ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
                else
-                       dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
+                       dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
+                                                       (unsigned long long)da);
        } else {
                if (iopgd_is_section(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
                else if (iopgd_is_super(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
                else
-                       dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
+                       dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
+                                                       (unsigned long long)da);
        }
 
        return ret;
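The iova_to_phys path above combines the physical bits from the page-table entry with the offset bits from the device address; calls such as omap_iommu_translate(*pte, da, IOLARGE_MASK) follow the usual mask-based split. A small illustrative sketch of that kind of translation, with assumed mask values for a 4K small page and a 64K large page, is:

#include <stdio.h>
#include <stdint.h>

#define IOPAGE_MASK	0xfffff000u	/* assumed 4K small-page mask  */
#define IOLARGE_MASK	0xffff0000u	/* assumed 64K large-page mask */

/* Physical bits come from the entry, offset bits from the device address */
static uint32_t translate(uint32_t entry, uint32_t da, uint32_t mask)
{
	return (entry & mask) | (da & ~mask);
}

int main(void)
{
	/* A large-page entry at 0x8abc0000, device address offset 0x1234 */
	printf("%#x\n", (unsigned int)translate(0x8abc0000u, 0x40001234u,
						IOLARGE_MASK));
	return 0;
}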
index cd4ae9e..f4003d5 100644 (file)
@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
 
 #define to_iommu(dev)                                                  \
-       (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
+       ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
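The one-line change above matters because a cast binds less tightly than the postfix '->' operator: without the outer parentheses, an expression such as to_iommu(dev)->name would apply '->' to the void pointer returned by platform_get_drvdata() before the cast takes effect. An illustrative standalone demonstration (the structure and getter are stand-ins, not the OMAP driver's) is:

#include <stdio.h>

struct fake_iommu { const char *name; };

static void *get_drvdata(void *dev)
{
	static struct fake_iommu obj = { .name = "iommu0" };
	(void)dev;
	return &obj;
}

/* Old form -- fails to compile when the result is dereferenced directly: */
/* #define to_iommu(dev)	(struct fake_iommu *)get_drvdata(dev)      */
/* New form -- safe in any expression context:                            */
#define to_iommu(dev)	((struct fake_iommu *)get_drvdata(dev))

int main(void)
{
	printf("%s\n", to_iommu(NULL)->name);
	return 0;
}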
index 46d8756..d147259 100644 (file)
@@ -102,8 +102,8 @@ static size_t sgtable_len(const struct sg_table *sgt)
                }
 
                if (i && sg->offset) {
-                       pr_err("%s: sg[%d] offset not allowed in internal "
-                                       "entries\n", __func__, i);
+                       pr_err("%s: sg[%d] offset not allowed in internal entries\n",
+                               __func__, i);
                        return 0;
                }
 
index 42d670a..3d2a90a 100644 (file)
@@ -902,7 +902,6 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
        struct scatterlist sg;
        struct virtio_net_ctrl_mq s;
        struct net_device *dev = vi->dev;
-       int i;
 
        if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
                return 0;
@@ -916,10 +915,8 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
                         queue_pairs);
                return -EINVAL;
        } else {
-               for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
-                       if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-                               schedule_delayed_work(&vi->refill, 0);
                vi->curr_queue_pairs = queue_pairs;
+               schedule_delayed_work(&vi->refill, 0);
        }
 
        return 0;
index 259ad28..c488da5 100644 (file)
@@ -76,6 +76,7 @@ struct vfio_group {
        struct notifier_block           nb;
        struct list_head                vfio_next;
        struct list_head                container_next;
+       atomic_t                        opened;
 };
 
 struct vfio_device {
@@ -206,6 +207,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
        atomic_set(&group->container_users, 0);
+       atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;
 
        group->nb.notifier_call = vfio_iommu_group_notifier;
@@ -1236,12 +1238,22 @@ static long vfio_group_fops_compat_ioctl(struct file *filep,
 static int vfio_group_fops_open(struct inode *inode, struct file *filep)
 {
        struct vfio_group *group;
+       int opened;
 
        group = vfio_group_get_from_minor(iminor(inode));
        if (!group)
                return -ENODEV;
 
+       /* Do we need multiple instances of the group open?  Seems not. */
+       opened = atomic_cmpxchg(&group->opened, 0, 1);
+       if (opened) {
+               vfio_group_put(group);
+               return -EBUSY;
+       }
+
+       /* Is something still in use from a previous open? */
        if (group->container) {
+               atomic_dec(&group->opened);
                vfio_group_put(group);
                return -EBUSY;
        }
@@ -1259,6 +1271,8 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
 
        vfio_group_try_dissolve_container(group);
 
+       atomic_dec(&group->opened);
+
        vfio_group_put(group);
 
        return 0;
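The new atomic 'opened' count turns the group file into a single-open device: the first opener atomically flips 0 to 1, any concurrent or later opener sees the old nonzero value and gets -EBUSY, and release drops the count again. A minimal userspace sketch of the same guard, using C11 atomics and stand-in names (fake_group, fake_open, fake_release) rather than the VFIO file operations, is:

#include <stdatomic.h>
#include <stdio.h>

#define EBUSY 16

struct fake_group { atomic_int opened; };

static int fake_open(struct fake_group *group)
{
	int expected = 0;

	/* Userspace analogue of atomic_cmpxchg(&group->opened, 0, 1) */
	if (!atomic_compare_exchange_strong(&group->opened, &expected, 1))
		return -EBUSY;
	return 0;
}

static void fake_release(struct fake_group *group)
{
	atomic_fetch_sub(&group->opened, 1);
}

int main(void)
{
	struct fake_group g = { .opened = 0 };

	printf("first open:  %d\n", fake_open(&g));	/* 0      */
	printf("second open: %d\n", fake_open(&g));	/* -EBUSY */
	fake_release(&g);
	printf("reopen:      %d\n", fake_open(&g));	/* 0      */
	return 0;
}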
index 6f3fbc4..a9807de 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pci.h>         /* pci_bus_type */
+#include <linux/rbtree.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
@@ -47,19 +48,25 @@ module_param_named(allow_unsafe_interrupts,
 MODULE_PARM_DESC(allow_unsafe_interrupts,
                 "Enable VFIO IOMMU support for on platforms without interrupt remapping support.");
 
+static bool disable_hugepages;
+module_param_named(disable_hugepages,
+                  disable_hugepages, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_hugepages,
+                "Disable VFIO IOMMU support for IOMMU hugepages.");
+
 struct vfio_iommu {
        struct iommu_domain     *domain;
        struct mutex            lock;
-       struct list_head        dma_list;
+       struct rb_root          dma_list;
        struct list_head        group_list;
        bool                    cache;
 };
 
 struct vfio_dma {
-       struct list_head        next;
+       struct rb_node          node;
        dma_addr_t              iova;           /* Device address */
        unsigned long           vaddr;          /* Process virtual addr */
-       long                    npage;          /* Number of pages */
+       size_t                  size;           /* Map size (bytes) */
        int                     prot;           /* IOMMU_READ/WRITE */
 };
 
@@ -73,7 +80,48 @@ struct vfio_group {
  * into DMA'ble space using the IOMMU
  */
 
-#define NPAGE_TO_SIZE(npage)   ((size_t)(npage) << PAGE_SHIFT)
+static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
+                                     dma_addr_t start, size_t size)
+{
+       struct rb_node *node = iommu->dma_list.rb_node;
+
+       while (node) {
+               struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
+
+               if (start + size <= dma->iova)
+                       node = node->rb_left;
+               else if (start >= dma->iova + dma->size)
+                       node = node->rb_right;
+               else
+                       return dma;
+       }
+
+       return NULL;
+}
+
+static void vfio_insert_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
+{
+       struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
+       struct vfio_dma *dma;
+
+       while (*link) {
+               parent = *link;
+               dma = rb_entry(parent, struct vfio_dma, node);
+
+               if (new->iova + new->size <= dma->iova)
+                       link = &(*link)->rb_left;
+               else
+                       link = &(*link)->rb_right;
+       }
+
+       rb_link_node(&new->node, parent, link);
+       rb_insert_color(&new->node, &iommu->dma_list);
+}
+
+static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
+{
+       rb_erase(&old->node, &iommu->dma_list);
+}
 
 struct vwork {
        struct mm_struct        *mm;
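The rb-tree helpers added above replace the old linear list walk: each vfio_dma covers the half-open range [iova, iova + size), and a lookup descends left or right until it finds a node whose range overlaps the query. A standalone illustrative sketch of the same comparisons, on a plain (unbalanced) binary search tree rather than the kernel rbtree, is:

#include <stdio.h>
#include <stdint.h>

struct dma_node {
	uint64_t iova, size;			/* covers [iova, iova + size) */
	struct dma_node *left, *right;
};

static struct dma_node *find_dma(struct dma_node *node,
				 uint64_t start, uint64_t size)
{
	while (node) {
		if (start + size <= node->iova)
			node = node->left;	/* query ends before node */
		else if (start >= node->iova + node->size)
			node = node->right;	/* query starts after node */
		else
			return node;		/* ranges overlap */
	}
	return NULL;
}

static void insert_dma(struct dma_node **link, struct dma_node *new)
{
	while (*link) {
		if (new->iova + new->size <= (*link)->iova)
			link = &(*link)->left;
		else
			link = &(*link)->right;
	}
	*link = new;
}

int main(void)
{
	struct dma_node a = { .iova = 0x1000,  .size = 0x2000 };
	struct dma_node b = { .iova = 0x10000, .size = 0x1000 };
	struct dma_node *root = NULL;

	insert_dma(&root, &a);
	insert_dma(&root, &b);

	printf("%p\n", (void *)find_dma(root, 0x2000, 0x1000));	/* &a    */
	printf("%p\n", (void *)find_dma(root, 0x4000, 0x1000));	/* (nil) */
	return 0;
}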
@@ -100,8 +148,8 @@ static void vfio_lock_acct(long npage)
        struct vwork *vwork;
        struct mm_struct *mm;
 
-       if (!current->mm)
-               return; /* process exited */
+       if (!current->mm || !npage)
+               return; /* process exited or nothing to do */
 
        if (down_write_trylock(&current->mm->mmap_sem)) {
                current->mm->locked_vm += npage;
@@ -173,33 +221,6 @@ static int put_pfn(unsigned long pfn, int prot)
        return 0;
 }
 
-/* Unmap DMA region */
-static long __vfio_dma_do_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
-                            long npage, int prot)
-{
-       long i, unlocked = 0;
-
-       for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
-               unsigned long pfn;
-
-               pfn = iommu_iova_to_phys(iommu->domain, iova) >> PAGE_SHIFT;
-               if (pfn) {
-                       iommu_unmap(iommu->domain, iova, PAGE_SIZE);
-                       unlocked += put_pfn(pfn, prot);
-               }
-       }
-       return unlocked;
-}
-
-static void vfio_dma_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
-                          long npage, int prot)
-{
-       long unlocked;
-
-       unlocked = __vfio_dma_do_unmap(iommu, iova, npage, prot);
-       vfio_lock_acct(-unlocked);
-}
-
 static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
 {
        struct page *page[1];
@@ -226,198 +247,306 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
        return ret;
 }
 
-/* Map DMA region */
-static int __vfio_dma_map(struct vfio_iommu *iommu, dma_addr_t iova,
-                         unsigned long vaddr, long npage, int prot)
+/*
+ * Attempt to pin pages.  We really don't want to track all the pfns and
+ * the iommu can only map chunks of consecutive pfns anyway, so get the
+ * first page and all consecutive pages with the same locking.
+ */
+static long vfio_pin_pages(unsigned long vaddr, long npage,
+                          int prot, unsigned long *pfn_base)
 {
-       dma_addr_t start = iova;
-       long i, locked = 0;
-       int ret;
+       unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+       bool lock_cap = capable(CAP_IPC_LOCK);
+       long ret, i;
 
-       /* Verify that pages are not already mapped */
-       for (i = 0; i < npage; i++, iova += PAGE_SIZE)
-               if (iommu_iova_to_phys(iommu->domain, iova))
-                       return -EBUSY;
+       if (!current->mm)
+               return -ENODEV;
 
-       iova = start;
+       ret = vaddr_get_pfn(vaddr, prot, pfn_base);
+       if (ret)
+               return ret;
 
-       if (iommu->cache)
-               prot |= IOMMU_CACHE;
+       if (is_invalid_reserved_pfn(*pfn_base))
+               return 1;
 
-       /*
-        * XXX We break mappings into pages and use get_user_pages_fast to
-        * pin the pages in memory.  It's been suggested that mlock might
-        * provide a more efficient mechanism, but nothing prevents the
-        * user from munlocking the pages, which could then allow the user
-        * access to random host memory.  We also have no guarantee from the
-        * IOMMU API that the iommu driver can unmap sub-pages of previous
-        * mappings.  This means we might lose an entire range if a single
-        * page within it is unmapped.  Single page mappings are inefficient,
-        * but provide the most flexibility for now.
-        */
-       for (i = 0; i < npage; i++, iova += PAGE_SIZE, vaddr += PAGE_SIZE) {
+       if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+               put_pfn(*pfn_base, prot);
+               pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
+                       limit << PAGE_SHIFT);
+               return -ENOMEM;
+       }
+
+       if (unlikely(disable_hugepages)) {
+               vfio_lock_acct(1);
+               return 1;
+       }
+
+       /* Lock all the consecutive pages from pfn_base */
+       for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
                unsigned long pfn = 0;
 
                ret = vaddr_get_pfn(vaddr, prot, &pfn);
-               if (ret) {
-                       __vfio_dma_do_unmap(iommu, start, i, prot);
-                       return ret;
-               }
+               if (ret)
+                       break;
 
-               /*
-                * Only add actual locked pages to accounting
-                * XXX We're effectively marking a page locked for every
-                * IOVA page even though it's possible the user could be
-                * backing multiple IOVAs with the same vaddr.  This over-
-                * penalizes the user process, but we currently have no
-                * easy way to do this properly.
-                */
-               if (!is_invalid_reserved_pfn(pfn))
-                       locked++;
+               if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+                       put_pfn(pfn, prot);
+                       break;
+               }
 
-               ret = iommu_map(iommu->domain, iova,
-                               (phys_addr_t)pfn << PAGE_SHIFT,
-                               PAGE_SIZE, prot);
-               if (ret) {
-                       /* Back out mappings on error */
+               if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
                        put_pfn(pfn, prot);
-                       __vfio_dma_do_unmap(iommu, start, i, prot);
-                       return ret;
+                       pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+                               __func__, limit << PAGE_SHIFT);
+                       break;
                }
        }
-       vfio_lock_acct(locked);
-       return 0;
+
+       vfio_lock_acct(i);
+
+       return i;
 }
 
-static inline bool ranges_overlap(dma_addr_t start1, size_t size1,
-                                 dma_addr_t start2, size_t size2)
+static long vfio_unpin_pages(unsigned long pfn, long npage,
+                            int prot, bool do_accounting)
 {
-       if (start1 < start2)
-               return (start2 - start1 < size1);
-       else if (start2 < start1)
-               return (start1 - start2 < size2);
-       return (size1 > 0 && size2 > 0);
+       unsigned long unlocked = 0;
+       long i;
+
+       for (i = 0; i < npage; i++)
+               unlocked += put_pfn(pfn++, prot);
+
+       if (do_accounting)
+               vfio_lock_acct(-unlocked);
+
+       return unlocked;
 }
 
-static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
-                                               dma_addr_t start, size_t size)
+static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
+                           dma_addr_t iova, size_t *size)
 {
-       struct vfio_dma *dma;
+       dma_addr_t start = iova, end = iova + *size;
+       long unlocked = 0;
 
-       list_for_each_entry(dma, &iommu->dma_list, next) {
-               if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
-                                  start, size))
-                       return dma;
+       while (iova < end) {
+               size_t unmapped;
+               phys_addr_t phys;
+
+               /*
+                * We use the IOMMU to track the physical address.  This
+                * saves us from having a lot more entries in our mapping
+                * tree.  The downside is that we don't track the size
+                * used to do the mapping.  We request unmap of a single
+                * page, but expect IOMMUs that support large pages to
+                * unmap a larger chunk.
+                */
+               phys = iommu_iova_to_phys(iommu->domain, iova);
+               if (WARN_ON(!phys)) {
+                       iova += PAGE_SIZE;
+                       continue;
+               }
+
+               unmapped = iommu_unmap(iommu->domain, iova, PAGE_SIZE);
+               if (!unmapped)
+                       break;
+
+               unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
+                                            unmapped >> PAGE_SHIFT,
+                                            dma->prot, false);
+               iova += unmapped;
        }
-       return NULL;
+
+       vfio_lock_acct(-unlocked);
+
+       *size = iova - start;
+
+       return 0;
 }
 
-static long vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
-                                   size_t size, struct vfio_dma *dma)
+static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
+                                  size_t *size, struct vfio_dma *dma)
 {
+       size_t offset, overlap, tmp;
        struct vfio_dma *split;
-       long npage_lo, npage_hi;
-
-       /* Existing dma region is completely covered, unmap all */
-       if (start <= dma->iova &&
-           start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
-               vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
-               list_del(&dma->next);
-               npage_lo = dma->npage;
+       int ret;
+
+       if (!*size)
+               return 0;
+
+       /*
+        * Existing dma region is completely covered, unmap all.  This is
+        * the likely case since userspace tends to map and unmap buffers
+        * in one shot rather than multiple mappings within a buffer.
+        */
+       if (likely(start <= dma->iova &&
+                  start + *size >= dma->iova + dma->size)) {
+               *size = dma->size;
+               ret = vfio_unmap_unpin(iommu, dma, dma->iova, size);
+               if (ret)
+                       return ret;
+
+               /*
+                * Did we remove more than we have?  Should never happen
+                * since a vfio_dma is contiguous in iova and vaddr.
+                */
+               WARN_ON(*size != dma->size);
+
+               vfio_remove_dma(iommu, dma);
                kfree(dma);
-               return npage_lo;
+               return 0;
        }
 
        /* Overlap low address of existing range */
        if (start <= dma->iova) {
-               size_t overlap;
+               overlap = start + *size - dma->iova;
+               ret = vfio_unmap_unpin(iommu, dma, dma->iova, &overlap);
+               if (ret)
+                       return ret;
 
-               overlap = start + size - dma->iova;
-               npage_lo = overlap >> PAGE_SHIFT;
+               vfio_remove_dma(iommu, dma);
 
-               vfio_dma_unmap(iommu, dma->iova, npage_lo, dma->prot);
-               dma->iova += overlap;
-               dma->vaddr += overlap;
-               dma->npage -= npage_lo;
-               return npage_lo;
+               /*
+                * Check whether we removed the whole vfio_dma.  If not,
+                * fix up and re-insert.
+                */
+               if (overlap < dma->size) {
+                       dma->iova += overlap;
+                       dma->vaddr += overlap;
+                       dma->size -= overlap;
+                       vfio_insert_dma(iommu, dma);
+               } else
+                       kfree(dma);
+
+               *size = overlap;
+               return 0;
        }
 
        /* Overlap high address of existing range */
-       if (start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
-               size_t overlap;
+       if (start + *size >= dma->iova + dma->size) {
+               offset = start - dma->iova;
+               overlap = dma->size - offset;
 
-               overlap = dma->iova + NPAGE_TO_SIZE(dma->npage) - start;
-               npage_hi = overlap >> PAGE_SHIFT;
+               ret = vfio_unmap_unpin(iommu, dma, start, &overlap);
+               if (ret)
+                       return ret;
 
-               vfio_dma_unmap(iommu, start, npage_hi, dma->prot);
-               dma->npage -= npage_hi;
-               return npage_hi;
+               dma->size -= overlap;
+               *size = overlap;
+               return 0;
        }
 
        /* Split existing */
-       npage_lo = (start - dma->iova) >> PAGE_SHIFT;
-       npage_hi = dma->npage - (size >> PAGE_SHIFT) - npage_lo;
 
-       split = kzalloc(sizeof *split, GFP_KERNEL);
+       /*
+        * Allocate our tracking structure early even though it may not
+        * be used.  An allocation failure later loses track of pages and
+        * is more difficult to unwind.
+        */
+       split = kzalloc(sizeof(*split), GFP_KERNEL);
        if (!split)
                return -ENOMEM;
 
-       vfio_dma_unmap(iommu, start, size >> PAGE_SHIFT, dma->prot);
+       offset = start - dma->iova;
+
+       ret = vfio_unmap_unpin(iommu, dma, start, size);
+       if (ret || !*size) {
+               kfree(split);
+               return ret;
+       }
+
+       tmp = dma->size;
 
-       dma->npage = npage_lo;
+       /* Resize the lower vfio_dma in place, before the below insert */
+       dma->size = offset;
 
-       split->npage = npage_hi;
-       split->iova = start + size;
-       split->vaddr = dma->vaddr + NPAGE_TO_SIZE(npage_lo) + size;
-       split->prot = dma->prot;
-       list_add(&split->next, &iommu->dma_list);
-       return size >> PAGE_SHIFT;
+       /* Insert new for remainder, assuming it didn't all get unmapped */
+       if (likely(offset + *size < tmp)) {
+               split->size = tmp - offset - *size;
+               split->iova = dma->iova + offset + *size;
+               split->vaddr = dma->vaddr + offset + *size;
+               split->prot = dma->prot;
+               vfio_insert_dma(iommu, split);
+       } else
+               kfree(split);
+
+       return 0;
 }
 
 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
                             struct vfio_iommu_type1_dma_unmap *unmap)
 {
-       long ret = 0, npage = unmap->size >> PAGE_SHIFT;
-       struct vfio_dma *dma, *tmp;
        uint64_t mask;
+       struct vfio_dma *dma;
+       size_t unmapped = 0, size;
+       int ret = 0;
 
        mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
 
        if (unmap->iova & mask)
                return -EINVAL;
-       if (unmap->size & mask)
+       if (!unmap->size || unmap->size & mask)
                return -EINVAL;
 
-       /* XXX We still break these down into PAGE_SIZE */
        WARN_ON(mask & PAGE_MASK);
 
        mutex_lock(&iommu->lock);
 
-       list_for_each_entry_safe(dma, tmp, &iommu->dma_list, next) {
-               if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
-                                  unmap->iova, unmap->size)) {
-                       ret = vfio_remove_dma_overlap(iommu, unmap->iova,
-                                                     unmap->size, dma);
-                       if (ret > 0)
-                               npage -= ret;
-                       if (ret < 0 || npage == 0)
-                               break;
-               }
+       while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
+               size = unmap->size;
+               ret = vfio_remove_dma_overlap(iommu, unmap->iova, &size, dma);
+               if (ret || !size)
+                       break;
+               unmapped += size;
        }
+
        mutex_unlock(&iommu->lock);
-       return ret > 0 ? 0 : (int)ret;
+
+       /*
+        * We may unmap more than requested, update the unmap struct so
+        * userspace can know.
+        */
+       unmap->size = unmapped;
+
+       return ret;
+}
+
+/*
+ * Turns out AMD IOMMU has a page table bug where it won't map large pages
+ * to a region that previously mapped smaller pages.  This should be fixed
+ * soon, so this is just a temporary workaround to break mappings down into
+ * PAGE_SIZE.  Better to map smaller pages than nothing.
+ */
+static int map_try_harder(struct vfio_iommu *iommu, dma_addr_t iova,
+                         unsigned long pfn, long npage, int prot)
+{
+       long i;
+       int ret;
+
+       for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
+               ret = iommu_map(iommu->domain, iova,
+                               (phys_addr_t)pfn << PAGE_SHIFT,
+                               PAGE_SIZE, prot);
+               if (ret)
+                       break;
+       }
+
+       for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
+               iommu_unmap(iommu->domain, iova, PAGE_SIZE);
+
+       return ret;
 }
 
 static int vfio_dma_do_map(struct vfio_iommu *iommu,
                           struct vfio_iommu_type1_dma_map *map)
 {
-       struct vfio_dma *dma, *pdma = NULL;
-       dma_addr_t iova = map->iova;
-       unsigned long locked, lock_limit, vaddr = map->vaddr;
+       dma_addr_t end, iova;
+       unsigned long vaddr = map->vaddr;
        size_t size = map->size;
+       long npage;
        int ret = 0, prot = 0;
        uint64_t mask;
-       long npage;
+
+       end = map->iova + map->size;
 
        mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
 
@@ -430,104 +559,144 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        if (!prot)
                return -EINVAL; /* No READ/WRITE? */
 
+       if (iommu->cache)
+               prot |= IOMMU_CACHE;
+
        if (vaddr & mask)
                return -EINVAL;
-       if (iova & mask)
+       if (map->iova & mask)
                return -EINVAL;
-       if (size & mask)
+       if (!map->size || map->size & mask)
                return -EINVAL;
 
-       /* XXX We still break these down into PAGE_SIZE */
        WARN_ON(mask & PAGE_MASK);
 
        /* Don't allow IOVA wrap */
-       if (iova + size && iova + size < iova)
+       if (end && end < map->iova)
                return -EINVAL;
 
        /* Don't allow virtual address wrap */
-       if (vaddr + size && vaddr + size < vaddr)
-               return -EINVAL;
-
-       npage = size >> PAGE_SHIFT;
-       if (!npage)
+       if (vaddr + map->size && vaddr + map->size < vaddr)
                return -EINVAL;
 
        mutex_lock(&iommu->lock);
 
-       if (vfio_find_dma(iommu, iova, size)) {
-               ret = -EBUSY;
-               goto out_lock;
+       if (vfio_find_dma(iommu, map->iova, map->size)) {
+               mutex_unlock(&iommu->lock);
+               return -EEXIST;
        }
 
-       /* account for locked pages */
-       locked = current->mm->locked_vm + npage;
-       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-       if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
-               pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
-                       __func__, rlimit(RLIMIT_MEMLOCK));
-               ret = -ENOMEM;
-               goto out_lock;
-       }
+       for (iova = map->iova; iova < end; iova += size, vaddr += size) {
+               struct vfio_dma *dma = NULL;
+               unsigned long pfn;
+               long i;
+
+               /* Pin a contiguous chunk of memory */
+               npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT,
+                                      prot, &pfn);
+               if (npage <= 0) {
+                       WARN_ON(!npage);
+                       ret = (int)npage;
+                       break;
+               }
 
-       ret = __vfio_dma_map(iommu, iova, vaddr, npage, prot);
-       if (ret)
-               goto out_lock;
+               /* Verify pages are not already mapped */
+               for (i = 0; i < npage; i++) {
+                       if (iommu_iova_to_phys(iommu->domain,
+                                              iova + (i << PAGE_SHIFT))) {
+                               vfio_unpin_pages(pfn, npage, prot, true);
+                               ret = -EBUSY;
+                               break;
+                       }
+               }
 
-       /* Check if we abut a region below - nothing below 0 */
-       if (iova) {
-               dma = vfio_find_dma(iommu, iova - 1, 1);
-               if (dma && dma->prot == prot &&
-                   dma->vaddr + NPAGE_TO_SIZE(dma->npage) == vaddr) {
+               ret = iommu_map(iommu->domain, iova,
+                               (phys_addr_t)pfn << PAGE_SHIFT,
+                               npage << PAGE_SHIFT, prot);
+               if (ret) {
+                       if (ret != -EBUSY ||
+                           map_try_harder(iommu, iova, pfn, npage, prot)) {
+                               vfio_unpin_pages(pfn, npage, prot, true);
+                               break;
+                       }
+               }
 
-                       dma->npage += npage;
-                       iova = dma->iova;
-                       vaddr = dma->vaddr;
-                       npage = dma->npage;
-                       size = NPAGE_TO_SIZE(npage);
+               size = npage << PAGE_SHIFT;
 
-                       pdma = dma;
+               /*
+                * Check if we abut a region below - nothing below 0.
+                * This is the most likely case when mapping chunks of
+                * physically contiguous regions within a virtual address
+                * range.  Update the abutting entry in place since iova
+                * doesn't change.
+                */
+               if (likely(iova)) {
+                       struct vfio_dma *tmp;
+                       tmp = vfio_find_dma(iommu, iova - 1, 1);
+                       if (tmp && tmp->prot == prot &&
+                           tmp->vaddr + tmp->size == vaddr) {
+                               tmp->size += size;
+                               iova = tmp->iova;
+                               size = tmp->size;
+                               vaddr = tmp->vaddr;
+                               dma = tmp;
+                       }
+               }
+
+               /*
+                * Check if we abut a region above - nothing above ~0 + 1.
+                * If we abut above and below, remove and free.  If only
+                * abut above, remove, modify, reinsert.
+                */
+               if (likely(iova + size)) {
+                       struct vfio_dma *tmp;
+                       tmp = vfio_find_dma(iommu, iova + size, 1);
+                       if (tmp && tmp->prot == prot &&
+                           tmp->vaddr == vaddr + size) {
+                               vfio_remove_dma(iommu, tmp);
+                               if (dma) {
+                                       dma->size += tmp->size;
+                                       kfree(tmp);
+                               } else {
+                                       size += tmp->size;
+                                       tmp->size = size;
+                                       tmp->iova = iova;
+                                       tmp->vaddr = vaddr;
+                                       vfio_insert_dma(iommu, tmp);
+                                       dma = tmp;
+                               }
+                       }
                }
-       }
 
-       /* Check if we abut a region above - nothing above ~0 + 1 */
-       if (iova + size) {
-               dma = vfio_find_dma(iommu, iova + size, 1);
-               if (dma && dma->prot == prot &&
-                   dma->vaddr == vaddr + size) {
+               if (!dma) {
+                       dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+                       if (!dma) {
+                               iommu_unmap(iommu->domain, iova, size);
+                               vfio_unpin_pages(pfn, npage, prot, true);
+                               ret = -ENOMEM;
+                               break;
+                       }
 
-                       dma->npage += npage;
+                       dma->size = size;
                        dma->iova = iova;
                        dma->vaddr = vaddr;
-
-                       /*
-                        * If merged above and below, remove previously
-                        * merged entry.  New entry covers it.
-                        */
-                       if (pdma) {
-                               list_del(&pdma->next);
-                               kfree(pdma);
-                       }
-                       pdma = dma;
+                       dma->prot = prot;
+                       vfio_insert_dma(iommu, dma);
                }
        }
 
-       /* Isolated, new region */
-       if (!pdma) {
-               dma = kzalloc(sizeof *dma, GFP_KERNEL);
-               if (!dma) {
-                       ret = -ENOMEM;
-                       vfio_dma_unmap(iommu, iova, npage, prot);
-                       goto out_lock;
+       if (ret) {
+               struct vfio_dma *tmp;
+               iova = map->iova;
+               size = map->size;
+               while ((tmp = vfio_find_dma(iommu, iova, size))) {
+                       int r = vfio_remove_dma_overlap(iommu, iova,
+                                                       &size, tmp);
+                       if (WARN_ON(r || !size))
+                               break;
                }
-
-               dma->npage = npage;
-               dma->iova = iova;
-               dma->vaddr = vaddr;
-               dma->prot = prot;
-               list_add(&dma->next, &iommu->dma_list);
        }
 
-out_lock:
        mutex_unlock(&iommu->lock);
        return ret;
 }
@@ -606,7 +775,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
                return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&iommu->group_list);
-       INIT_LIST_HEAD(&iommu->dma_list);
+       iommu->dma_list = RB_ROOT;
        mutex_init(&iommu->lock);
 
        /*
@@ -640,7 +809,7 @@ static void vfio_iommu_type1_release(void *iommu_data)
 {
        struct vfio_iommu *iommu = iommu_data;
        struct vfio_group *group, *group_tmp;
-       struct vfio_dma *dma, *dma_tmp;
+       struct rb_node *node;
 
        list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
                iommu_detach_group(iommu->domain, group->iommu_group);
@@ -648,10 +817,12 @@ static void vfio_iommu_type1_release(void *iommu_data)
                kfree(group);
        }
 
-       list_for_each_entry_safe(dma, dma_tmp, &iommu->dma_list, next) {
-               vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
-               list_del(&dma->next);
-               kfree(dma);
+       while ((node = rb_first(&iommu->dma_list))) {
+               struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
+               size_t size = dma->size;
+               vfio_remove_dma_overlap(iommu, dma->iova, &size, dma);
+               if (WARN_ON(!size))
+                       break;
        }
 
        iommu_domain_free(iommu->domain);
@@ -706,6 +877,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 
        } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
                struct vfio_iommu_type1_dma_unmap unmap;
+               long ret;
 
                minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
 
@@ -715,7 +887,11 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
                if (unmap.argsz < minsz || unmap.flags)
                        return -EINVAL;
 
-               return vfio_dma_do_unmap(iommu, &unmap);
+               ret = vfio_dma_do_unmap(iommu, &unmap);
+               if (ret)
+                       return ret;
+
+               return copy_to_user((void __user *)arg, &unmap, minsz);
        }
 
        return -ENOTTY;
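With the unmap path above now copying the updated struct back to userspace, a caller can learn how much was actually unmapped, which may exceed the request when the IOMMU used larger pages. A minimal illustrative userspace fragment, assuming an already-configured VFIO container file descriptor, is:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int unmap_region(int container_fd, unsigned long long iova,
			unsigned long long size)
{
	struct vfio_iommu_type1_dma_unmap unmap;

	memset(&unmap, 0, sizeof(unmap));
	unmap.argsz = sizeof(unmap);
	unmap.iova = iova;
	unmap.size = size;

	if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;

	/* The kernel now writes back the size it actually unmapped */
	printf("requested %#llx, unmapped %#llx\n",
	       size, (unsigned long long)unmap.size);
	return 0;
}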
index 8b9226d..017a1e8 100644 (file)
@@ -1,6 +1,7 @@
 config VHOST_NET
        tristate "Host kernel accelerator for virtio net"
        depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+       select VHOST
        select VHOST_RING
        ---help---
          This kernel module can be loaded in host kernel to accelerate
@@ -13,6 +14,7 @@ config VHOST_NET
 config VHOST_SCSI
        tristate "VHOST_SCSI TCM fabric driver"
        depends on TARGET_CORE && EVENTFD && m
+       select VHOST
        select VHOST_RING
        default n
        ---help---
@@ -24,3 +26,9 @@ config VHOST_RING
        ---help---
          This option is selected by any driver which needs to access
          the host side of a virtio ring.
+
+config VHOST
+       tristate
+       ---help---
+         This option is selected by any driver which needs to access
+         the core of vhost.
index 654e9af..e0441c3 100644 (file)
@@ -1,7 +1,8 @@
 obj-$(CONFIG_VHOST_NET) += vhost_net.o
-vhost_net-y := vhost.o net.o
+vhost_net-y := net.o
 
 obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
 vhost_scsi-y := scsi.o
 
 obj-$(CONFIG_VHOST_RING) += vringh.o
+obj-$(CONFIG_VHOST)    += vhost.o
index 8ca5ac7..027be91 100644 (file)
@@ -168,7 +168,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
        }
 }
 
-int vhost_net_set_ubuf_info(struct vhost_net *n)
+static int vhost_net_set_ubuf_info(struct vhost_net *n)
 {
        bool zcopy;
        int i;
@@ -189,7 +189,7 @@ err:
        return -ENOMEM;
 }
 
-void vhost_net_vq_reset(struct vhost_net *n)
+static void vhost_net_vq_reset(struct vhost_net *n)
 {
        int i;
 
index 7014202..4264840 100644 (file)
@@ -49,7 +49,6 @@
 #include <linux/llist.h>
 #include <linux/bitmap.h>
 
-#include "vhost.c"
 #include "vhost.h"
 
 #define TCM_VHOST_VERSION  "v0.1"
@@ -116,7 +115,6 @@ struct tcm_vhost_nacl {
        struct se_node_acl se_node_acl;
 };
 
-struct vhost_scsi;
 struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
@@ -218,7 +216,7 @@ static int iov_num_pages(struct iovec *iov)
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
-void tcm_vhost_done_inflight(struct kref *kref)
+static void tcm_vhost_done_inflight(struct kref *kref)
 {
        struct vhost_scsi_inflight *inflight;
 
@@ -329,11 +327,12 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
        return 1;
 }
 
-static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code,
-       unsigned char *buf)
+static u32
+tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
+                             struct se_node_acl *se_nacl,
+                             struct t10_pr_registration *pr_reg,
+                             int *format_code,
+                             unsigned char *buf)
 {
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
@@ -359,10 +358,11 @@ static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
                        format_code, buf);
 }
 
-static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code)
+static u32
+tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+                                 struct se_node_acl *se_nacl,
+                                 struct t10_pr_registration *pr_reg,
+                                 int *format_code)
 {
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
@@ -388,10 +388,11 @@ static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                        format_code);
 }
 
-static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
-       const char *buf,
-       u32 *out_tid_len,
-       char **port_nexus_ptr)
+static char *
+tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+                                   const char *buf,
+                                   u32 *out_tid_len,
+                                   char **port_nexus_ptr)
 {
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
@@ -417,8 +418,8 @@ static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                        port_nexus_ptr);
 }
 
-static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
-       struct se_portal_group *se_tpg)
+static struct se_node_acl *
+tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
        struct tcm_vhost_nacl *nacl;
 
@@ -431,8 +432,9 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
        return &nacl->se_node_acl;
 }
 
-static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl)
+static void
+tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
+                            struct se_node_acl *se_nacl)
 {
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
@@ -491,28 +493,28 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
        return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
 {
-       struct vhost_scsi *vs = tv_cmd->tvc_vhost;
+       struct vhost_scsi *vs = cmd->tvc_vhost;
 
-       llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
+       llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
 
        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
 
 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
-       vhost_scsi_complete_cmd(tv_cmd);
+       vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
-       vhost_scsi_complete_cmd(tv_cmd);
+       vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
@@ -527,8 +529,9 @@ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
        kfree(evt);
 }
 
-static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
-       u32 event, u32 reason)
+static struct tcm_vhost_evt *
+tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+                      u32 event, u32 reason)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
@@ -552,28 +555,28 @@ static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
        return evt;
 }
 
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
+static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
 {
-       struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+       struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 
        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 1);
 
-       if (tv_cmd->tvc_sgl_count) {
+       if (cmd->tvc_sgl_count) {
                u32 i;
-               for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
-                       put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+               for (i = 0; i < cmd->tvc_sgl_count; i++)
+                       put_page(sg_page(&cmd->tvc_sgl[i]));
 
-               kfree(tv_cmd->tvc_sgl);
+               kfree(cmd->tvc_sgl);
        }
 
-       tcm_vhost_put_inflight(tv_cmd->inflight);
+       tcm_vhost_put_inflight(cmd->inflight);
 
-       kfree(tv_cmd);
+       kfree(cmd);
 }
 
-static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
-       struct tcm_vhost_evt *evt)
+static void
+tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
@@ -652,7 +655,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
-       struct tcm_vhost_cmd *tv_cmd;
+       struct tcm_vhost_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        int ret, vq;
@@ -660,32 +663,32 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
-               tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+               cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
-               se_cmd = &tv_cmd->tvc_se_cmd;
+               se_cmd = &cmd->tvc_se_cmd;
 
                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
-                       tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
+                       cmd, se_cmd->residual_count, se_cmd->scsi_status);
 
                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = se_cmd->residual_count;
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = se_cmd->scsi_sense_length;
-               memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
+               memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       v_rsp.sense_len);
-               ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
+               ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
                if (likely(ret == 0)) {
                        struct vhost_scsi_virtqueue *q;
-                       vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
-                       q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
+                       vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
+                       q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
-               vhost_scsi_free_cmd(tv_cmd);
+               vhost_scsi_free_cmd(cmd);
        }
 
        vq = -1;
@@ -694,35 +697,35 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
-static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
-       struct vhost_virtqueue *vq,
-       struct tcm_vhost_tpg *tv_tpg,
-       struct virtio_scsi_cmd_req *v_req,
-       u32 exp_data_len,
-       int data_direction)
+static struct tcm_vhost_cmd *
+vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
+                       struct tcm_vhost_tpg *tpg,
+                       struct virtio_scsi_cmd_req *v_req,
+                       u32 exp_data_len,
+                       int data_direction)
 {
-       struct tcm_vhost_cmd *tv_cmd;
+       struct tcm_vhost_cmd *cmd;
        struct tcm_vhost_nexus *tv_nexus;
 
-       tv_nexus = tv_tpg->tpg_nexus;
+       tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }
 
-       tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
-       if (!tv_cmd) {
+       cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
+       if (!cmd) {
                pr_err("Unable to allocate struct tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
-       tv_cmd->tvc_tag = v_req->tag;
-       tv_cmd->tvc_task_attr = v_req->task_attr;
-       tv_cmd->tvc_exp_data_len = exp_data_len;
-       tv_cmd->tvc_data_direction = data_direction;
-       tv_cmd->tvc_nexus = tv_nexus;
-       tv_cmd->inflight = tcm_vhost_get_inflight(vq);
+       cmd->tvc_tag = v_req->tag;
+       cmd->tvc_task_attr = v_req->task_attr;
+       cmd->tvc_exp_data_len = exp_data_len;
+       cmd->tvc_data_direction = data_direction;
+       cmd->tvc_nexus = tv_nexus;
+       cmd->inflight = tcm_vhost_get_inflight(vq);
 
-       return tv_cmd;
+       return cmd;
 }
 
 /*
@@ -730,8 +733,11 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
  *
  * Returns the number of scatterlist entries used or -errno on error.
  */
-static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
-       unsigned int sgl_count, struct iovec *iov, int write)
+static int
+vhost_scsi_map_to_sgl(struct scatterlist *sgl,
+                     unsigned int sgl_count,
+                     struct iovec *iov,
+                     int write)
 {
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
@@ -775,8 +781,11 @@ out:
        return ret;
 }
 
-static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
-       struct iovec *iov, unsigned int niov, int write)
+static int
+vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
+                         struct iovec *iov,
+                         unsigned int niov,
+                         int write)
 {
        int ret;
        unsigned int i;
@@ -792,25 +801,25 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 
        /* TODO overflow checking */
 
-       sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
+       sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;
        pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
               sg, sgl_count, !sg);
        sg_init_table(sg, sgl_count);
 
-       tv_cmd->tvc_sgl = sg;
-       tv_cmd->tvc_sgl_count = sgl_count;
+       cmd->tvc_sgl = sg;
+       cmd->tvc_sgl_count = sgl_count;
 
        pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
                if (ret < 0) {
-                       for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
-                               put_page(sg_page(&tv_cmd->tvc_sgl[i]));
-                       kfree(tv_cmd->tvc_sgl);
-                       tv_cmd->tvc_sgl = NULL;
-                       tv_cmd->tvc_sgl_count = 0;
+                       for (i = 0; i < cmd->tvc_sgl_count; i++)
+                               put_page(sg_page(&cmd->tvc_sgl[i]));
+                       kfree(cmd->tvc_sgl);
+                       cmd->tvc_sgl = NULL;
+                       cmd->tvc_sgl_count = 0;
                        return ret;
                }
 
@@ -822,15 +831,15 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
-       struct tcm_vhost_cmd *tv_cmd =
+       struct tcm_vhost_cmd *cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
-       struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+       struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
        int rc, sg_no_bidi = 0;
 
-       if (tv_cmd->tvc_sgl_count) {
-               sg_ptr = tv_cmd->tvc_sgl;
+       if (cmd->tvc_sgl_count) {
+               sg_ptr = cmd->tvc_sgl;
 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
 #if 0
                if (se_cmd->se_cmd_flags & SCF_BIDI) {
@@ -841,13 +850,13 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        } else {
                sg_ptr = NULL;
        }
-       tv_nexus = tv_cmd->tvc_nexus;
+       tv_nexus = cmd->tvc_nexus;
 
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
-                       tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
-                       tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
-                       tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
-                       0, sg_ptr, tv_cmd->tvc_sgl_count,
+                       cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
+                       cmd->tvc_lun, cmd->tvc_exp_data_len,
+                       cmd->tvc_task_attr, cmd->tvc_data_direction,
+                       0, sg_ptr, cmd->tvc_sgl_count,
                        sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
@@ -856,8 +865,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        }
 }
 
-static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
-       struct vhost_virtqueue *vq, int head, unsigned out)
+static void
+vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+                          struct vhost_virtqueue *vq,
+                          int head, unsigned out)
 {
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
@@ -873,13 +884,13 @@ static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
 }
 
-static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
-       struct vhost_virtqueue *vq)
+static void
+vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
-       struct tcm_vhost_tpg *tv_tpg;
-       struct tcm_vhost_cmd *tv_cmd;
+       struct tcm_vhost_tpg *tpg;
+       struct tcm_vhost_cmd *cmd;
        u32 exp_data_len, data_first, data_num, data_direction;
        unsigned out, in, i;
        int head, ret;
@@ -964,10 +975,10 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
                /* Extract the tpgt */
                target = v_req.lun[1];
-               tv_tpg = ACCESS_ONCE(vs_tpg[target]);
+               tpg = ACCESS_ONCE(vs_tpg[target]);
 
                /* Target does not exist, fail the request */
-               if (unlikely(!tv_tpg)) {
+               if (unlikely(!tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
@@ -976,46 +987,46 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
                for (i = 0; i < data_num; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;
 
-               tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
+               cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
                                        exp_data_len, data_direction);
-               if (IS_ERR(tv_cmd)) {
+               if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
-                                       PTR_ERR(tv_cmd));
+                                       PTR_ERR(cmd));
                        goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
-                       ": %d\n", tv_cmd, exp_data_len, data_direction);
+                       ": %d\n", cmd, exp_data_len, data_direction);
 
-               tv_cmd->tvc_vhost = vs;
-               tv_cmd->tvc_vq = vq;
-               tv_cmd->tvc_resp = vq->iov[out].iov_base;
+               cmd->tvc_vhost = vs;
+               cmd->tvc_vq = vq;
+               cmd->tvc_resp = vq->iov[out].iov_base;
 
                /*
-                * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb
+                * Copy in the received CDB descriptor into cmd->tvc_cdb
                 * that will be used by tcm_vhost_new_cmd_map() and down into
                 * target_setup_cmd_from_cdb()
                 */
-               memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
+               memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for tcm_vhost
                 */
                /* TODO what if cdb was too small for varlen cdb header? */
-               if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
+               if (unlikely(scsi_command_size(cmd->tvc_cdb) >
                                        TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
-                               scsi_command_size(tv_cmd->tvc_cdb),
+                               scsi_command_size(cmd->tvc_cdb),
                                TCM_VHOST_MAX_CDB_SIZE);
                        goto err_free;
                }
-               tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+               cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
-                       tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
+                       cmd->tvc_cdb[0], cmd->tvc_lun);
 
                if (data_direction != DMA_NONE) {
-                       ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
+                       ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_num,
                                        data_direction == DMA_TO_DEVICE);
                        if (unlikely(ret)) {
@@ -1029,22 +1040,22 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
-               tv_cmd->tvc_vq_desc = head;
+               cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
-               INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
-               queue_work(tcm_vhost_workqueue, &tv_cmd->work);
+               INIT_WORK(&cmd->work, tcm_vhost_submission_work);
+               queue_work(tcm_vhost_workqueue, &cmd->work);
        }
 
        mutex_unlock(&vq->mutex);
        return;
 
 err_free:
-       vhost_scsi_free_cmd(tv_cmd);
+       vhost_scsi_free_cmd(cmd);
 err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
        mutex_unlock(&vq->mutex);
@@ -1055,8 +1066,12 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
        pr_debug("%s: The handling func for control queue.\n", __func__);
 }
 
-static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
-       struct se_lun *lun, u32 event, u32 reason)
+static void
+tcm_vhost_send_evt(struct vhost_scsi *vs,
+                  struct tcm_vhost_tpg *tpg,
+                  struct se_lun *lun,
+                  u32 event,
+                  u32 reason)
 {
        struct tcm_vhost_evt *evt;
 
@@ -1146,12 +1161,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
  *  The lock nesting rule is:
  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
-static int vhost_scsi_set_endpoint(
-       struct vhost_scsi *vs,
-       struct vhost_scsi_target *t)
+static int
+vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+                       struct vhost_scsi_target *t)
 {
        struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tv_tpg;
+       struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
@@ -1178,32 +1193,32 @@ static int vhost_scsi_set_endpoint(
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);
 
-       list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
-               mutex_lock(&tv_tpg->tv_tpg_mutex);
-               if (!tv_tpg->tpg_nexus) {
-                       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+       list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
+               mutex_lock(&tpg->tv_tpg_mutex);
+               if (!tpg->tpg_nexus) {
+                       mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
-               if (tv_tpg->tv_tpg_vhost_count != 0) {
-                       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               if (tpg->tv_tpg_vhost_count != 0) {
+                       mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
-               tv_tport = tv_tpg->tport;
+               tv_tport = tpg->tport;
 
                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-                       if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
+                       if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
                                kfree(vs_tpg);
-                               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+                               mutex_unlock(&tpg->tv_tpg_mutex);
                                ret = -EEXIST;
                                goto out;
                        }
-                       tv_tpg->tv_tpg_vhost_count++;
-                       tv_tpg->vhost_scsi = vs;
-                       vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+                       tpg->tv_tpg_vhost_count++;
+                       tpg->vhost_scsi = vs;
+                       vs_tpg[tpg->tport_tpgt] = tpg;
                        smp_mb__after_atomic_inc();
                        match = true;
                }
-               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               mutex_unlock(&tpg->tv_tpg_mutex);
        }
 
        if (match) {
@@ -1236,12 +1251,12 @@ out:
        return ret;
 }
 
-static int vhost_scsi_clear_endpoint(
-       struct vhost_scsi *vs,
-       struct vhost_scsi_target *t)
+static int
+vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+                         struct vhost_scsi_target *t)
 {
        struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tv_tpg;
+       struct tcm_vhost_tpg *tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
@@ -1264,30 +1279,30 @@ static int vhost_scsi_clear_endpoint(
 
        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                target = i;
-               tv_tpg = vs->vs_tpg[target];
-               if (!tv_tpg)
+               tpg = vs->vs_tpg[target];
+               if (!tpg)
                        continue;
 
-               mutex_lock(&tv_tpg->tv_tpg_mutex);
-               tv_tport = tv_tpg->tport;
+               mutex_lock(&tpg->tv_tpg_mutex);
+               tv_tport = tpg->tport;
                if (!tv_tport) {
                        ret = -ENODEV;
                        goto err_tpg;
                }
 
                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-                       pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+                       pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
-                               tv_tport->tport_name, tv_tpg->tport_tpgt,
+                               tv_tport->tport_name, tpg->tport_tpgt,
                                t->vhost_wwpn, t->vhost_tpgt);
                        ret = -EINVAL;
                        goto err_tpg;
                }
-               tv_tpg->tv_tpg_vhost_count--;
-               tv_tpg->vhost_scsi = NULL;
+               tpg->tv_tpg_vhost_count--;
+               tpg->vhost_scsi = NULL;
                vs->vs_tpg[target] = NULL;
                match = true;
-               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               mutex_unlock(&tpg->tv_tpg_mutex);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
@@ -1311,7 +1326,7 @@ static int vhost_scsi_clear_endpoint(
        return 0;
 
 err_tpg:
-       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+       mutex_unlock(&tpg->tv_tpg_mutex);
 err_dev:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
@@ -1338,68 +1353,70 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
-       struct vhost_scsi *s;
+       struct vhost_scsi *vs;
        struct vhost_virtqueue **vqs;
        int r, i;
 
-       s = kzalloc(sizeof(*s), GFP_KERNEL);
-       if (!s)
+       vs = kzalloc(sizeof(*vs), GFP_KERNEL);
+       if (!vs)
                return -ENOMEM;
 
        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
-               kfree(s);
+               kfree(vs);
                return -ENOMEM;
        }
 
-       vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
-       vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
+       vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
+       vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
 
-       s->vs_events_nr = 0;
-       s->vs_events_missed = false;
+       vs->vs_events_nr = 0;
+       vs->vs_events_missed = false;
 
-       vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
-       vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
-       s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
-       s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
+       vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
+       vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+       vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
+       vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
-               vqs[i] = &s->vqs[i].vq;
-               s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+               vqs[i] = &vs->vqs[i].vq;
+               vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
-       r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
+       r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
 
-       tcm_vhost_init_inflight(s, NULL);
+       tcm_vhost_init_inflight(vs, NULL);
 
        if (r < 0) {
                kfree(vqs);
-               kfree(s);
+               kfree(vs);
                return r;
        }
 
-       f->private_data = s;
+       f->private_data = vs;
        return 0;
 }
 
 static int vhost_scsi_release(struct inode *inode, struct file *f)
 {
-       struct vhost_scsi *s = f->private_data;
+       struct vhost_scsi *vs = f->private_data;
        struct vhost_scsi_target t;
 
-       mutex_lock(&s->dev.mutex);
-       memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
-       mutex_unlock(&s->dev.mutex);
-       vhost_scsi_clear_endpoint(s, &t);
-       vhost_dev_stop(&s->dev);
-       vhost_dev_cleanup(&s->dev, false);
+       mutex_lock(&vs->dev.mutex);
+       memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
+       mutex_unlock(&vs->dev.mutex);
+       vhost_scsi_clear_endpoint(vs, &t);
+       vhost_dev_stop(&vs->dev);
+       vhost_dev_cleanup(&vs->dev, false);
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
-       vhost_scsi_flush(s);
-       kfree(s->dev.vqs);
-       kfree(s);
+       vhost_scsi_flush(vs);
+       kfree(vs->dev.vqs);
+       kfree(vs);
        return 0;
 }
 
-static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
-                               unsigned long arg)
+static long
+vhost_scsi_ioctl(struct file *f,
+                unsigned int ioctl,
+                unsigned long arg)
 {
        struct vhost_scsi *vs = f->private_data;
        struct vhost_scsi_target backend;
@@ -1515,8 +1532,9 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
        return "Unknown";
 }
 
-static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
-       struct se_lun *lun, bool plug)
+static void
+tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+                 struct se_lun *lun, bool plug)
 {
 
        struct vhost_scsi *vs = tpg->vhost_scsi;
@@ -1556,18 +1574,18 @@ static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
 }
 
 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
-       struct se_lun *lun)
+                              struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
 
        mutex_lock(&tcm_vhost_mutex);
 
-       mutex_lock(&tv_tpg->tv_tpg_mutex);
-       tv_tpg->tv_tpg_port_count++;
-       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+       mutex_lock(&tpg->tv_tpg_mutex);
+       tpg->tv_tpg_port_count++;
+       mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotplug(tv_tpg, lun);
+       tcm_vhost_hotplug(tpg, lun);
 
        mutex_unlock(&tcm_vhost_mutex);
 
@@ -1575,26 +1593,26 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
 }
 
 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
-       struct se_lun *lun)
+                                 struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
 
        mutex_lock(&tcm_vhost_mutex);
 
-       mutex_lock(&tv_tpg->tv_tpg_mutex);
-       tv_tpg->tv_tpg_port_count--;
-       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+       mutex_lock(&tpg->tv_tpg_mutex);
+       tpg->tv_tpg_port_count--;
+       mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotunplug(tv_tpg, lun);
+       tcm_vhost_hotunplug(tpg, lun);
 
        mutex_unlock(&tcm_vhost_mutex);
 }
 
-static struct se_node_acl *tcm_vhost_make_nodeacl(
-       struct se_portal_group *se_tpg,
-       struct config_group *group,
-       const char *name)
+static struct se_node_acl *
+tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
+                      struct config_group *group,
+                      const char *name)
 {
        struct se_node_acl *se_nacl, *se_nacl_new;
        struct tcm_vhost_nacl *nacl;
@@ -1635,23 +1653,23 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
        kfree(nacl);
 }
 
-static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
-       const char *name)
+static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
+                               const char *name)
 {
        struct se_portal_group *se_tpg;
        struct tcm_vhost_nexus *tv_nexus;
 
-       mutex_lock(&tv_tpg->tv_tpg_mutex);
-       if (tv_tpg->tpg_nexus) {
-               mutex_unlock(&tv_tpg->tv_tpg_mutex);
-               pr_debug("tv_tpg->tpg_nexus already exists\n");
+       mutex_lock(&tpg->tv_tpg_mutex);
+       if (tpg->tpg_nexus) {
+               mutex_unlock(&tpg->tv_tpg_mutex);
+               pr_debug("tpg->tpg_nexus already exists\n");
                return -EEXIST;
        }
-       se_tpg = &tv_tpg->se_tpg;
+       se_tpg = &tpg->se_tpg;
 
        tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
        if (!tv_nexus) {
-               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               mutex_unlock(&tpg->tv_tpg_mutex);
                pr_err("Unable to allocate struct tcm_vhost_nexus\n");
                return -ENOMEM;
        }
@@ -1660,7 +1678,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
         */
        tv_nexus->tvn_se_sess = transport_init_session();
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
-               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(tv_nexus);
                return -ENOMEM;
        }
@@ -1672,7 +1690,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
                                se_tpg, (unsigned char *)name);
        if (!tv_nexus->tvn_se_sess->se_node_acl) {
-               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               mutex_unlock(&tpg->tv_tpg_mutex);
                pr_debug("core_tpg_check_initiator_node_acl() failed"
                                " for %s\n", name);
                transport_free_session(tv_nexus->tvn_se_sess);
@@ -1685,9 +1703,9 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
         */
        __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
                        tv_nexus->tvn_se_sess, tv_nexus);
-       tv_tpg->tpg_nexus = tv_nexus;
+       tpg->tpg_nexus = tv_nexus;
 
-       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+       mutex_unlock(&tpg->tv_tpg_mutex);
        return 0;
 }
 
@@ -1740,40 +1758,40 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
 }
 
 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
-       char *page)
+                                       char *page)
 {
-       struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_nexus *tv_nexus;
        ssize_t ret;
 
-       mutex_lock(&tv_tpg->tv_tpg_mutex);
-       tv_nexus = tv_tpg->tpg_nexus;
+       mutex_lock(&tpg->tv_tpg_mutex);
+       tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
-               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               mutex_unlock(&tpg->tv_tpg_mutex);
                return -ENODEV;
        }
        ret = snprintf(page, PAGE_SIZE, "%s\n",
                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
-       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+       mutex_unlock(&tpg->tv_tpg_mutex);
 
        return ret;
 }
 
 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
-       const char *page,
-       size_t count)
+                                        const char *page,
+                                        size_t count)
 {
-       struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
+       struct tcm_vhost_tport *tport_wwn = tpg->tport;
        unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
        int ret;
        /*
         * Shut down the active I_T nexus if 'NULL' is passed.
         */
        if (!strncmp(page, "NULL", 4)) {
-               ret = tcm_vhost_drop_nexus(tv_tpg);
+               ret = tcm_vhost_drop_nexus(tpg);
                return (!ret) ? count : ret;
        }
        /*
@@ -1831,7 +1849,7 @@ check_newline:
        if (i_port[strlen(i_port)-1] == '\n')
                i_port[strlen(i_port)-1] = '\0';
 
-       ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
+       ret = tcm_vhost_make_nexus(tpg, port_ptr);
        if (ret < 0)
                return ret;
 
@@ -1845,9 +1863,10 @@ static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
        NULL,
 };
 
-static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
-       struct config_group *group,
-       const char *name)
+static struct se_portal_group *
+tcm_vhost_make_tpg(struct se_wwn *wwn,
+                  struct config_group *group,
+                  const char *name)
 {
        struct tcm_vhost_tport *tport = container_of(wwn,
                        struct tcm_vhost_tport, tport_wwn);
@@ -1903,9 +1922,10 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
        kfree(tpg);
 }
 
-static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
-       struct config_group *group,
-       const char *name)
+static struct se_wwn *
+tcm_vhost_make_tport(struct target_fabric_configfs *tf,
+                    struct config_group *group,
+                    const char *name)
 {
        struct tcm_vhost_tport *tport;
        char *ptr;
@@ -1975,9 +1995,9 @@ static void tcm_vhost_drop_tport(struct se_wwn *wwn)
        kfree(tport);
 }
 
-static ssize_t tcm_vhost_wwn_show_attr_version(
-       struct target_fabric_configfs *tf,
-       char *page)
+static ssize_t
+tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
+                               char *page)
 {
        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
                "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
index 1ee45bc..a73ea21 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/slab.h>
 
 #include "test.h"
-#include "vhost.c"
+#include "vhost.h"
 
 /* Max number of bytes transferred before requeueing the job.
  * Using this limit prevents one virtqueue from starving others. */
@@ -38,17 +38,19 @@ struct vhost_test {
  * read-side critical section for our kind of RCU. */
 static void handle_vq(struct vhost_test *n)
 {
-       struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+       struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
        unsigned out, in;
        int head;
        size_t len, total_len = 0;
        void *private;
 
-       private = rcu_dereference_check(vq->private_data, 1);
-       if (!private)
+       mutex_lock(&vq->mutex);
+       private = vq->private_data;
+       if (!private) {
+               mutex_unlock(&vq->mutex);
                return;
+       }
 
-       mutex_lock(&vq->mutex);
        vhost_disable_notify(&n->dev, vq);
 
        for (;;) {
@@ -102,15 +104,23 @@ static int vhost_test_open(struct inode *inode, struct file *f)
 {
        struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
        struct vhost_dev *dev;
+       struct vhost_virtqueue **vqs;
        int r;
 
        if (!n)
                return -ENOMEM;
+       vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+       if (!vqs) {
+               kfree(n);
+               return -ENOMEM;
+       }
 
        dev = &n->dev;
+       vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-       r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
+       r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
        if (r < 0) {
+               kfree(vqs);
                kfree(n);
                return r;
        }
@@ -126,9 +136,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
        void *private;
 
        mutex_lock(&vq->mutex);
-       private = rcu_dereference_protected(vq->private_data,
-                                        lockdep_is_held(&vq->mutex));
-       rcu_assign_pointer(vq->private_data, NULL);
+       private = vq->private_data;
+       vq->private_data = NULL;
        mutex_unlock(&vq->mutex);
        return private;
 }
@@ -140,7 +149,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
 
 static void vhost_test_flush_vq(struct vhost_test *n, int index)
 {
-       vhost_poll_flush(&n->dev.vqs[index].poll);
+       vhost_poll_flush(&n->vqs[index].poll);
 }
 
 static void vhost_test_flush(struct vhost_test *n)
@@ -268,14 +277,14 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                        return -EFAULT;
                return vhost_test_run(n, test);
        case VHOST_GET_FEATURES:
-               features = VHOST_NET_FEATURES;
+               features = VHOST_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
-               if (features & ~VHOST_NET_FEATURES)
+               if (features & ~VHOST_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_test_set_features(n, features);
        case VHOST_RESET_OWNER:
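
Both conversions above (vhost-scsi and the test driver) follow the reworked vhost_dev_init() convention: the caller keeps the vhost_virtqueue storage, builds a separate array of pointers to those queues, and hands that array to vhost_dev_init(). A hedged sketch with invented my_* names:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    #include "vhost.h"

    struct my_vhost {
    	struct vhost_dev dev;
    	struct vhost_virtqueue vqs[2];	/* queue storage stays in the driver */
    };

    static int my_vhost_setup(struct my_vhost *n)
    {
    	struct vhost_virtqueue **vqs;
    	int i, r;

    	vqs = kmalloc(ARRAY_SIZE(n->vqs) * sizeof(*vqs), GFP_KERNEL);
    	if (!vqs)
    		return -ENOMEM;

    	for (i = 0; i < ARRAY_SIZE(n->vqs); i++) {
    		vqs[i] = &n->vqs[i];		/* one pointer per queue */
    		n->vqs[i].handle_kick = NULL;	/* a real driver installs kick handlers here */
    	}

    	r = vhost_dev_init(&n->dev, vqs, ARRAY_SIZE(n->vqs));
    	if (r < 0)
    		kfree(vqs);
    	return r;
    }
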
index 60aa5ad..e58cf00 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
+#include <linux/module.h>
 
 #include "vhost.h"
 
@@ -66,6 +67,7 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
        work->flushing = 0;
        work->queue_seq = work->done_seq = 0;
 }
+EXPORT_SYMBOL_GPL(vhost_work_init);
 
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
@@ -79,6 +81,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 
        vhost_work_init(&poll->work, fn);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_init);
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
@@ -101,6 +104,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(vhost_poll_start);
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
@@ -111,6 +115,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
                poll->wqh = NULL;
        }
 }
+EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
                                unsigned seq)
@@ -123,7 +128,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
        return left <= 0;
 }
 
-static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
        unsigned seq;
        int flushing;
@@ -138,6 +143,7 @@ static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
        spin_unlock_irq(&dev->work_lock);
        BUG_ON(flushing < 0);
 }
+EXPORT_SYMBOL_GPL(vhost_work_flush);
 
 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
@@ -145,6 +151,7 @@ void vhost_poll_flush(struct vhost_poll *poll)
 {
        vhost_work_flush(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_flush);
 
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
@@ -158,11 +165,13 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
        }
        spin_unlock_irqrestore(&dev->work_lock, flags);
 }
+EXPORT_SYMBOL_GPL(vhost_work_queue);
 
 void vhost_poll_queue(struct vhost_poll *poll)
 {
        vhost_work_queue(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_queue);
 
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
@@ -251,17 +260,16 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 /* Helper to allocate iovec buffers for all vqs. */
 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 {
+       struct vhost_virtqueue *vq;
        int i;
 
        for (i = 0; i < dev->nvqs; ++i) {
-               dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
-                                              UIO_MAXIOV, GFP_KERNEL);
-               dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
-                                         GFP_KERNEL);
-               dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
-                                           UIO_MAXIOV, GFP_KERNEL);
-               if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
-                       !dev->vqs[i]->heads)
+               vq = dev->vqs[i];
+               vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
+                                      GFP_KERNEL);
+               vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
+               vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
+               if (!vq->indirect || !vq->log || !vq->heads)
                        goto err_nomem;
        }
        return 0;
@@ -283,6 +291,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 long vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs)
 {
+       struct vhost_virtqueue *vq;
        int i;
 
        dev->vqs = vqs;
@@ -297,19 +306,21 @@ long vhost_dev_init(struct vhost_dev *dev,
        dev->worker = NULL;
 
        for (i = 0; i < dev->nvqs; ++i) {
-               dev->vqs[i]->log = NULL;
-               dev->vqs[i]->indirect = NULL;
-               dev->vqs[i]->heads = NULL;
-               dev->vqs[i]->dev = dev;
-               mutex_init(&dev->vqs[i]->mutex);
-               vhost_vq_reset(dev, dev->vqs[i]);
-               if (dev->vqs[i]->handle_kick)
-                       vhost_poll_init(&dev->vqs[i]->poll,
-                                       dev->vqs[i]->handle_kick, POLLIN, dev);
+               vq = dev->vqs[i];
+               vq->log = NULL;
+               vq->indirect = NULL;
+               vq->heads = NULL;
+               vq->dev = dev;
+               mutex_init(&vq->mutex);
+               vhost_vq_reset(dev, vq);
+               if (vq->handle_kick)
+                       vhost_poll_init(&vq->poll, vq->handle_kick,
+                                       POLLIN, dev);
        }
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_init);
 
 /* Caller should have device mutex */
 long vhost_dev_check_owner(struct vhost_dev *dev)
@@ -317,6 +328,7 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 
 struct vhost_attach_cgroups_struct {
        struct vhost_work work;
@@ -348,6 +360,7 @@ bool vhost_dev_has_owner(struct vhost_dev *dev)
 {
        return dev->mm;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
@@ -391,11 +404,13 @@ err_worker:
 err_mm:
        return err;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 
 struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 {
        return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 
 /* Caller should have device mutex */
 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
@@ -406,6 +421,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
        memory->nregions = 0;
        RCU_INIT_POINTER(dev->memory, memory);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 
 void vhost_dev_stop(struct vhost_dev *dev)
 {
@@ -418,6 +434,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
                }
        }
 }
+EXPORT_SYMBOL_GPL(vhost_dev_stop);
 
 /* Caller should have device mutex if and only if locked is set */
 void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
@@ -458,6 +475,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
                mmput(dev->mm);
        dev->mm = NULL;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
@@ -543,6 +561,7 @@ int vhost_log_access_ok(struct vhost_dev *dev)
                                       lockdep_is_held(&dev->mutex));
        return memory_access_ok(dev, mp, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
@@ -568,6 +587,7 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
        return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
                vq_log_access_ok(vq->dev, vq, vq->log_base);
 }
+EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
@@ -797,6 +817,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                vhost_poll_flush(&vq->poll);
        return r;
 }
+EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
 
 /* Caller must have device mutex */
 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
@@ -877,6 +898,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 done:
        return r;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 
 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
                                                     __u64 addr, __u32 len)
@@ -968,6 +990,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
        BUG();
        return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_log_write);
 
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
@@ -1019,6 +1042,7 @@ int vhost_init_used(struct vhost_virtqueue *vq)
        vq->signalled_used_valid = false;
        return get_user(vq->last_used_idx, &vq->used->idx);
 }
+EXPORT_SYMBOL_GPL(vhost_init_used);
 
 static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
                          struct iovec iov[], int iov_size)
@@ -1295,12 +1319,14 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
        BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
        return head;
 }
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
 
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
        vq->last_avail_idx -= n;
 }
+EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
 
 /* After we've used one of their buffers, we tell them about it.  We'll then
  * want to notify the guest, using eventfd. */
@@ -1349,6 +1375,7 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
                vq->signalled_used_valid = false;
        return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used);
 
 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
                            struct vring_used_elem *heads,
@@ -1418,6 +1445,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
        }
        return r;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_n);
 
 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
@@ -1462,6 +1490,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        if (vq->call_ctx && vhost_notify(dev, vq))
                eventfd_signal(vq->call_ctx, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_signal);
 
 /* And here's the combo meal deal.  Supersize me! */
 void vhost_add_used_and_signal(struct vhost_dev *dev,
@@ -1471,6 +1500,7 @@ void vhost_add_used_and_signal(struct vhost_dev *dev,
        vhost_add_used(vq, head, len);
        vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
 
 /* multi-buffer version of vhost_add_used_and_signal */
 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
@@ -1480,6 +1510,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
        vhost_add_used_n(vq, heads, count);
        vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
 
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
@@ -1517,6 +1548,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 
        return avail_idx != vq->avail_idx;
 }
+EXPORT_SYMBOL_GPL(vhost_enable_notify);
 
 /* We don't need to be notified again. */
 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
@@ -1533,3 +1565,21 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                               &vq->used->flags, r);
        }
 }
+EXPORT_SYMBOL_GPL(vhost_disable_notify);
+
+static int __init vhost_init(void)
+{
+       return 0;
+}
+
+static void __exit vhost_exit(void)
+{
+}
+
+module_init(vhost_init);
+module_exit(vhost_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio");
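
The EXPORT_SYMBOL_GPL() markers and the module_init()/module_exit() boilerplate added above let the common vhost code build as a module of its own, with vhost_net, vhost_scsi and the test driver resolving these symbols at load time. A hypothetical minimal consumer (illustrative only, not an in-tree driver):

    #include <linux/module.h>

    #include "vhost.h"

    static struct vhost_virtqueue my_vq;
    static struct vhost_virtqueue *my_vqs[] = { &my_vq };
    static struct vhost_dev my_dev;

    static int __init my_consumer_init(void)
    {
    	/* Resolved against the newly exported vhost_dev_init(). */
    	return vhost_dev_init(&my_dev, my_vqs, 1);
    }

    static void __exit my_consumer_exit(void)
    {
    	vhost_dev_cleanup(&my_dev, false);	/* also an exported symbol now */
    }

    module_init(my_consumer_init);
    module_exit(my_consumer_exit);
    MODULE_LICENSE("GPL v2");
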
index 64adcf9..42298cd 100644 (file)
@@ -46,6 +46,8 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
+long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
 
 struct vhost_log {
        u64 addr;
index 0098810..1f572c0 100644 (file)
@@ -192,7 +192,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
         * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
         * is true, we *have* to do it in this order
         */
-       tell_host(vb, vb->deflate_vq);
+       if (vb->num_pfns != 0)
+               tell_host(vb, vb->deflate_vq);
        mutex_unlock(&vb->balloon_lock);
        release_pages_by_pfn(vb->pfns, vb->num_pfns);
 }
index a7ce730..1aba255 100644 (file)
@@ -289,9 +289,9 @@ static void vp_free_vectors(struct virtio_device *vdev)
 
                pci_disable_msix(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
-               vp_dev->msix_vectors = 0;
        }
 
+       vp_dev->msix_vectors = 0;
        vp_dev->msix_used_vectors = 0;
        kfree(vp_dev->msix_names);
        vp_dev->msix_names = NULL;
@@ -309,6 +309,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
        unsigned i, v;
        int err = -ENOMEM;
 
+       vp_dev->msix_vectors = nvectors;
+
        vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
                                       GFP_KERNEL);
        if (!vp_dev->msix_entries)
@@ -336,7 +338,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                err = -ENOSPC;
        if (err)
                goto error;
-       vp_dev->msix_vectors = nvectors;
        vp_dev->msix_enabled = 1;
 
        /* Set the vector used for configuration */
index bce8769..89dec7f 100644 (file)
@@ -255,8 +255,6 @@ static int load_aout_binary(struct linux_binprm * bprm)
                (current->mm->start_data = N_DATADDR(ex));
        current->mm->brk = ex.a_bss +
                (current->mm->start_brk = N_BSSADDR(ex));
-       current->mm->free_area_cache = current->mm->mmap_base;
-       current->mm->cached_hole_size = 0;
 
        retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
        if (retval < 0) {
index f8a0b0e..100edcc 100644 (file)
@@ -738,8 +738,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
 
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
-       current->mm->free_area_cache = current->mm->mmap_base;
-       current->mm->cached_hole_size = 0;
        retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
                                 executable_stack);
        if (retval < 0) {
index ace9a5f..fb425aa 100644 (file)
@@ -330,12 +330,9 @@ struct mm_struct {
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
-       void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 #endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long task_size;                /* size of task vm space */
-       unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */
-       unsigned long free_area_cache;          /* first hole of size cached_hole_size or larger */
        unsigned long highest_vm_end;           /* highest vma end address */
        pgd_t * pgd;
        atomic_t mm_users;                      /* How many users with user space? */
index 137b419..27d9da3 100644 (file)
@@ -439,7 +439,7 @@ extern struct kernel_param_ops param_ops_string;
 extern int param_set_copystring(const char *val, const struct kernel_param *);
 extern int param_get_string(char *buffer, const struct kernel_param *kp);
 
-/* for exporting parameters in /sys/parameters */
+/* for exporting parameters in /sys/module/.../parameters */
 
 struct module;
 
index f99d57e..50d04b9 100644 (file)
@@ -322,8 +322,6 @@ extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags);
-extern void arch_unmap_area(struct mm_struct *, unsigned long);
-extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #else
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
index ca3ad41..b300787 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_VIRTIO_RING_H
 #define _LINUX_VIRTIO_RING_H
 
+#include <asm/barrier.h>
 #include <linux/irqreturn.h>
 #include <uapi/linux/virtio_ring.h>
 
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
new file mode 100644 (file)
index 0000000..2571a5c
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef _ZBUD_H_
+#define _ZBUD_H_
+
+#include <linux/types.h>
+
+struct zbud_pool;
+
+struct zbud_ops {
+       int (*evict)(struct zbud_pool *pool, unsigned long handle);
+};
+
+struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
+void zbud_destroy_pool(struct zbud_pool *pool);
+int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
+       unsigned long *handle);
+void zbud_free(struct zbud_pool *pool, unsigned long handle);
+int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
+void *zbud_map(struct zbud_pool *pool, unsigned long handle);
+void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
+u64 zbud_get_pool_size(struct zbud_pool *pool);
+
+#endif /* _ZBUD_H_ */
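
A hedged usage sketch of the interface declared above; the evict callback and the demo function are illustrative rather than copied from an in-tree user (zswap is the intended consumer):

    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <linux/zbud.h>

    static int my_evict(struct zbud_pool *pool, unsigned long handle)
    {
    	/* Called during reclaim; a real user would write the data back first. */
    	zbud_free(pool, handle);
    	return 0;
    }

    static struct zbud_ops my_zbud_ops = { .evict = my_evict };

    static int zbud_demo(const void *buf, int len)
    {
    	struct zbud_pool *pool;
    	unsigned long handle;
    	void *dst;
    	int ret;

    	pool = zbud_create_pool(GFP_KERNEL, &my_zbud_ops);
    	if (!pool)
    		return -ENOMEM;

    	ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
    	if (ret)
    		goto out_pool;

    	dst = zbud_map(pool, handle);	/* handle -> kernel virtual address */
    	memcpy(dst, buf, len);
    	zbud_unmap(pool, handle);

    	zbud_free(pool, handle);
    out_pool:
    	zbud_destroy_pool(pool);
    	return ret;
    }
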
index 87ee4f4..916e444 100644 (file)
@@ -362,10 +362,14 @@ struct vfio_iommu_type1_dma_map {
 #define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
 
 /**
- * VFIO_IOMMU_UNMAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 14, struct vfio_dma_unmap)
+ * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
+ *                                                     struct vfio_dma_unmap)
  *
  * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
- * Caller sets argsz.
+ * Caller sets argsz.  The actual unmapped size is returned in the size
+ * field.  No guarantee is made to the user that arbitrary unmaps of iova
+ * or size different from those used in the original mapping call will
+ * succeed.
  */
 struct vfio_iommu_type1_dma_unmap {
        __u32   argsz;
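
A userspace sketch of the calling convention the updated comment describes: the caller fills argsz, iova and size, and the kernel writes the actually-unmapped byte count back into the size field. The surrounding container setup is assumed to exist; field names beyond argsz follow the uapi header:

    #include <sys/ioctl.h>

    #include <linux/vfio.h>

    /* Returns bytes actually unmapped, or -1 on error (errno set by ioctl). */
    static long long vfio_unmap(int container_fd, unsigned long long iova,
    			    unsigned long long size)
    {
    	struct vfio_iommu_type1_dma_unmap unmap = {
    		.argsz = sizeof(unmap),		/* caller sets argsz, as documented */
    		.iova  = iova,
    		.size  = size,
    	};

    	if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap) < 0)
    		return -1;

    	return unmap.size;		/* how much was really unmapped */
    }
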
index b7cda39..3ce768c 100644 (file)
@@ -51,4 +51,7 @@
  * suppressed them? */
 #define VIRTIO_F_NOTIFY_ON_EMPTY       24
 
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT            27
+
 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
index 1db3af9..1833bc5 100644 (file)
@@ -182,7 +182,7 @@ void update_perf_cpu_limits(void)
        u64 tmp = perf_sample_period_ns;
 
        tmp *= sysctl_perf_cpu_time_max_percent;
-       tmp = do_div(tmp, 100);
+       do_div(tmp, 100);
        atomic_set(&perf_sample_allowed_ns, tmp);
 }
 
@@ -232,7 +232,7 @@ DEFINE_PER_CPU(u64, running_sample_length);
 void perf_sample_event_took(u64 sample_len_ns)
 {
        u64 avg_local_sample_len;
-       u64 local_samples_len = __get_cpu_var(running_sample_length);
+       u64 local_samples_len;
 
        if (atomic_read(&perf_sample_allowed_ns) == 0)
                return;
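
The perf fix above turns on do_div() semantics: do_div(n, base) divides n in place and returns the remainder, so the old tmp = do_div(tmp, 100) overwrote the quotient with the remainder. A small illustrative helper (not from the tree):

    #include <linux/types.h>
    #include <asm/div64.h>

    static u64 percent_of(u64 value, unsigned int pct)
    {
    	u64 tmp = value * pct;

    	do_div(tmp, 100);	/* tmp now holds (value * pct) / 100 */
    	return tmp;		/* assigning do_div()'s result here would yield the remainder */
    }
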
index 6e6a1c1..66635c8 100644 (file)
@@ -365,8 +365,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        mm->locked_vm = 0;
        mm->mmap = NULL;
        mm->mmap_cache = NULL;
-       mm->free_area_cache = oldmm->mmap_base;
-       mm->cached_hole_size = ~0UL;
        mm->map_count = 0;
        cpumask_clear(mm_cpumask(mm));
        mm->mm_rb = RB_ROOT;
@@ -540,8 +538,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
        mm->nr_ptes = 0;
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
        spin_lock_init(&mm->page_table_lock);
-       mm->free_area_cache = TASK_UNMAPPED_BASE;
-       mm->cached_hole_size = ~0UL;
        mm_init_aio(mm);
        mm_init_owner(mm, p);
 
index cab4bce..2069158 100644 (file)
@@ -455,7 +455,7 @@ const struct kernel_symbol *find_symbol(const char *name,
 EXPORT_SYMBOL_GPL(find_symbol);
 
 /* Search for module by name: must hold module_mutex. */
-static struct module *find_module_all(const char *name,
+static struct module *find_module_all(const char *name, size_t len,
                                      bool even_unformed)
 {
        struct module *mod;
@@ -463,7 +463,7 @@ static struct module *find_module_all(const char *name,
        list_for_each_entry(mod, &modules, list) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               if (strcmp(mod->name, name) == 0)
+               if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
                        return mod;
        }
        return NULL;
@@ -471,7 +471,7 @@ static struct module *find_module_all(const char *name,
 
 struct module *find_module(const char *name)
 {
-       return find_module_all(name, false);
+       return find_module_all(name, strlen(name), false);
 }
 EXPORT_SYMBOL_GPL(find_module);
 
@@ -482,23 +482,28 @@ static inline void __percpu *mod_percpu(struct module *mod)
        return mod->percpu;
 }
 
-static int percpu_modalloc(struct module *mod,
-                          unsigned long size, unsigned long align)
+static int percpu_modalloc(struct module *mod, struct load_info *info)
 {
+       Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+       unsigned long align = pcpusec->sh_addralign;
+
+       if (!pcpusec->sh_size)
+               return 0;
+
        if (align > PAGE_SIZE) {
                printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
                       mod->name, align, PAGE_SIZE);
                align = PAGE_SIZE;
        }
 
-       mod->percpu = __alloc_reserved_percpu(size, align);
+       mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
        if (!mod->percpu) {
                printk(KERN_WARNING
                       "%s: Could not allocate %lu bytes percpu data\n",
-                      mod->name, size);
+                      mod->name, (unsigned long)pcpusec->sh_size);
                return -ENOMEM;
        }
-       mod->percpu_size = size;
+       mod->percpu_size = pcpusec->sh_size;
        return 0;
 }
 
@@ -563,10 +568,12 @@ static inline void __percpu *mod_percpu(struct module *mod)
 {
        return NULL;
 }
-static inline int percpu_modalloc(struct module *mod,
-                                 unsigned long size, unsigned long align)
+static int percpu_modalloc(struct module *mod, struct load_info *info)
 {
-       return -ENOMEM;
+       /* UP modules shouldn't have this section: ENOMEM isn't quite right */
+       if (info->sechdrs[info->index.pcpu].sh_size != 0)
+               return -ENOMEM;
+       return 0;
 }
 static inline void percpu_modfree(struct module *mod)
 {
@@ -2927,7 +2934,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 {
        /* Module within temporary copy. */
        struct module *mod;
-       Elf_Shdr *pcpusec;
        int err;
 
        mod = setup_load_info(info, flags);
@@ -2942,17 +2948,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
        err = module_frob_arch_sections(info->hdr, info->sechdrs,
                                        info->secstrings, mod);
        if (err < 0)
-               goto out;
+               return ERR_PTR(err);
 
-       pcpusec = &info->sechdrs[info->index.pcpu];
-       if (pcpusec->sh_size) {
-               /* We have a special allocation for this section. */
-               err = percpu_modalloc(mod,
-                                     pcpusec->sh_size, pcpusec->sh_addralign);
-               if (err)
-                       goto out;
-               pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
-       }
+       /* We will do a special allocation for per-cpu sections later. */
+       info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
 
        /* Determine total sizes, and put offsets in sh_entsize.  For now
           this is done generically; there doesn't appear to be any
@@ -2963,17 +2962,12 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
        /* Allocate and move to the final place */
        err = move_module(mod, info);
        if (err)
-               goto free_percpu;
+               return ERR_PTR(err);
 
        /* Module has been copied to its final place now: return it. */
        mod = (void *)info->sechdrs[info->index.mod].sh_addr;
        kmemleak_load_module(mod, info);
        return mod;
-
-free_percpu:
-       percpu_modfree(mod);
-out:
-       return ERR_PTR(err);
 }
 
 /* mod is no longer valid after this! */
@@ -3014,7 +3008,7 @@ static bool finished_loading(const char *name)
        bool ret;
 
        mutex_lock(&module_mutex);
-       mod = find_module_all(name, true);
+       mod = find_module_all(name, strlen(name), true);
        ret = !mod || mod->state == MODULE_STATE_LIVE
                || mod->state == MODULE_STATE_GOING;
        mutex_unlock(&module_mutex);
@@ -3152,7 +3146,8 @@ static int add_unformed_module(struct module *mod)
 
 again:
        mutex_lock(&module_mutex);
-       if ((old = find_module_all(mod->name, true)) != NULL) {
+       old = find_module_all(mod->name, strlen(mod->name), true);
+       if (old != NULL) {
                if (old->state == MODULE_STATE_COMING
                    || old->state == MODULE_STATE_UNFORMED) {
                        /* Wait in case it fails to load. */
@@ -3198,6 +3193,17 @@ out:
        return err;
 }
 
+static int unknown_module_param_cb(char *param, char *val, const char *modname)
+{
+       /* Check for magic 'dyndbg' arg */ 
+       int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
+       if (ret != 0) {
+               printk(KERN_WARNING "%s: unknown parameter '%s' ignored\n",
+                      modname, param);
+       }
+       return 0;
+}
+
 /* Allocate and load the module: note that size of section 0 is always
    zero, and we rely on this for optional sections. */
 static int load_module(struct load_info *info, const char __user *uargs,
@@ -3237,6 +3243,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
        }
 #endif
 
+       /* To avoid stressing percpu allocator, do this once we're unique. */
+       err = percpu_modalloc(mod, info);
+       if (err)
+               goto unlink_mod;
+
        /* Now module is in final location, initialize linked lists, etc. */
        err = module_unload_init(mod);
        if (err)
@@ -3284,7 +3295,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        /* Module is ready to execute: parsing args may do that. */
        err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
-                        -32768, 32767, &ddebug_dyndbg_module_param_cb);
+                        -32768, 32767, unknown_module_param_cb);
        if (err < 0)
                goto bug_cleanup;
 
@@ -3563,10 +3574,8 @@ unsigned long module_kallsyms_lookup_name(const char *name)
        /* Don't lock: we're in enough trouble already. */
        preempt_disable();
        if ((colon = strchr(name, ':')) != NULL) {
-               *colon = '\0';
-               if ((mod = find_module(name)) != NULL)
+               if ((mod = find_module_all(name, colon - name, false)) != NULL)
                        ret = mod_find_symname(mod, colon+1);
-               *colon = ':';
        } else {
                list_for_each_entry_rcu(mod, &modules, list) {
                        if (mod->state == MODULE_STATE_UNFORMED)
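
The extra length argument to find_module_all() above lets module_kallsyms_lookup_name() handle the "module:symbol" form without temporarily writing a NUL into the caller's string: the module part is matched by length instead. The comparison reduces to the following, sketched as a hypothetical helper:

    #include <linux/string.h>
    #include <linux/types.h>

    /* True when the first 'len' bytes of 'name' spell out exactly 'mod_name'. */
    static bool module_name_eq(const char *mod_name, const char *name, size_t len)
    {
    	return strlen(mod_name) == len && !memcmp(mod_name, name, len);
    }

    /*
     * For a lookup string such as "usbcore:usb_register", len is
     * strchr(name, ':') - name, so the string itself is never modified.
     */
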
index 53b958f..440e65d 100644 (file)
@@ -787,7 +787,7 @@ static void __init kernel_add_sysfs_param(const char *name,
 }
 
 /*
- * param_sysfs_builtin - add contents in /sys/parameters for built-in modules
+ * param_sysfs_builtin - add sysfs parameters for built-in modules
  *
  * Add module_parameters to sysfs for "modules" built into the kernel.
  *
index 8212c1a..d37d45c 100644 (file)
@@ -1369,9 +1369,9 @@ static int console_trylock_for_printk(unsigned int cpu)
                }
        }
        logbuf_cpu = UINT_MAX;
+       raw_spin_unlock(&logbuf_lock);
        if (wake)
                up(&console_sem);
-       raw_spin_unlock(&logbuf_lock);
        return retval;
 }
 
index 7e28ecf..8028dcc 100644 (file)
@@ -478,6 +478,36 @@ config FRONTSWAP
 
          If unsure, say Y to enable frontswap.
 
+config ZBUD
+       tristate
+       default n
+       help
+         A special purpose allocator for storing compressed pages.
+         It is designed to store up to two compressed pages per physical
+         page.  While this design limits storage density, it has simple and
+         deterministic reclaim properties that make it preferable to a higher
+         density approach when reclaim will be used.
+
+config ZSWAP
+       bool "Compressed cache for swap pages (EXPERIMENTAL)"
+       depends on FRONTSWAP && CRYPTO=y
+       select CRYPTO_LZO
+       select ZBUD
+       default n
+       help
+         A lightweight compressed cache for swap pages.  It takes
+         pages that are in the process of being swapped out and attempts to
+         compress them into a dynamically allocated RAM-based memory pool.
+         This can result in a significant I/O reduction on the swap device
+         and, in the case where decompressing from RAM is faster than swap
+         device reads, can also improve workload performance.
+
+         This is marked experimental because it is a new feature (as of
+         v3.11) that interacts heavily with memory reclaim.  While these
+         interactions don't cause any known issues on simple memory setups,
+         they have not be fully explored on the large set of potential
+         they have not been fully explored on the large set of potential
+
 config MEM_SOFT_DIRTY
        bool "Track memory changes"
        depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY
index 72c5acb..f008033 100644 (file)
@@ -32,6 +32,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 obj-$(CONFIG_BOUNCE)   += bounce.o
 obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o
 obj-$(CONFIG_FRONTSWAP)        += frontswap.o
+obj-$(CONFIG_ZSWAP)    += zswap.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
 obj-$(CONFIG_HUGETLBFS)        += hugetlb.o
 obj-$(CONFIG_NUMA)     += mempolicy.o
@@ -58,3 +59,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
 obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
 obj-$(CONFIG_CLEANCACHE) += cleancache.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
+obj-$(CONFIG_ZBUD)     += zbud.o
index f813111..fbad7b0 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1878,15 +1878,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 #endif 
 
-void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
-{
-       /*
-        * Is this a new hole at the lowest possible address?
-        */
-       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
-               mm->free_area_cache = addr;
-}
-
 /*
  * This mmap-allocator allocates new areas top-down from below the
  * stack's low limit (the base):
@@ -1943,19 +1934,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 }
 #endif
 
-void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
-{
-       /*
-        * Is this a new hole at the highest possible address?
-        */
-       if (addr > mm->free_area_cache)
-               mm->free_area_cache = addr;
-
-       /* dont allow allocations above current base */
-       if (mm->free_area_cache > mm->mmap_base)
-               mm->free_area_cache = mm->mmap_base;
-}
-
 unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
@@ -2376,7 +2354,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct vm_area_struct **insertion_point;
        struct vm_area_struct *tail_vma = NULL;
-       unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        vma->vm_prev = NULL;
@@ -2393,11 +2370,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        } else
                mm->highest_vm_end = prev ? prev->vm_end : 0;
        tail_vma->vm_next = NULL;
-       if (mm->unmap_area == arch_unmap_area)
-               addr = prev ? prev->vm_end : mm->mmap_base;
-       else
-               addr = vma ?  vma->vm_start : mm->mmap_base;
-       mm->unmap_area(mm, addr);
        mm->mmap_cache = NULL;          /* Kill the cache. */
 }
 
index e44e6e0..ecd1f15 100644 (file)
@@ -1871,10 +1871,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
        return -ENOMEM;
 }
 
-void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
-{
-}
-
 void unmap_mapping_range(struct address_space *mapping,
                         loff_t const holebegin, loff_t const holelen,
                         int even_cows)
index ab1424d..7441c41 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -295,7 +295,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 {
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
-       mm->unmap_area = arch_unmap_area;
 }
 #endif
 
diff --git a/mm/zbud.c b/mm/zbud.c
new file mode 100644 (file)
index 0000000..9bb4710
--- /dev/null
+++ b/mm/zbud.c
@@ -0,0 +1,527 @@
+/*
+ * zbud.c
+ *
+ * Copyright (C) 2013, Seth Jennings, IBM
+ *
+ * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
+ *
+ * zbud is a special purpose allocator for storing compressed pages.  Contrary
+ * to what its name may suggest, zbud is not a buddy allocator, but rather an
+ * allocator that "buddies" two compressed pages together in a single memory
+ * page.
+ *
+ * While this design limits storage density, it has simple and deterministic
+ * reclaim properties that make it preferable to a higher density approach when
+ * reclaim will be used.
+ *
+ * zbud works by storing compressed pages, or "zpages", together in pairs in a
+ * single memory page called a "zbud page".  The first buddy is "left
+ * justified" at the beginning of the zbud page, and the last buddy is "right
+ * justified" at the end of the zbud page.  The benefit is that if either
+ * buddy is freed, the freed buddy space, coalesced with whatever slack space
+ * existed between the buddies, results in the largest possible free region
+ * within the zbud page.
+ *
+ * zbud also provides an attractive lower bound on density. The ratio of zpages
+ * to zbud pages can not be less than 1.  This ensures that zbud can never "do
+ * harm" by using more pages to store zpages than the uncompressed zpages would
+ * have used on their own.
+ *
+ * zbud pages are divided into "chunks".  The size of the chunks is fixed at
+ * compile time and determined by NCHUNKS_ORDER below.  Dividing zbud pages
+ * into chunks allows organizing unbuddied zbud pages into a manageable number
+ * of unbuddied lists according to the number of free chunks available in the
+ * zbud page.
+ *
+ * The zbud API differs from that of conventional allocators in that the
+ * allocation function, zbud_alloc(), returns an opaque handle to the user,
+ * not a dereferenceable pointer.  The user must map the handle using
+ * zbud_map() in order to get a usable pointer by which to access the
+ * allocation data and unmap the handle with zbud_unmap() when operations
+ * on the allocation data are complete.
+ */
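As an illustration of the handle-based API described in the comment above, here is a minimal usage sketch.  It is not part of this patch: the names my_evict, my_ops and zbud_usage_sketch are hypothetical, and the 100-byte allocation size is arbitrary.

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/zbud.h>

/* Hypothetical eviction callback; zbud_reclaim_page() calls this and expects
 * it to zbud_free() the handle when eviction succeeds. */
static int my_evict(struct zbud_pool *pool, unsigned long handle)
{
	zbud_free(pool, handle);
	return 0;
}

static struct zbud_ops my_ops = {
	.evict = my_evict,
};

static int zbud_usage_sketch(void)
{
	struct zbud_pool *pool;
	unsigned long handle;
	void *data;
	int ret;

	pool = zbud_create_pool(GFP_KERNEL, &my_ops);
	if (!pool)
		return -ENOMEM;

	/* Room for a 100-byte compressed object; the handle is opaque. */
	ret = zbud_alloc(pool, 100, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	/* Map the handle before touching the data, unmap when done. */
	data = zbud_map(pool, handle);
	memset(data, 0, 100);
	zbud_unmap(pool, handle);

	pr_info("pool holds %llu pages\n",
		(unsigned long long)zbud_get_pool_size(pool));
	zbud_free(pool, handle);
out:
	zbud_destroy_pool(pool);
	return ret;
}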
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/zbud.h>
+
+/*****************
+ * Structures
+*****************/
+/*
+ * NCHUNKS_ORDER determines the internal allocation granularity, effectively
+ * adjusting internal fragmentation.  It also determines the number of
+ * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
+ * allocation granularity will be in chunks of size PAGE_SIZE/64, and there
+ * will be 64 freelists per pool.
+ */
+#define NCHUNKS_ORDER  6
+
+#define CHUNK_SHIFT    (PAGE_SHIFT - NCHUNKS_ORDER)
+#define CHUNK_SIZE     (1 << CHUNK_SHIFT)
+#define NCHUNKS                (PAGE_SIZE >> CHUNK_SHIFT)
+#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
+
+/**
+ * struct zbud_pool - stores metadata for each zbud pool
+ * @lock:      protects all pool fields and first|last_chunk fields of any
+ *             zbud page in the pool
+ * @unbuddied: array of lists tracking zbud pages that only contain one buddy;
+ *             the lists each zbud page is added to depends on the size of
+ *             its free region.
+ * @buddied:   list tracking the zbud pages that contain two buddies;
+ *             these zbud pages are full
+ * @lru:       list tracking the zbud pages in LRU order by most recently
+ *             added buddy.
+ * @pages_nr:  number of zbud pages in the pool.
+ * @ops:       pointer to a structure of user defined operations specified at
+ *             pool creation time.
+ *
+ * This structure is allocated at pool creation time and maintains metadata
+ * pertaining to a particular zbud pool.
+ */
+struct zbud_pool {
+       spinlock_t lock;
+       struct list_head unbuddied[NCHUNKS];
+       struct list_head buddied;
+       struct list_head lru;
+       u64 pages_nr;
+       struct zbud_ops *ops;
+};
+
+/*
+ * struct zbud_header - zbud page metadata occupying the first chunk of each
+ *                     zbud page.
+ * @buddy:     links the zbud page into the unbuddied/buddied lists in the pool
+ * @lru:       links the zbud page into the lru list in the pool
+ * @first_chunks:      the size of the first buddy in chunks, 0 if free
+ * @last_chunks:       the size of the last buddy in chunks, 0 if free
+ * @under_reclaim:     true while the zbud page is being reclaimed; tells
+ *                     zbud_free() to leave the actual freeing to reclaim
+ */
+struct zbud_header {
+       struct list_head buddy;
+       struct list_head lru;
+       unsigned int first_chunks;
+       unsigned int last_chunks;
+       bool under_reclaim;
+};
+
+/*****************
+ * Helpers
+*****************/
+/* Just to make the code easier to read */
+enum buddy {
+       FIRST,
+       LAST
+};
+
+/* Converts an allocation size in bytes to size in zbud chunks */
+static int size_to_chunks(int size)
+{
+       return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
+}
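To make the chunk arithmetic concrete, a short worked example assuming 4 KiB pages (PAGE_SHIFT = 12) together with the NCHUNKS_ORDER of 6 defined above:

/* CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes and NCHUNKS = 64.      */
/* The zbud header occupies the first chunk (ZHDR_SIZE_ALIGNED = 64 bytes).  */
/* A 700-byte compressed page then needs                                     */
/*     size_to_chunks(700) = (700 + 63) >> 6 = 11 chunks (704 bytes),        */
/* leaving 64 - 11 - 1 = 52 chunks free for a second buddy                   */
/* (one chunk being the header).                                             */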
+
+#define for_each_unbuddied_list(_iter, _begin) \
+       for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
+
+/* Initializes the zbud header of a newly allocated zbud page */
+static struct zbud_header *init_zbud_page(struct page *page)
+{
+       struct zbud_header *zhdr = page_address(page);
+       zhdr->first_chunks = 0;
+       zhdr->last_chunks = 0;
+       INIT_LIST_HEAD(&zhdr->buddy);
+       INIT_LIST_HEAD(&zhdr->lru);
+       zhdr->under_reclaim = 0;
+       return zhdr;
+}
+
+/* Resets the struct page fields and frees the page */
+static void free_zbud_page(struct zbud_header *zhdr)
+{
+       __free_page(virt_to_page(zhdr));
+}
+
+/*
+ * Encodes the handle of a particular buddy within a zbud page
+ * Pool lock should be held as this function accesses first|last_chunks
+ */
+static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
+{
+       unsigned long handle;
+
+       /*
+        * For now, the encoded handle is actually just the pointer to the data
+        * but this might not always be the case.  A little information hiding.
+        * Add CHUNK_SIZE to the handle if it is the first allocation to jump
+        * over the zbud header in the first chunk.
+        */
+       handle = (unsigned long)zhdr;
+       if (bud == FIRST)
+               /* skip over zbud header */
+               handle += ZHDR_SIZE_ALIGNED;
+       else /* bud == LAST */
+               handle += PAGE_SIZE - (zhdr->last_chunks  << CHUNK_SHIFT);
+       return handle;
+}
+
+/* Returns the zbud page where a given handle is stored */
+static struct zbud_header *handle_to_zbud_header(unsigned long handle)
+{
+       return (struct zbud_header *)(handle & PAGE_MASK);
+}
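Continuing the 4 KiB example, a sketch of the handles encode_handle() hands out for a zbud page whose header lives at some address zhdr (the address itself is hypothetical):

/* FIRST buddy: handle = (unsigned long)zhdr + 64                        */
/*              (ZHDR_SIZE_ALIGNED, i.e. just past the header chunk)     */
/* LAST buddy, with last_chunks = 11:                                    */
/*              handle = (unsigned long)zhdr + 4096 - (11 << 6)          */
/*                     = (unsigned long)zhdr + 3392                      */
/* handle_to_zbud_header() recovers zhdr by masking with PAGE_MASK.      */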
+
+/* Returns the number of free chunks in a zbud page */
+static int num_free_chunks(struct zbud_header *zhdr)
+{
+       /*
+        * Rather than branch for different situations, just use the fact that
+        * free buddies have a length of zero to simplify everything. -1 at the
+        * end for the zbud header.
+        */
+       return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks - 1;
+}
+
+/*****************
+ * API Functions
+*****************/
+/**
+ * zbud_create_pool() - create a new zbud pool
+ * @gfp:       gfp flags when allocating the zbud pool structure
+ * @ops:       user-defined operations for the zbud pool
+ *
+ * Return: pointer to the new zbud pool or NULL if the metadata allocation
+ * failed.
+ */
+struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
+{
+       struct zbud_pool *pool;
+       int i;
+
+       pool = kmalloc(sizeof(struct zbud_pool), gfp);
+       if (!pool)
+               return NULL;
+       spin_lock_init(&pool->lock);
+       for_each_unbuddied_list(i, 0)
+               INIT_LIST_HEAD(&pool->unbuddied[i]);
+       INIT_LIST_HEAD(&pool->buddied);
+       INIT_LIST_HEAD(&pool->lru);
+       pool->pages_nr = 0;
+       pool->ops = ops;
+       return pool;
+}
+
+/**
+ * zbud_destroy_pool() - destroys an existing zbud pool
+ * @pool:      the zbud pool to be destroyed
+ *
+ * The pool should be emptied before this function is called.
+ */
+void zbud_destroy_pool(struct zbud_pool *pool)
+{
+       kfree(pool);
+}
+
+/**
+ * zbud_alloc() - allocates a region of a given size
+ * @pool:      zbud pool from which to allocate
+ * @size:      size in bytes of the desired allocation
+ * @gfp:       gfp flags used if the pool needs to grow
+ * @handle:    handle of the new allocation
+ *
+ * This function will attempt to find a free region in the pool large enough to
+ * satisfy the allocation request.  A search of the unbuddied lists is
+ * performed first. If no suitable free region is found, then a new page is
+ * allocated and added to the pool to satisfy the request.
+ *
+ * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
+ * as zbud pool pages.
+ *
+ * Return: 0 on success with *handle set, otherwise -EINVAL if the size or
+ * gfp arguments are invalid, -ENOSPC if the allocation is too large to fit
+ * in a zbud page, or -ENOMEM if the pool was unable to allocate a new page.
+ */
+int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
+                       unsigned long *handle)
+{
+       int chunks, i, freechunks;
+       struct zbud_header *zhdr = NULL;
+       enum buddy bud;
+       struct page *page;
+
+       if (size <= 0 || gfp & __GFP_HIGHMEM)
+               return -EINVAL;
+       if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+               return -ENOSPC;
+       chunks = size_to_chunks(size);
+       spin_lock(&pool->lock);
+
+       /* First, try to find an unbuddied zbud page. */
+       zhdr = NULL;
+       for_each_unbuddied_list(i, chunks) {
+               if (!list_empty(&pool->unbuddied[i])) {
+                       zhdr = list_first_entry(&pool->unbuddied[i],
+                                       struct zbud_header, buddy);
+                       list_del(&zhdr->buddy);
+                       if (zhdr->first_chunks == 0)
+                               bud = FIRST;
+                       else
+                               bud = LAST;
+                       goto found;
+               }
+       }
+
+       /* Couldn't find unbuddied zbud page, create new one */
+       spin_unlock(&pool->lock);
+       page = alloc_page(gfp);
+       if (!page)
+               return -ENOMEM;
+       spin_lock(&pool->lock);
+       pool->pages_nr++;
+       zhdr = init_zbud_page(page);
+       bud = FIRST;
+
+found:
+       if (bud == FIRST)
+               zhdr->first_chunks = chunks;
+       else
+               zhdr->last_chunks = chunks;
+
+       if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
+               /* Add to unbuddied list */
+               freechunks = num_free_chunks(zhdr);
+               list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+       } else {
+               /* Add to buddied list */
+               list_add(&zhdr->buddy, &pool->buddied);
+       }
+
+       /* Add/move zbud page to beginning of LRU */
+       if (!list_empty(&zhdr->lru))
+               list_del(&zhdr->lru);
+       list_add(&zhdr->lru, &pool->lru);
+
+       *handle = encode_handle(zhdr, bud);
+       spin_unlock(&pool->lock);
+
+       return 0;
+}
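A note on the unlock/alloc_page()/lock sequence above: the pool lock is a spinlock, so the page allocation, whose blocking behaviour depends on the caller's gfp mask, is done with the lock dropped; the lock is only re-taken to link the new zbud page into the pool's lists and LRU.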
+
+/**
+ * zbud_free() - frees the allocation associated with the given handle
+ * @pool:      pool in which the allocation resided
+ * @handle:    handle associated with the allocation returned by zbud_alloc()
+ *
+ * In the case that the zbud page in which the allocation resides is under
+ * reclaim, as indicated by the under_reclaim flag in the zbud header, this
+ * function only sets the first|last_chunks to 0.  The page is actually freed
+ * once both buddies are evicted (see zbud_reclaim_page() below).
+ */
+void zbud_free(struct zbud_pool *pool, unsigned long handle)
+{
+       struct zbud_header *zhdr;
+       int freechunks;
+
+       spin_lock(&pool->lock);
+       zhdr = handle_to_zbud_header(handle);
+
+       /* If first buddy, handle will be page aligned */
+       if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
+               zhdr->last_chunks = 0;
+       else
+               zhdr->first_chunks = 0;
+
+       if (zhdr->under_reclaim) {
+               /* zbud page is under reclaim, reclaim will free */
+               spin_unlock(&pool->lock);
+               return;
+       }
+
+       /* Remove from existing buddy list */
+       list_del(&zhdr->buddy);
+
+       if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+               /* zbud page is empty, free */
+               list_del(&zhdr->lru);
+               free_zbud_page(zhdr);
+               pool->pages_nr--;
+       } else {
+               /* Add to unbuddied list */
+               freechunks = num_free_chunks(zhdr);
+               list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+       }
+
+       spin_unlock(&pool->lock);
+}
+
+#define list_tail_entry(ptr, type, member) \
+       list_entry((ptr)->prev, type, member)
+
+/**
+ * zbud_reclaim_page() - evicts allocations from a pool page and frees it
+ * @pool:      pool from which a page will attempt to be evicted
+ * @retries:   number of pages on the LRU list for which eviction will
+ *             be attempted before failing
+ *
+ * zbud reclaim is different from normal system reclaim in that the reclaim is
+ * done from the bottom, up.  This is because only the bottom layer, zbud, has
+ * information on how the allocations are organized within each zbud page. This
+ * has the potential to create interesting locking situations between zbud and
+ * the user, however.
+ *
+ * To avoid these, this is how zbud_reclaim_page() should be called:
+ *
+ * The user detects a page should be reclaimed and calls zbud_reclaim_page().
+ * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
+ * the user-defined eviction handler with the pool and handle as arguments.
+ *
+ * If the handle can not be evicted, the eviction handler should return
+ * non-zero. zbud_reclaim_page() will add the zbud page back to the
+ * appropriate list and try the next zbud page on the LRU up to
+ * a user defined number of retries.
+ *
+ * If the handle is successfully evicted, the eviction handler should
+ * return 0 _and_ should have called zbud_free() on the handle. zbud_free()
+ * contains logic to delay freeing the page if the page is under reclaim,
+ * as indicated by the under_reclaim flag being set in the zbud header.
+ *
+ * If all buddies in the zbud page are successfully evicted, then the
+ * zbud page can be freed.
+ *
+ * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
+ * no pages to evict or an eviction handler is not registered, -EAGAIN if
+ * the retry limit was hit.
+ */
+int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
+{
+       int i, ret, freechunks;
+       struct zbud_header *zhdr;
+       unsigned long first_handle = 0, last_handle = 0;
+
+       spin_lock(&pool->lock);
+       if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
+                       retries == 0) {
+               spin_unlock(&pool->lock);
+               return -EINVAL;
+       }
+       for (i = 0; i < retries; i++) {
+               zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
+               list_del(&zhdr->lru);
+               list_del(&zhdr->buddy);
+               /* Protect zbud page against free */
+               zhdr->under_reclaim = true;
+               /*
+                * We need to encode the handles before unlocking, since we can
+                * race with a free that will set (first|last)_chunks to 0
+                */
+               first_handle = 0;
+               last_handle = 0;
+               if (zhdr->first_chunks)
+                       first_handle = encode_handle(zhdr, FIRST);
+               if (zhdr->last_chunks)
+                       last_handle = encode_handle(zhdr, LAST);
+               spin_unlock(&pool->lock);
+
+               /* Issue the eviction callback(s) */
+               if (first_handle) {
+                       ret = pool->ops->evict(pool, first_handle);
+                       if (ret)
+                               goto next;
+               }
+               if (last_handle) {
+                       ret = pool->ops->evict(pool, last_handle);
+                       if (ret)
+                               goto next;
+               }
+next:
+               spin_lock(&pool->lock);
+               zhdr->under_reclaim = false;
+               if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+                       /*
+                        * Both buddies are now free, free the zbud page and
+                        * return success.
+                        */
+                       free_zbud_page(zhdr);
+                       pool->pages_nr--;
+                       spin_unlock(&pool->lock);
+                       return 0;
+               } else if (zhdr->first_chunks == 0 ||
+                               zhdr->last_chunks == 0) {
+                       /* add to unbuddied list */
+                       freechunks = num_free_chunks(zhdr);
+                       list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+               } else {
+                       /* add to buddied list */
+                       list_add(&zhdr->buddy, &pool->buddied);
+               }
+
+               /* add to beginning of LRU */
+               list_add(&zhdr->lru, &pool->lru);
+       }
+       spin_unlock(&pool->lock);
+       return -EAGAIN;
+}
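As a usage note for the protocol described above, the caller typically drives reclaim when its pool is over some limit; a sketch (pool here being the caller's zbud_pool, mirroring the zswap store path later in this merge, which uses 8 retries):

	/* Try to evict up to 8 LRU zbud pages before giving up. */
	if (zbud_reclaim_page(pool, 8)) {
		/* reclaim failed: reject the incoming store */
		return -ENOMEM;
	}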
+
+/**
+ * zbud_map() - maps the allocation associated with the given handle
+ * @pool:      pool in which the allocation resides
+ * @handle:    handle associated with the allocation to be mapped
+ *
+ * While trivial for zbud, the mapping functions for other allocators
+ * implementing this allocation API could have more complex information encoded
+ * in the handle and could create temporary mappings to make the data
+ * accessible to the user.
+ *
+ * Returns: a pointer to the mapped allocation
+ */
+void *zbud_map(struct zbud_pool *pool, unsigned long handle)
+{
+       return (void *)(handle);
+}
+
+/**
+ * zbud_unmap() - unmaps the allocation associated with the given handle
+ * @pool:      pool in which the allocation resides
+ * @handle:    handle associated with the allocation to be unmapped
+ */
+void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
+{
+}
+
+/**
+ * zbud_get_pool_size() - gets the zbud pool size in pages
+ * @pool:      pool whose size is being queried
+ *
+ * Returns: size in pages of the given pool.  The pool lock need not be
+ * taken to access pages_nr.
+ */
+u64 zbud_get_pool_size(struct zbud_pool *pool)
+{
+       return pool->pages_nr;
+}
+
+static int __init init_zbud(void)
+{
+       /* Make sure the zbud header will fit in one chunk */
+       BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
+       pr_info("loaded\n");
+       return 0;
+}
+
+static void __exit exit_zbud(void)
+{
+       pr_info("unloaded\n");
+}
+
+module_init(init_zbud);
+module_exit(exit_zbud);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
diff --git a/mm/zswap.c b/mm/zswap.c
new file mode 100644 (file)
index 0000000..deda2b6
--- /dev/null
@@ -0,0 +1,943 @@
+/*
+ * zswap.c - zswap driver file
+ *
+ * zswap is a backend for frontswap that takes pages that are in the process
+ * of being swapped out and attempts to compress and store them in a
+ * RAM-based memory pool.  This can result in a significant I/O reduction on
+ * the swap device and, in the case where decompressing from RAM is faster
+ * than reading from the swap device, can also improve workload performance.
+ *
+ * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/frontswap.h>
+#include <linux/rbtree.h>
+#include <linux/swap.h>
+#include <linux/crypto.h>
+#include <linux/mempool.h>
+#include <linux/zbud.h>
+
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
+#include <linux/swapops.h>
+#include <linux/writeback.h>
+#include <linux/pagemap.h>
+
+/*********************************
+* statistics
+**********************************/
+/* Number of memory pages used by the compressed pool */
+static u64 zswap_pool_pages;
+/* The number of compressed pages currently stored in zswap */
+static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+
+/*
+ * The statistics below are not protected from concurrent access for
+ * performance reasons so they may not be 100% accurate.  However,
+ * they do provide useful information on roughly how many times a
+ * certain event is occurring.
+*/
+
+/* Pool limit was hit (see zswap_max_pool_percent) */
+static u64 zswap_pool_limit_hit;
+/* Pages written back when pool limit was reached */
+static u64 zswap_written_back_pages;
+/* Store failed due to a reclaim failure after pool limit was reached */
+static u64 zswap_reject_reclaim_fail;
+/* Compressed page was too big for the allocator to (optimally) store */
+static u64 zswap_reject_compress_poor;
+/* Store failed because underlying allocator could not get memory */
+static u64 zswap_reject_alloc_fail;
+/* Store failed because the entry metadata could not be allocated (rare) */
+static u64 zswap_reject_kmemcache_fail;
+/* Duplicate store was encountered (rare) */
+static u64 zswap_duplicate_entry;
+
+/*********************************
+* tunables
+**********************************/
+/* Enable/disable zswap (disabled by default, fixed at boot for now) */
+static bool zswap_enabled __read_mostly;
+module_param_named(enabled, zswap_enabled, bool, 0);
+
+/* Compressor to be used by zswap (fixed at boot for now) */
+#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
+static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+module_param_named(compressor, zswap_compressor, charp, 0);
+
+/* The maximum percentage of memory that the compressed pool can occupy */
+static unsigned int zswap_max_pool_percent = 20;
+module_param_named(max_pool_percent,
+                       zswap_max_pool_percent, uint, 0644);
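As a usage note, with the parameter names declared above and zswap built into the kernel, the feature is normally switched on from the kernel command line, e.g. zswap.enabled=1 zswap.compressor=lzo zswap.max_pool_percent=20; of the three, only max_pool_percent (perm 0644) remains adjustable at runtime via /sys/module/zswap/parameters/.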
+
+/*********************************
+* compression functions
+**********************************/
+/* per-cpu compression transforms */
+static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms;
+
+enum comp_op {
+       ZSWAP_COMPOP_COMPRESS,
+       ZSWAP_COMPOP_DECOMPRESS
+};
+
+static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
+                               u8 *dst, unsigned int *dlen)
+{
+       struct crypto_comp *tfm;
+       int ret;
+
+       tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());
+       switch (op) {
+       case ZSWAP_COMPOP_COMPRESS:
+               ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+               break;
+       case ZSWAP_COMPOP_DECOMPRESS:
+               ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       put_cpu();
+       return ret;
+}
+
+static int __init zswap_comp_init(void)
+{
+       if (!crypto_has_comp(zswap_compressor, 0, 0)) {
+               pr_info("%s compressor not available\n", zswap_compressor);
+               /* fall back to default compressor */
+               zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+               if (!crypto_has_comp(zswap_compressor, 0, 0))
+                       /* can't even load the default compressor */
+                       return -ENODEV;
+       }
+       pr_info("using %s compressor\n", zswap_compressor);
+
+       /* alloc percpu transforms */
+       zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
+       if (!zswap_comp_pcpu_tfms)
+               return -ENOMEM;
+       return 0;
+}
+
+static void zswap_comp_exit(void)
+{
+       /* free percpu transforms */
+       if (zswap_comp_pcpu_tfms)
+               free_percpu(zswap_comp_pcpu_tfms);
+}
+
+/*********************************
+* data structures
+**********************************/
+/*
+ * struct zswap_entry
+ *
+ * This structure contains the metadata for tracking a single compressed
+ * page within zswap.
+ *
+ * rbnode - links the entry into red-black tree for the appropriate swap type
+ * refcount - the number of outstanding references to the entry. This is needed
+ *            to protect against premature freeing of the entry by concurrent
+ *            calls to load, invalidate, and writeback.  The lock
+ *            for the zswap_tree structure that contains the entry must
+ *            be held while changing the refcount.  Since the lock must
+ *            be held, there is no reason to also make refcount atomic.
+ * offset - the swap offset for the entry.  Index into the red-black tree.
+ * handle - zbud allocation handle that stores the compressed page data
+ * length - the length in bytes of the compressed page data.  Needed during
+ *           decompression
+ */
+struct zswap_entry {
+       struct rb_node rbnode;
+       pgoff_t offset;
+       int refcount;
+       unsigned int length;
+       unsigned long handle;
+};
+
+struct zswap_header {
+       swp_entry_t swpentry;
+};
+
+/*
+ * The tree lock in the zswap_tree struct protects a few things:
+ * - the rbtree
+ * - the refcount field of each entry in the tree
+ */
+struct zswap_tree {
+       struct rb_root rbroot;
+       spinlock_t lock;
+       struct zbud_pool *pool;
+};
+
+static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
+
+/*********************************
+* zswap entry functions
+**********************************/
+static struct kmem_cache *zswap_entry_cache;
+
+static int zswap_entry_cache_create(void)
+{
+       zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
+       return (zswap_entry_cache == NULL);
+}
+
+static void zswap_entry_cache_destory(void)
+{
+       kmem_cache_destroy(zswap_entry_cache);
+}
+
+static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
+{
+       struct zswap_entry *entry;
+       entry = kmem_cache_alloc(zswap_entry_cache, gfp);
+       if (!entry)
+               return NULL;
+       entry->refcount = 1;
+       return entry;
+}
+
+static void zswap_entry_cache_free(struct zswap_entry *entry)
+{
+       kmem_cache_free(zswap_entry_cache, entry);
+}
+
+/* caller must hold the tree lock */
+static void zswap_entry_get(struct zswap_entry *entry)
+{
+       entry->refcount++;
+}
+
+/* caller must hold the tree lock */
+static int zswap_entry_put(struct zswap_entry *entry)
+{
+       entry->refcount--;
+       return entry->refcount;
+}
+
+/*********************************
+* rbtree functions
+**********************************/
+static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
+{
+       struct rb_node *node = root->rb_node;
+       struct zswap_entry *entry;
+
+       while (node) {
+               entry = rb_entry(node, struct zswap_entry, rbnode);
+               if (entry->offset > offset)
+                       node = node->rb_left;
+               else if (entry->offset < offset)
+                       node = node->rb_right;
+               else
+                       return entry;
+       }
+       return NULL;
+}
+
+/*
+ * In the case that an entry with the same offset is found, a pointer to
+ * the existing entry is stored in dupentry and the function returns -EEXIST
+ */
+static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
+                       struct zswap_entry **dupentry)
+{
+       struct rb_node **link = &root->rb_node, *parent = NULL;
+       struct zswap_entry *myentry;
+
+       while (*link) {
+               parent = *link;
+               myentry = rb_entry(parent, struct zswap_entry, rbnode);
+               if (myentry->offset > entry->offset)
+                       link = &(*link)->rb_left;
+               else if (myentry->offset < entry->offset)
+                       link = &(*link)->rb_right;
+               else {
+                       *dupentry = myentry;
+                       return -EEXIST;
+               }
+       }
+       rb_link_node(&entry->rbnode, parent, link);
+       rb_insert_color(&entry->rbnode, root);
+       return 0;
+}
+
+/*********************************
+* per-cpu code
+**********************************/
+static DEFINE_PER_CPU(u8 *, zswap_dstmem);
+
+static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
+{
+       struct crypto_comp *tfm;
+       u8 *dst;
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+               tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
+               if (IS_ERR(tfm)) {
+                       pr_err("can't allocate compressor transform\n");
+                       return NOTIFY_BAD;
+               }
+               *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
+               dst = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+               if (!dst) {
+                       pr_err("can't allocate compressor buffer\n");
+                       crypto_free_comp(tfm);
+                       *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
+                       return NOTIFY_BAD;
+               }
+               per_cpu(zswap_dstmem, cpu) = dst;
+               break;
+       case CPU_DEAD:
+       case CPU_UP_CANCELED:
+               tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
+               if (tfm) {
+                       crypto_free_comp(tfm);
+                       *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
+               }
+               dst = per_cpu(zswap_dstmem, cpu);
+               kfree(dst);
+               per_cpu(zswap_dstmem, cpu) = NULL;
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static int zswap_cpu_notifier(struct notifier_block *nb,
+                               unsigned long action, void *pcpu)
+{
+       unsigned long cpu = (unsigned long)pcpu;
+       return __zswap_cpu_notifier(action, cpu);
+}
+
+static struct notifier_block zswap_cpu_notifier_block = {
+       .notifier_call = zswap_cpu_notifier
+};
+
+static int zswap_cpu_init(void)
+{
+       unsigned long cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu)
+               if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
+                       goto cleanup;
+       register_cpu_notifier(&zswap_cpu_notifier_block);
+       put_online_cpus();
+       return 0;
+
+cleanup:
+       for_each_online_cpu(cpu)
+               __zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
+       put_online_cpus();
+       return -ENOMEM;
+}
+
+/*********************************
+* helpers
+**********************************/
+static bool zswap_is_full(void)
+{
+       return (totalram_pages * zswap_max_pool_percent / 100 <
+               zswap_pool_pages);
+}
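A worked example of the limit check above: on a machine with 1 GiB of RAM (262,144 pages of 4 KiB) and the default max_pool_percent of 20, zswap_is_full() starts returning true once the zbud pool exceeds 262,144 * 20 / 100 = 52,428 pages, roughly 200 MiB.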
+
+/*
+ * Carries out the common pattern of freeing an entry's zbud allocation,
+ * freeing the entry itself, and decrementing the number of stored pages.
+ */
+static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
+{
+       zbud_free(tree->pool, entry->handle);
+       zswap_entry_cache_free(entry);
+       atomic_dec(&zswap_stored_pages);
+       zswap_pool_pages = zbud_get_pool_size(tree->pool);
+}
+
+/*********************************
+* writeback code
+**********************************/
+/* return enum for zswap_get_swap_cache_page */
+enum zswap_get_swap_ret {
+       ZSWAP_SWAPCACHE_NEW,
+       ZSWAP_SWAPCACHE_EXIST,
+       ZSWAP_SWAPCACHE_NOMEM
+};
+
+/*
+ * zswap_get_swap_cache_page
+ *
+ * This is an adaptation of read_swap_cache_async()
+ *
+ * This function tries to find a page with the given swap entry
+ * in the swapper_space address space (the swap cache).  If the page
+ * is found, it is returned in retpage.  Otherwise, a page is allocated,
+ * added to the swap cache, and returned in retpage.
+ *
+ * On success, the swap cache page is returned in retpage.
+ * Returns ZSWAP_SWAPCACHE_EXIST if the page was already in the swap cache
+ * (page is not locked), ZSWAP_SWAPCACHE_NEW if a new page was allocated and
+ * needs to be populated (page is locked), or ZSWAP_SWAPCACHE_NOMEM on failure
+ */
+static int zswap_get_swap_cache_page(swp_entry_t entry,
+                               struct page **retpage)
+{
+       struct page *found_page, *new_page = NULL;
+       struct address_space *swapper_space = &swapper_spaces[swp_type(entry)];
+       int err;
+
+       *retpage = NULL;
+       do {
+               /*
+                * First check the swap cache.  Since this is normally
+                * called after lookup_swap_cache() failed, re-calling
+                * that would confuse statistics.
+                */
+               found_page = find_get_page(swapper_space, entry.val);
+               if (found_page)
+                       break;
+
+               /*
+                * Get a new page to read into from swap.
+                */
+               if (!new_page) {
+                       new_page = alloc_page(GFP_KERNEL);
+                       if (!new_page)
+                               break; /* Out of memory */
+               }
+
+               /*
+                * call radix_tree_preload() while we can wait.
+                */
+               err = radix_tree_preload(GFP_KERNEL);
+               if (err)
+                       break;
+
+               /*
+                * Swap entry may have been freed since our caller observed it.
+                */
+               err = swapcache_prepare(entry);
+               if (err == -EEXIST) { /* seems racy */
+                       radix_tree_preload_end();
+                       continue;
+               }
+               if (err) { /* swp entry is obsolete ? */
+                       radix_tree_preload_end();
+                       break;
+               }
+
+               /* May fail (-ENOMEM) if radix-tree node allocation failed. */
+               __set_page_locked(new_page);
+               SetPageSwapBacked(new_page);
+               err = __add_to_swap_cache(new_page, entry);
+               if (likely(!err)) {
+                       radix_tree_preload_end();
+                       lru_cache_add_anon(new_page);
+                       *retpage = new_page;
+                       return ZSWAP_SWAPCACHE_NEW;
+               }
+               radix_tree_preload_end();
+               ClearPageSwapBacked(new_page);
+               __clear_page_locked(new_page);
+               /*
+                * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+                * clear SWAP_HAS_CACHE flag.
+                */
+               swapcache_free(entry, NULL);
+       } while (err != -ENOMEM);
+
+       if (new_page)
+               page_cache_release(new_page);
+       if (!found_page)
+               return ZSWAP_SWAPCACHE_NOMEM;
+       *retpage = found_page;
+       return ZSWAP_SWAPCACHE_EXIST;
+}
+
+/*
+ * Attempts to free an entry by adding a page to the swap cache,
+ * decompressing the entry data into the page, and issuing a
+ * bio write to write the page back to the swap device.
+ *
+ * This can be thought of as a "resumed writeback" of the page
+ * to the swap device.  We are basically resuming the same swap
+ * writeback path that was intercepted with the frontswap_store()
+ * in the first place.  After the page has been decompressed into
+ * the swap cache, the compressed version stored by zswap can be
+ * freed.
+ */
+static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
+{
+       struct zswap_header *zhdr;
+       swp_entry_t swpentry;
+       struct zswap_tree *tree;
+       pgoff_t offset;
+       struct zswap_entry *entry;
+       struct page *page;
+       u8 *src, *dst;
+       unsigned int dlen;
+       int ret, refcount;
+       struct writeback_control wbc = {
+               .sync_mode = WB_SYNC_NONE,
+       };
+
+       /* extract swpentry from data */
+       zhdr = zbud_map(pool, handle);
+       swpentry = zhdr->swpentry; /* here */
+       zbud_unmap(pool, handle);
+       tree = zswap_trees[swp_type(swpentry)];
+       offset = swp_offset(swpentry);
+       BUG_ON(pool != tree->pool);
+
+       /* find and ref zswap entry */
+       spin_lock(&tree->lock);
+       entry = zswap_rb_search(&tree->rbroot, offset);
+       if (!entry) {
+               /* entry was invalidated */
+               spin_unlock(&tree->lock);
+               return 0;
+       }
+       zswap_entry_get(entry);
+       spin_unlock(&tree->lock);
+       BUG_ON(offset != entry->offset);
+
+       /* try to allocate swap cache page */
+       switch (zswap_get_swap_cache_page(swpentry, &page)) {
+       case ZSWAP_SWAPCACHE_NOMEM: /* no memory */
+               ret = -ENOMEM;
+               goto fail;
+
+       case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */
+               /* page is already in the swap cache, ignore for now */
+               page_cache_release(page);
+               ret = -EEXIST;
+               goto fail;
+
+       case ZSWAP_SWAPCACHE_NEW: /* page is locked */
+               /* decompress */
+               dlen = PAGE_SIZE;
+               src = (u8 *)zbud_map(tree->pool, entry->handle) +
+                       sizeof(struct zswap_header);
+               dst = kmap_atomic(page);
+               ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
+                               entry->length, dst, &dlen);
+               kunmap_atomic(dst);
+               zbud_unmap(tree->pool, entry->handle);
+               BUG_ON(ret);
+               BUG_ON(dlen != PAGE_SIZE);
+
+               /* page is up to date */
+               SetPageUptodate(page);
+       }
+
+       /* start writeback */
+       __swap_writepage(page, &wbc, end_swap_bio_write);
+       page_cache_release(page);
+       zswap_written_back_pages++;
+
+       spin_lock(&tree->lock);
+
+       /* drop local reference */
+       zswap_entry_put(entry);
+       /* drop the initial reference from entry creation */
+       refcount = zswap_entry_put(entry);
+
+       /*
+        * There are three possible values for refcount here:
+        * (1) refcount is 1, load is in progress, unlink from rbtree,
+        *     load will free
+        * (2) refcount is 0, (normal case) entry is valid,
+        *     remove from rbtree and free entry
+        * (3) refcount is -1, invalidate happened during writeback,
+        *     free entry
+        */
+       if (refcount >= 0) {
+               /* no invalidate yet, remove from rbtree */
+               rb_erase(&entry->rbnode, &tree->rbroot);
+       }
+       spin_unlock(&tree->lock);
+       if (refcount <= 0) {
+               /* free the entry */
+               zswap_free_entry(tree, entry);
+               return 0;
+       }
+       return -EAGAIN;
+
+fail:
+       spin_lock(&tree->lock);
+       zswap_entry_put(entry);
+       spin_unlock(&tree->lock);
+       return ret;
+}
+
+/*********************************
+* frontswap hooks
+**********************************/
+/* attempts to compress and store a single page */
+static int zswap_frontswap_store(unsigned type, pgoff_t offset,
+                               struct page *page)
+{
+       struct zswap_tree *tree = zswap_trees[type];
+       struct zswap_entry *entry, *dupentry;
+       int ret;
+       unsigned int dlen = PAGE_SIZE, len;
+       unsigned long handle;
+       char *buf;
+       u8 *src, *dst;
+       struct zswap_header *zhdr;
+
+       if (!tree) {
+               ret = -ENODEV;
+               goto reject;
+       }
+
+       /* reclaim space if needed */
+       if (zswap_is_full()) {
+               zswap_pool_limit_hit++;
+               if (zbud_reclaim_page(tree->pool, 8)) {
+                       zswap_reject_reclaim_fail++;
+                       ret = -ENOMEM;
+                       goto reject;
+               }
+       }
+
+       /* allocate entry */
+       entry = zswap_entry_cache_alloc(GFP_KERNEL);
+       if (!entry) {
+               zswap_reject_kmemcache_fail++;
+               ret = -ENOMEM;
+               goto reject;
+       }
+
+       /* compress */
+       dst = get_cpu_var(zswap_dstmem);
+       src = kmap_atomic(page);
+       ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
+       kunmap_atomic(src);
+       if (ret) {
+               ret = -EINVAL;
+               goto freepage;
+       }
+
+       /* store */
+       len = dlen + sizeof(struct zswap_header);
+       ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
+               &handle);
+       if (ret == -ENOSPC) {
+               zswap_reject_compress_poor++;
+               goto freepage;
+       }
+       if (ret) {
+               zswap_reject_alloc_fail++;
+               goto freepage;
+       }
+       zhdr = zbud_map(tree->pool, handle);
+       zhdr->swpentry = swp_entry(type, offset);
+       buf = (u8 *)(zhdr + 1);
+       memcpy(buf, dst, dlen);
+       zbud_unmap(tree->pool, handle);
+       put_cpu_var(zswap_dstmem);
+
+       /* populate entry */
+       entry->offset = offset;
+       entry->handle = handle;
+       entry->length = dlen;
+
+       /* map */
+       spin_lock(&tree->lock);
+       do {
+               ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
+               if (ret == -EEXIST) {
+                       zswap_duplicate_entry++;
+                       /* remove from rbtree */
+                       rb_erase(&dupentry->rbnode, &tree->rbroot);
+                       if (!zswap_entry_put(dupentry)) {
+                               /* free */
+                               zswap_free_entry(tree, dupentry);
+                       }
+               }
+       } while (ret == -EEXIST);
+       spin_unlock(&tree->lock);
+
+       /* update stats */
+       atomic_inc(&zswap_stored_pages);
+       zswap_pool_pages = zbud_get_pool_size(tree->pool);
+
+       return 0;
+
+freepage:
+       put_cpu_var(zswap_dstmem);
+       zswap_entry_cache_free(entry);
+reject:
+       return ret;
+}
+
+/*
+ * returns 0 if the page was successfully decompressed
+ * returns -1 if the entry was not found or on error
+*/
+static int zswap_frontswap_load(unsigned type, pgoff_t offset,
+                               struct page *page)
+{
+       struct zswap_tree *tree = zswap_trees[type];
+       struct zswap_entry *entry;
+       u8 *src, *dst;
+       unsigned int dlen;
+       int refcount, ret;
+
+       /* find */
+       spin_lock(&tree->lock);
+       entry = zswap_rb_search(&tree->rbroot, offset);
+       if (!entry) {
+               /* entry was written back */
+               spin_unlock(&tree->lock);
+               return -1;
+       }
+       zswap_entry_get(entry);
+       spin_unlock(&tree->lock);
+
+       /* decompress */
+       dlen = PAGE_SIZE;
+       src = (u8 *)zbud_map(tree->pool, entry->handle) +
+                       sizeof(struct zswap_header);
+       dst = kmap_atomic(page);
+       ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
+               dst, &dlen);
+       kunmap_atomic(dst);
+       zbud_unmap(tree->pool, entry->handle);
+       BUG_ON(ret);
+
+       spin_lock(&tree->lock);
+       refcount = zswap_entry_put(entry);
+       if (likely(refcount)) {
+               spin_unlock(&tree->lock);
+               return 0;
+       }
+       spin_unlock(&tree->lock);
+
+       /*
+        * We don't have to unlink from the rbtree because
+        * zswap_writeback_entry() or zswap_frontswap_invalidate_page()
+        * has already done this for us if we are the last reference.
+        */
+       /* free */
+
+       zswap_free_entry(tree, entry);
+
+       return 0;
+}
+
+/* frees an entry in zswap */
+static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+       struct zswap_tree *tree = zswap_trees[type];
+       struct zswap_entry *entry;
+       int refcount;
+
+       /* find */
+       spin_lock(&tree->lock);
+       entry = zswap_rb_search(&tree->rbroot, offset);
+       if (!entry) {
+               /* entry was written back */
+               spin_unlock(&tree->lock);
+               return;
+       }
+
+       /* remove from rbtree */
+       rb_erase(&entry->rbnode, &tree->rbroot);
+
+       /* drop the initial reference from entry creation */
+       refcount = zswap_entry_put(entry);
+
+       spin_unlock(&tree->lock);
+
+       if (refcount) {
+               /* writeback in progress, writeback will free */
+               return;
+       }
+
+       /* free */
+       zswap_free_entry(tree, entry);
+}
+
+/* frees all zswap entries for the given swap type */
+static void zswap_frontswap_invalidate_area(unsigned type)
+{
+       struct zswap_tree *tree = zswap_trees[type];
+       struct rb_node *node;
+       struct zswap_entry *entry;
+
+       if (!tree)
+               return;
+
+       /* walk the tree and free everything */
+       spin_lock(&tree->lock);
+       /*
+        * TODO: Even though this code should not be executed because
+        * the try_to_unuse() in swapoff should have emptied the tree,
+        * it is very wasteful to rebalance the tree after every
+        * removal when we are freeing the whole tree.
+        *
+        * If post-order traversal code is ever added to the rbtree
+        * implementation, it should be used here.
+        */
+       while ((node = rb_first(&tree->rbroot))) {
+               entry = rb_entry(node, struct zswap_entry, rbnode);
+               rb_erase(&entry->rbnode, &tree->rbroot);
+               zbud_free(tree->pool, entry->handle);
+               zswap_entry_cache_free(entry);
+               atomic_dec(&zswap_stored_pages);
+       }
+       tree->rbroot = RB_ROOT;
+       spin_unlock(&tree->lock);
+}
+
+static struct zbud_ops zswap_zbud_ops = {
+       .evict = zswap_writeback_entry
+};
+
+static void zswap_frontswap_init(unsigned type)
+{
+       struct zswap_tree *tree;
+
+       tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
+       if (!tree)
+               goto err;
+       tree->pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
+       if (!tree->pool)
+               goto freetree;
+       tree->rbroot = RB_ROOT;
+       spin_lock_init(&tree->lock);
+       zswap_trees[type] = tree;
+       return;
+
+freetree:
+       kfree(tree);
+err:
+       pr_err("alloc failed, zswap disabled for swap type %d\n", type);
+}
+
+static struct frontswap_ops zswap_frontswap_ops = {
+       .store = zswap_frontswap_store,
+       .load = zswap_frontswap_load,
+       .invalidate_page = zswap_frontswap_invalidate_page,
+       .invalidate_area = zswap_frontswap_invalidate_area,
+       .init = zswap_frontswap_init
+};
+
+/*********************************
+* debugfs functions
+**********************************/
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *zswap_debugfs_root;
+
+static int __init zswap_debugfs_init(void)
+{
+       if (!debugfs_initialized())
+               return -ENODEV;
+
+       zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
+       if (!zswap_debugfs_root)
+               return -ENOMEM;
+
+       debugfs_create_u64("pool_limit_hit", S_IRUGO,
+                       zswap_debugfs_root, &zswap_pool_limit_hit);
+       debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
+                       zswap_debugfs_root, &zswap_reject_reclaim_fail);
+       debugfs_create_u64("reject_alloc_fail", S_IRUGO,
+                       zswap_debugfs_root, &zswap_reject_alloc_fail);
+       debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
+                       zswap_debugfs_root, &zswap_reject_kmemcache_fail);
+       debugfs_create_u64("reject_compress_poor", S_IRUGO,
+                       zswap_debugfs_root, &zswap_reject_compress_poor);
+       debugfs_create_u64("written_back_pages", S_IRUGO,
+                       zswap_debugfs_root, &zswap_written_back_pages);
+       debugfs_create_u64("duplicate_entry", S_IRUGO,
+                       zswap_debugfs_root, &zswap_duplicate_entry);
+       debugfs_create_u64("pool_pages", S_IRUGO,
+                       zswap_debugfs_root, &zswap_pool_pages);
+       debugfs_create_atomic_t("stored_pages", S_IRUGO,
+                       zswap_debugfs_root, &zswap_stored_pages);
+
+       return 0;
+}
+
+static void __exit zswap_debugfs_exit(void)
+{
+       debugfs_remove_recursive(zswap_debugfs_root);
+}
+#else
+static int __init zswap_debugfs_init(void)
+{
+       return 0;
+}
+
+static void __exit zswap_debugfs_exit(void) { }
+#endif
+
+/*********************************
+* module init and exit
+**********************************/
+static int __init init_zswap(void)
+{
+       if (!zswap_enabled)
+               return 0;
+
+       pr_info("loading zswap\n");
+       if (zswap_entry_cache_create()) {
+               pr_err("entry cache creation failed\n");
+               goto error;
+       }
+       if (zswap_comp_init()) {
+               pr_err("compressor initialization failed\n");
+               goto compfail;
+       }
+       if (zswap_cpu_init()) {
+               pr_err("per-cpu initialization failed\n");
+               goto pcpufail;
+       }
+       frontswap_register_ops(&zswap_frontswap_ops);
+       if (zswap_debugfs_init())
+               pr_warn("debugfs initialization failed\n");
+       return 0;
+pcpufail:
+       zswap_comp_exit();
+compfail:
+       zswap_entry_cache_destory();
+error:
+       return -ENOMEM;
+}
+/* must be late so crypto has time to come up */
+late_initcall(init_zswap);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("Compressed cache for swap pages");
index 182084d..8ccf830 100644 (file)
@@ -47,18 +47,24 @@ header-y      := $(filter-out $(generic-y), $(header-y))
 all-files     := $(header-y) $(genhdr-y) $(wrapper-files)
 output-files  := $(addprefix $(installdir)/, $(all-files))
 
-input-files   := $(foreach hdr, $(header-y), \
+input-files1  := $(foreach hdr, $(header-y), \
                   $(if $(wildcard $(srcdir)/$(hdr)), \
-                       $(wildcard $(srcdir)/$(hdr)), \
+                       $(wildcard $(srcdir)/$(hdr))) \
+                  )
+input-files1-name := $(notdir $(input-files1))
+input-files2  := $(foreach hdr, $(header-y), \
+                  $(if  $(wildcard $(srcdir)/$(hdr)),, \
                        $(if $(wildcard $(oldsrcdir)/$(hdr)), \
                                $(wildcard $(oldsrcdir)/$(hdr)), \
                                $(error Missing UAPI file $(srcdir)/$(hdr))) \
-                  )) \
-                $(foreach hdr, $(genhdr-y), \
+                  ))
+input-files2-name := $(notdir $(input-files2))
+input-files3  := $(foreach hdr, $(genhdr-y), \
                   $(if $(wildcard $(gendir)/$(hdr)), \
                        $(wildcard $(gendir)/$(hdr)), \
                        $(error Missing generated UAPI file $(gendir)/$(hdr)) \
                   ))
+input-files3-name := $(notdir $(input-files3))
 
 # Work out what needs to be removed
 oldheaders    := $(patsubst $(installdir)/%,%,$(wildcard $(installdir)/*.h))
@@ -72,7 +78,9 @@ printdir = $(patsubst $(INSTALL_HDR_PATH)/%/,%,$(dir $@))
 quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\
                             file$(if $(word 2, $(all-files)),s))
       cmd_install = \
-        $(CONFIG_SHELL) $< $(installdir) $(input-files); \
+        $(CONFIG_SHELL) $< $(installdir) $(srcdir) $(input-files1-name); \
+        $(CONFIG_SHELL) $< $(installdir) $(oldsrcdir) $(input-files2-name); \
+        $(CONFIG_SHELL) $< $(installdir) $(gendir) $(input-files3-name); \
         for F in $(wrapper-files); do                                   \
                 echo "\#include <asm-generic/$$F>" > $(installdir)/$$F;    \
         done;                                                           \
@@ -98,7 +106,7 @@ __headersinst: $(subdirs) $(install-file)
        @:
 
 targets += $(install-file)
-$(install-file): scripts/headers_install.sh $(input-files) FORCE
+$(install-file): scripts/headers_install.sh $(input-files1) $(input-files2) $(input-files3) FORCE
        $(if $(unwanted),$(call cmd,remove),)
        $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@)))
        $(call if_changed,install)
index 6031e23..49392ec 100644 (file)
@@ -63,7 +63,7 @@ multi-objs   := $(multi-objs-y) $(multi-objs-m)
 subdir-obj-y := $(filter %/built-in.o, $(obj-y))
 
 # $(obj-dirs) is a list of directories that contain object files
-obj-dirs := $(dir $(multi-objs) $(subdir-obj-y))
+obj-dirs := $(dir $(multi-objs) $(obj-y))
 
 # Replace multi-part objects by their individual parts, look at local dir only
 real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
@@ -244,7 +244,7 @@ cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \
 # ---------------------------------------------------------------------------
 
 # Generate an assembly file to wrap the output of the device tree compiler
-quiet_cmd_dt_S_dtb= DTB    $@
+quiet_cmd_dt_S_dtb= DTB     $@
 cmd_dt_S_dtb=                                          \
 (                                                      \
        echo '\#include <asm-generic/vmlinux.lds.h>';   \
index 06fcb33..bbf901a 100755 (executable)
@@ -1,17 +1,31 @@
 #!/bin/bash
 
+#
+# This script requires at least spatch
+# version 1.0.0-rc11.
+#
+
 SPATCH="`which ${SPATCH:=spatch}`"
 
+trap kill_running SIGTERM SIGINT
+declare -a SPATCH_PID
+
 # The verbosity may be set by the environmental parameter V=
 # as for example with 'make V=1 coccicheck'
 
 if [ -n "$V" -a "$V" != "0" ]; then
-       VERBOSE=1
+       VERBOSE="$V"
 else
        VERBOSE=0
 fi
 
-FLAGS="$SPFLAGS -very_quiet"
+if [ -z "$J" ]; then
+       NPROC=$(getconf _NPROCESSORS_ONLN)
+else
+       NPROC="$J"
+fi
+
+FLAGS="$SPFLAGS --very-quiet"
 
 # spatch only allows include directories with the syntax "-I include"
 # while gcc also allows "-Iinclude" and "-include include"
@@ -27,14 +41,14 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
 else
     ONLINE=0
     if [ "$KBUILD_EXTMOD" = "" ] ; then
-        OPTIONS="-dir $srctree $COCCIINCLUDE"
+        OPTIONS="--dir $srctree $COCCIINCLUDE"
     else
-        OPTIONS="-dir $KBUILD_EXTMOD $COCCIINCLUDE"
+        OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
     fi
 fi
 
 if [ "$KBUILD_EXTMOD" != "" ] ; then
-    OPTIONS="-patch $srctree $OPTIONS"
+    OPTIONS="--patch $srctree $OPTIONS"
 fi
 
 if [ ! -x "$SPATCH" ]; then
@@ -44,13 +58,21 @@ fi
 
 if [ "$MODE" = "" ] ; then
     if [ "$ONLINE" = "0" ] ; then
-       echo 'You have not explicitly specified the mode to use. Using default "chain" mode.'
-       echo 'All available modes will be tried (in that order): patch, report, context, org'
+       echo 'You have not explicitly specified the mode to use. Using default "report" mode.'
+       echo 'Available modes are the following: patch, report, context, org'
        echo 'You can specify the mode with "make coccicheck MODE=<mode>"'
+       echo 'Note however that some modes are not implemented by some semantic patches.'
+    fi
+    MODE="report"
+fi
+
+if [ "$MODE" = "chain" ] ; then
+    if [ "$ONLINE" = "0" ] ; then
+       echo 'You have selected the "chain" mode.'
+       echo 'All available modes will be tried (in that order): patch, report, context, org'
     fi
-    MODE="chain"
 elif [ "$MODE" = "report" -o "$MODE" = "org" ] ; then
-    FLAGS="$FLAGS -no_show_diff"
+    FLAGS="$FLAGS --no-show-diff"
 fi
 
 if [ "$ONLINE" = "0" ] ; then
@@ -61,19 +83,35 @@ if [ "$ONLINE" = "0" ] ; then
 fi
 
 run_cmd() {
+       local i
        if [ $VERBOSE -ne 0 ] ; then
-               echo "Running: $@"
+               echo "Running ($NPROC in parallel): $@"
        fi
-       eval $@
+       for i in $(seq 0 $(( NPROC - 1)) ); do
+               eval "$@ --max $NPROC --index $i &"
+               SPATCH_PID[$i]=$!
+               if [ $VERBOSE -eq 2 ] ; then
+                       echo "${SPATCH_PID[$i]} running"
+               fi
+       done
+       wait
 }
 
+kill_running() {
+       for i in $(seq 0 $(( NPROC - 1 )) ); do
+               if [ $VERBOSE -eq 2 ] ; then
+                       echo "Killing ${SPATCH_PID[$i]}"
+               fi
+               kill ${SPATCH_PID[$i]} 2>/dev/null
+       done
+}
 
 coccinelle () {
     COCCI="$1"
 
     OPT=`grep "Option" $COCCI | cut -d':' -f2`
 
-#   The option '-parse_cocci' can be used to syntactically check the SmPL files.
+#   The option '--parse-cocci' can be used to syntactically check the SmPL files.
 #
 #    $SPATCH -D $MODE $FLAGS -parse_cocci $COCCI $OPT > /dev/null
 
@@ -114,20 +152,20 @@ coccinelle () {
 
     if [ "$MODE" = "chain" ] ; then
        run_cmd $SPATCH -D patch   \
-               $FLAGS -sp_file $COCCI $OPT $OPTIONS               || \
+               $FLAGS --cocci-file $COCCI $OPT $OPTIONS               || \
        run_cmd $SPATCH -D report  \
-               $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || \
+               $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || \
        run_cmd $SPATCH -D context \
-               $FLAGS -sp_file $COCCI $OPT $OPTIONS               || \
+               $FLAGS --cocci-file $COCCI $OPT $OPTIONS               || \
        run_cmd $SPATCH -D org     \
-               $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || exit 1
+               $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || exit 1
     elif [ "$MODE" = "rep+ctxt" ] ; then
        run_cmd $SPATCH -D report  \
-               $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff && \
+               $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff && \
        run_cmd $SPATCH -D context \
-               $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
+               $FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1
     else
-       run_cmd $SPATCH -D $MODE   $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
+       run_cmd $SPATCH -D $MODE   $FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1
     fi
 
 }
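
With the option spelling moved to the long "--" form and the new J= knob for parallel spatch jobs, a typical run of the script looks like the sketch below (tree paths illustrative; the flag spelling needs spatch 1.0.0-rc11 or later, as noted at the top of the script):

    # all semantic patches, report mode, four parallel spatch instances, verbose PIDs
    make coccicheck MODE=report J=4 V=2

    # hand-rolled equivalent for one semantic patch that implements report mode
    spatch -D report --very-quiet --no-show-diff \
           --cocci-file scripts/coccinelle/api/ptr_ret.cocci \
           --no-includes --include-headers --dir drivers
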
index 7d4771d..bd5d08b 100644 (file)
@@ -5,7 +5,7 @@
 // Confidence: High
 // Copyright: 2009,2010 Nicolas Palix, DIKU.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 //
 // Keywords: kmalloc, kzalloc, kcalloc
 // Version min: < 2.6.12 kmalloc
index 046b9b1..52c55e4 100644 (file)
@@ -9,7 +9,7 @@
 // Copyright: (C) 2009-2010 Julia Lawall, Nicolas Palix, DIKU.  GPLv2.
 // Copyright: (C) 2009-2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/rules/kzalloc.html
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 //
 // Keywords: kmalloc, kzalloc
 // Version min: < 2.6.12 kmalloc
index a9694a8..9594c9f 100644 (file)
@@ -4,7 +4,7 @@
 //
 // Confidence: Moderate
 // URL: http://coccinelle.lip6.fr/
-// Options: -include_headers
+// Options: --include-headers
 
 virtual context
 virtual org
index 46beb81..562ec88 100644 (file)
@@ -10,7 +10,7 @@
 // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual org
index 07a74b2..09cba54 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index 4dceab6..3d1aa71 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index 2b131a8..c606231 100644 (file)
@@ -7,7 +7,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index 15f076f..2274638 100644 (file)
@@ -5,7 +5,7 @@
 // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 //
 // Keywords: ERR_PTR, PTR_ERR, PTR_RET
 // Version min: 2.6.39
index 05962f7..b67e174 100644 (file)
@@ -4,7 +4,7 @@
 ///
 // Confidence: High
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual report
index 0a1e361..3d93490 100644 (file)
@@ -18,7 +18,7 @@
 // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index d9ae6d8..577b780 100644 (file)
@@ -10,7 +10,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
diff --git a/scripts/coccinelle/free/kfreeaddr.cocci b/scripts/coccinelle/free/kfreeaddr.cocci
new file mode 100644 (file)
index 0000000..ce8aacc
--- /dev/null
@@ -0,0 +1,32 @@
+/// Free of a structure field
+///
+// Confidence: High
+// Copyright: (C) 2013 Julia Lawall, INRIA/LIP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: --no-includes --include-headers
+
+virtual org
+virtual report
+virtual context
+
+@r depends on context || report || org @
+expression e;
+identifier f;
+position p;
+@@
+
+* kfree@p(&e->f)
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("kfree",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: kfree of structure field"
+coccilib.report.print_report(p[0],msg)
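
A hypothetical example of the pattern this rule reports (names invented for illustration, not taken from this series):

    #include <linux/slab.h>

    struct foo {
            int id;
            char name[16];
    };

    static void foo_release(struct foo *f)
    {
            kfree(&f->name);        /* flagged: &f->name points into f and was never allocated on its own */
            kfree(f);               /* correct: free the object that kmalloc() returned */
    }
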
diff --git a/scripts/coccinelle/free/pci_free_consistent.cocci b/scripts/coccinelle/free/pci_free_consistent.cocci
new file mode 100644 (file)
index 0000000..43600cc
--- /dev/null
@@ -0,0 +1,52 @@
+/// Find missing pci_free_consistent for every pci_alloc_consistent.
+///
+// Confidence: Moderate
+// Copyright: (C) 2013 Petr Strnad.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Keywords: pci_free_consistent, pci_alloc_consistent
+// Options: --no-includes --include-headers
+
+virtual report
+virtual org
+
+@search@
+local idexpression id;
+expression x,y,z,e;
+position p1,p2;
+type T;
+@@
+
+id = pci_alloc_consistent@p1(x,y,&z)
+... when != e = id
+if (id == NULL || ...) { ... return ...; }
+... when != pci_free_consistent(x,y,id,z)
+    when != if (id) { ... pci_free_consistent(x,y,id,z) ... }
+    when != if (y) { ... pci_free_consistent(x,y,id,z) ... }
+    when != e = (T)id
+    when exists
+(
+return 0;
+|
+return 1;
+|
+return id;
+|
+return@p2 ...;
+)
+
+@script:python depends on report@
+p1 << search.p1;
+p2 << search.p2;
+@@
+
+msg = "ERROR: missing pci_free_consistent; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line)
+coccilib.report.print_report(p2[0],msg)
+
+@script:python depends on org@
+p1 << search.p1;
+p2 << search.p2;
+@@
+
+msg = "ERROR: missing pci_free_consistent; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line)
+cocci.print_main(msg,p1)
+cocci.print_secs("",p2)
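
A sketch of the leak the rule is after, with an invented example_hw_init() helper standing in for whatever can fail between allocation and teardown:

    static int example_probe(struct pci_dev *pdev)
    {
            dma_addr_t dma;
            void *buf;

            buf = pci_alloc_consistent(pdev, 4096, &dma);
            if (buf == NULL)
                    return -ENOMEM;

            if (example_hw_init(pdev) < 0)
                    return -EIO;    /* reported: buf is still allocated on this return */

            pci_free_consistent(pdev, 4096, buf, dma);
            return 0;
    }
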
index 0a40af8..48c152f 100644 (file)
@@ -7,7 +7,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index 259899f..f58732b 100644 (file)
@@ -11,7 +11,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index b296747..873f444 100644 (file)
@@ -9,7 +9,7 @@
 // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual context
 virtual org
index 06284c5..f085f59 100644 (file)
@@ -11,7 +11,7 @@
 // Copyright: (C) 2012 Gilles Muller, INRIA/LIP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual context
 virtual org
index 8f10b49..669b244 100644 (file)
@@ -9,7 +9,7 @@
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index 63b24e6..002752f 100644 (file)
@@ -8,7 +8,7 @@
 // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index 1c4ffe6..debd70e 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual context
 virtual org
index 3267d74..47f649b 100644 (file)
@@ -11,7 +11,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual context
 virtual org
index 97ce41c..b9abed4 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
-// Options: -include_headers
+// Options: --include-headers
 
 virtual patch
 virtual context
index d425644..f0368b3 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index cf74a00..c0c3371 100644 (file)
@@ -8,7 +8,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments: requires at least Coccinelle 0.2.4, lex or parse error otherwise
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index 3e4089a..8aebd18 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index b7ed91d..d0d00ef 100644 (file)
@@ -13,7 +13,7 @@
 // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index c170721..80a831c 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index 4a28cef..81fabf3 100644 (file)
@@ -7,7 +7,7 @@
 // Copyright: (C) 2013 Gilles Muller, INRIA/LIP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual org
 virtual report
index fda8c35..d2e5b6c 100644 (file)
@@ -5,7 +5,7 @@
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index ed961a1..9bd29aa 100644 (file)
@@ -6,7 +6,7 @@
 // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index 949bf65..5354a79 100644 (file)
@@ -10,7 +10,7 @@
 // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual context
 virtual org
index 9ba73d0..72f1572 100644 (file)
@@ -10,7 +10,7 @@
 // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual context
 virtual org
index 13a2c0e..78d74c2 100644 (file)
@@ -8,7 +8,7 @@
 // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual context
 virtual org
index e8dd8a6..cfe0a35 100644 (file)
@@ -7,7 +7,7 @@
 // Copyright: (C) 2012 Gilles Muller, INRIA.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
-// Options: -no_includes -include_headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index a65ecbb..567120a 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/bash
 # Manipulate options in a .config file from the command line
 
+myname=${0##*/}
+
 # If no prefix forced, use the default CONFIG_
 CONFIG_="${CONFIG_-CONFIG_}"
 
@@ -8,7 +10,7 @@ usage() {
        cat >&2 <<EOL
 Manipulate options in a .config file from the command line.
 Usage:
-config options command ...
+$myname options command ...
 commands:
        --enable|-e option   Enable option
        --disable|-d option  Disable option
@@ -33,14 +35,14 @@ options:
        --file config-file   .config file to change (default .config)
        --keep-case|-k       Keep next symbols' case (dont' upper-case it)
 
-config doesn't check the validity of the .config file. This is done at next
+$myname doesn't check the validity of the .config file. This is done at next
 make time.
 
-By default, config will upper-case the given symbol. Use --keep-case to keep
+By default, $myname will upper-case the given symbol. Use --keep-case to keep
 the case of all following symbols unchanged.
 
-config uses 'CONFIG_' as the default symbol prefix. Set the environment
-variable CONFIG_ to the prefix to use. Eg.: CONFIG_="FOO_" config ...
+$myname uses 'CONFIG_' as the default symbol prefix. Set the environment
+variable CONFIG_ to the prefix to use. Eg.: CONFIG_="FOO_" $myname ...
 EOL
        exit 1
 }
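
For reference, two invocations that exercise the options shown in the help text above (the .config path and the FOO_ prefix are only illustrative):

    # flip options in an out-of-tree .config
    scripts/config --file build/.config --enable IKCONFIG --disable IKCONFIG_PROC

    # use a different symbol prefix and keep the case of the symbol as typed
    CONFIG_="FOO_" scripts/config --keep-case --enable bar_baz
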
index 643764f..5de5660 100644 (file)
@@ -2,7 +2,7 @@
 
 if [ $# -lt 1 ]
 then
-       echo "Usage: headers_install.sh OUTDIR [FILES...]
+       echo "Usage: headers_install.sh OUTDIR SRCDIR [FILES...]
        echo
        echo "Prepares kernel header files for use by user space, by removing"
        echo "all compiler.h definitions and #includes, removing any"
@@ -10,6 +10,7 @@ then
        echo "asm/inline/volatile keywords."
        echo
        echo "OUTDIR: directory to write each userspace header FILE to."
+       echo "SRCDIR: source directory where files are picked."
        echo "FILES:  list of header files to operate on."
 
        exit 1
@@ -19,6 +20,8 @@ fi
 
 OUTDIR="$1"
 shift
+SRCDIR="$1"
+shift
 
 # Iterate through files listed on command line
 
@@ -34,7 +37,7 @@ do
                -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
                -e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \
                -e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \
-               "$i" > "$OUTDIR/$FILE.sed" || exit 1
+               "$SRCDIR/$i" > "$OUTDIR/$FILE.sed" || exit 1
        scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__ "$OUTDIR/$FILE.sed" \
                > "$OUTDIR/$FILE"
        [ $? -gt 1 ] && exit 1
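
The script is normally driven by the Makefile.headersinst change above, which now passes the source directory explicitly; if invoked by hand from the top of the tree, the argument order is OUTDIR, SRCDIR, then bare file names (paths illustrative, and OUTDIR must already exist):

    scripts/headers_install.sh usr/include/linux include/uapi/linux types.h ioctl.h
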
index bde5b95..d19944f 100644 (file)
@@ -527,11 +527,12 @@ int main(int ac, char **av)
                        seed_env = getenv("KCONFIG_SEED");
                        if( seed_env && *seed_env ) {
                                char *endp;
-                               int tmp = (int)strtol(seed_env, &endp, 10);
+                               int tmp = (int)strtol(seed_env, &endp, 0);
                                if (*endp == '\0') {
                                        seed = tmp;
                                }
                        }
+                       fprintf( stderr, "KCONFIG_SEED=0x%X\n", seed );
                        srand(seed);
                        break;
                }
@@ -653,7 +654,8 @@ int main(int ac, char **av)
                conf_set_all_new_symbols(def_default);
                break;
        case randconfig:
-               conf_set_all_new_symbols(def_random);
+               /* Really nothing to do in this loop */
+               while (conf_set_all_new_symbols(def_random)) ;
                break;
        case defconfig:
                conf_set_all_new_symbols(def_default);
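
Because the seed is now parsed with base 0 and echoed back on stderr, a randconfig run can be seeded in hex and reproduced later; a minimal sketch:

    # the value actually used is reported on stderr as KCONFIG_SEED=0x2AFE
    KCONFIG_SEED=0x2AFE make randconfig
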
index 43eda40..c55c227 100644 (file)
@@ -1040,7 +1040,7 @@ void conf_set_changed_callback(void (*fn)(void))
        conf_changed_callback = fn;
 }
 
-static void randomize_choice_values(struct symbol *csym)
+static bool randomize_choice_values(struct symbol *csym)
 {
        struct property *prop;
        struct symbol *sym;
@@ -1053,7 +1053,7 @@ static void randomize_choice_values(struct symbol *csym)
         * In both cases stop.
         */
        if (csym->curr.tri != yes)
-               return;
+               return false;
 
        prop = sym_get_choice_prop(csym);
 
@@ -1077,13 +1077,18 @@ static void randomize_choice_values(struct symbol *csym)
                else {
                        sym->def[S_DEF_USER].tri = no;
                }
+               sym->flags |= SYMBOL_DEF_USER;
+               /* clear VALID to get value calculated */
+               sym->flags &= ~SYMBOL_VALID;
        }
        csym->flags |= SYMBOL_DEF_USER;
        /* clear VALID to get value calculated */
        csym->flags &= ~(SYMBOL_VALID);
+
+       return true;
 }
 
-static void set_all_choice_values(struct symbol *csym)
+void set_all_choice_values(struct symbol *csym)
 {
        struct property *prop;
        struct symbol *sym;
@@ -1100,10 +1105,10 @@ static void set_all_choice_values(struct symbol *csym)
        }
        csym->flags |= SYMBOL_DEF_USER;
        /* clear VALID to get value calculated */
-       csym->flags &= ~(SYMBOL_VALID);
+       csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES);
 }
 
-void conf_set_all_new_symbols(enum conf_def_mode mode)
+bool conf_set_all_new_symbols(enum conf_def_mode mode)
 {
        struct symbol *sym, *csym;
        int i, cnt, pby, pty, ptm;      /* pby: probability of boolean  = y
@@ -1151,6 +1156,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode)
                        exit( 1 );
                }
        }
+       bool has_changed = false;
 
        for_all_symbols(i, sym) {
                if (sym_has_value(sym) || (sym->flags & SYMBOL_VALID))
@@ -1158,6 +1164,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode)
                switch (sym_get_type(sym)) {
                case S_BOOLEAN:
                case S_TRISTATE:
+                       has_changed = true;
                        switch (mode) {
                        case def_yes:
                                sym->def[S_DEF_USER].tri = yes;
@@ -1202,14 +1209,26 @@ void conf_set_all_new_symbols(enum conf_def_mode mode)
         * selected in a choice block and we set it to yes,
         * and the rest to no.
         */
+       if (mode != def_random) {
+               for_all_symbols(i, csym) {
+                       if ((sym_is_choice(csym) && !sym_has_value(csym)) ||
+                           sym_is_choice_value(csym))
+                               csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES;
+               }
+       }
+
        for_all_symbols(i, csym) {
                if (sym_has_value(csym) || !sym_is_choice(csym))
                        continue;
 
                sym_calc_value(csym);
                if (mode == def_random)
-                       randomize_choice_values(csym);
-               else
+                       has_changed = randomize_choice_values(csym);
+               else {
                        set_all_choice_values(csym);
+                       has_changed = true;
+               }
        }
+
+       return has_changed;
 }
index cdd4860..df198a5 100644 (file)
@@ -106,6 +106,9 @@ struct symbol {
 #define SYMBOL_DEF3       0x40000  /* symbol.def[S_DEF_3] is valid */
 #define SYMBOL_DEF4       0x80000  /* symbol.def[S_DEF_4] is valid */
 
+/* choice values need to be set before calculating this symbol value */
+#define SYMBOL_NEED_SET_CHOICE_VALUES  0x100000
+
 #define SYMBOL_MAXLENGTH       256
 #define SYMBOL_HASHSIZE                9973
 
index f8aee5f..09f4edf 100644 (file)
@@ -86,7 +86,8 @@ const char *conf_get_autoconfig_name(void);
 char *conf_get_default_confname(void);
 void sym_set_change_count(int count);
 void sym_add_change_count(int count);
-void conf_set_all_new_symbols(enum conf_def_mode mode);
+bool conf_set_all_new_symbols(enum conf_def_mode mode);
+void set_all_choice_values(struct symbol *csym);
 
 struct conf_printer {
        void (*print_symbol)(FILE *, struct symbol *, const char *, void *);
index ef1a738..ecdb965 100644 (file)
@@ -14,6 +14,7 @@ P(conf_set_message_callback, void,(void (*fn)(const char *fmt, va_list ap)));
 /* menu.c */
 P(rootmenu,struct menu,);
 
+P(menu_is_empty, bool, (struct menu *menu));
 P(menu_is_visible, bool, (struct menu *menu));
 P(menu_has_prompt, bool, (struct menu *menu));
 P(menu_get_prompt,const char *,(struct menu *menu));
index a2eb80f..3b15c08 100644 (file)
@@ -132,16 +132,16 @@ int dialog_checklist(const char *title, const char *prompt, int height,
        }
 
 do_resize:
-       if (getmaxy(stdscr) < (height + 6))
+       if (getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN))
                return -ERRDISPLAYTOOSMALL;
-       if (getmaxx(stdscr) < (width + 6))
+       if (getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN))
                return -ERRDISPLAYTOOSMALL;
 
        max_choice = MIN(list_height, item_count());
 
        /* center dialog box on screen */
-       x = (COLS - width) / 2;
-       y = (LINES - height) / 2;
+       x = (getmaxx(stdscr) - width) / 2;
+       y = (getmaxy(stdscr) - height) / 2;
 
        draw_shadow(stdscr, y, x, height, width);
 
index 1099337..b4343d3 100644 (file)
@@ -200,6 +200,20 @@ int item_is_tag(char tag);
 int on_key_esc(WINDOW *win);
 int on_key_resize(void);
 
+/* minimum (re)size values */
+#define CHECKLIST_HEIGTH_MIN 6 /* For dialog_checklist() */
+#define CHECKLIST_WIDTH_MIN 6
+#define INPUTBOX_HEIGTH_MIN 2  /* For dialog_inputbox() */
+#define INPUTBOX_WIDTH_MIN 2
+#define MENUBOX_HEIGTH_MIN 15  /* For dialog_menu() */
+#define MENUBOX_WIDTH_MIN 65
+#define TEXTBOX_HEIGTH_MIN 8   /* For dialog_textbox() */
+#define TEXTBOX_WIDTH_MIN 8
+#define YESNO_HEIGTH_MIN 4     /* For dialog_yesno() */
+#define YESNO_WIDTH_MIN 4
+#define WINDOW_HEIGTH_MIN 19   /* For init_dialog() */
+#define WINDOW_WIDTH_MIN 80
+
 int init_dialog(const char *backtitle);
 void set_dialog_backtitle(const char *backtitle);
 void set_dialog_subtitles(struct subtitle_list *subtitles);
index 21404a0..447a582 100644 (file)
@@ -56,14 +56,14 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
                strcpy(instr, init);
 
 do_resize:
-       if (getmaxy(stdscr) <= (height - 2))
+       if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN))
                return -ERRDISPLAYTOOSMALL;
-       if (getmaxx(stdscr) <= (width - 2))
+       if (getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN))
                return -ERRDISPLAYTOOSMALL;
 
        /* center dialog box on screen */
-       x = (COLS - width) / 2;
-       y = (LINES - height) / 2;
+       x = (getmaxx(stdscr) - width) / 2;
+       y = (getmaxy(stdscr) - height) / 2;
 
        draw_shadow(stdscr, y, x, height, width);
 
index 38cd69c..c93de0b 100644 (file)
@@ -193,7 +193,7 @@ int dialog_menu(const char *title, const char *prompt,
 do_resize:
        height = getmaxy(stdscr);
        width = getmaxx(stdscr);
-       if (height < 15 || width < 65)
+       if (height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN)
                return -ERRDISPLAYTOOSMALL;
 
        height -= 4;
@@ -203,8 +203,8 @@ do_resize:
        max_choice = MIN(menu_height, item_count());
 
        /* center dialog box on screen */
-       x = (COLS - width) / 2;
-       y = (LINES - height) / 2;
+       x = (getmaxx(stdscr) - width) / 2;
+       y = (getmaxy(stdscr) - height) / 2;
 
        draw_shadow(stdscr, y, x, height, width);
 
index a48bb93..1773319 100644 (file)
@@ -80,7 +80,7 @@ int dialog_textbox(const char *title, char *tbuf, int initial_height,
 
 do_resize:
        getmaxyx(stdscr, height, width);
-       if (height < 8 || width < 8)
+       if (height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN)
                return -ERRDISPLAYTOOSMALL;
        if (initial_height != 0)
                height = initial_height;
@@ -98,8 +98,8 @@ do_resize:
                        width = 0;
 
        /* center dialog box on screen */
-       x = (COLS - width) / 2;
-       y = (LINES - height) / 2;
+       x = (getmaxx(stdscr) - width) / 2;
+       y = (getmaxy(stdscr) - height) / 2;
 
        draw_shadow(stdscr, y, x, height, width);
 
index a0e97c2..58a8289 100644 (file)
@@ -254,7 +254,12 @@ void attr_clear(WINDOW * win, int height, int width, chtype attr)
 
 void dialog_clear(void)
 {
-       attr_clear(stdscr, LINES, COLS, dlg.screen.atr);
+       int lines, columns;
+
+       lines = getmaxy(stdscr);
+       columns = getmaxx(stdscr);
+
+       attr_clear(stdscr, lines, columns, dlg.screen.atr);
        /* Display background title if it exists ... - SLH */
        if (dlg.backtitle != NULL) {
                int i, len = 0, skip = 0;
@@ -269,10 +274,10 @@ void dialog_clear(void)
                }
 
                wmove(stdscr, 1, 1);
-               if (len > COLS - 2) {
+               if (len > columns - 2) {
                        const char *ellipsis = "[...] ";
                        waddstr(stdscr, ellipsis);
-                       skip = len - (COLS - 2 - strlen(ellipsis));
+                       skip = len - (columns - 2 - strlen(ellipsis));
                }
 
                for (pos = dlg.subtitles; pos != NULL; pos = pos->next) {
@@ -298,7 +303,7 @@ void dialog_clear(void)
                                skip--;
                }
 
-               for (i = len + 1; i < COLS - 1; i++)
+               for (i = len + 1; i < columns - 1; i++)
                        waddch(stdscr, ACS_HLINE);
        }
        wnoutrefresh(stdscr);
@@ -317,7 +322,7 @@ int init_dialog(const char *backtitle)
        getyx(stdscr, saved_y, saved_x);
 
        getmaxyx(stdscr, height, width);
-       if (height < 19 || width < 80) {
+       if (height < WINDOW_HEIGTH_MIN || width < WINDOW_WIDTH_MIN) {
                endwin();
                return -ERRDISPLAYTOOSMALL;
        }
@@ -371,27 +376,19 @@ void print_title(WINDOW *dialog, const char *title, int width)
 /*
  * Print a string of text in a window, automatically wrap around to the
  * next line if the string is too long to fit on one line. Newline
- * characters '\n' are replaced by spaces.  We start on a new line
+ * characters '\n' are properly processed.  We start on a new line
  * if there is no room for at least 4 nonblanks following a double-space.
  */
 void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x)
 {
        int newl, cur_x, cur_y;
-       int i, prompt_len, room, wlen;
-       char tempstr[MAX_LEN + 1], *word, *sp, *sp2;
+       int prompt_len, room, wlen;
+       char tempstr[MAX_LEN + 1], *word, *sp, *sp2, *newline_separator = 0;
 
        strcpy(tempstr, prompt);
 
        prompt_len = strlen(tempstr);
 
-       /*
-        * Remove newlines
-        */
-       for (i = 0; i < prompt_len; i++) {
-               if (tempstr[i] == '\n')
-                       tempstr[i] = ' ';
-       }
-
        if (prompt_len <= width - x * 2) {      /* If prompt is short */
                wmove(win, y, (width - prompt_len) / 2);
                waddstr(win, tempstr);
@@ -401,7 +398,10 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x)
                newl = 1;
                word = tempstr;
                while (word && *word) {
-                       sp = strchr(word, ' ');
+                       sp = strpbrk(word, "\n ");
+                       if (sp && *sp == '\n')
+                               newline_separator = sp;
+
                        if (sp)
                                *sp++ = 0;
 
@@ -413,7 +413,7 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x)
                        if (wlen > room ||
                            (newl && wlen < 4 && sp
                             && wlen + 1 + strlen(sp) > room
-                            && (!(sp2 = strchr(sp, ' '))
+                            && (!(sp2 = strpbrk(sp, "\n "))
                                 || wlen + 1 + (sp2 - sp) > room))) {
                                cur_y++;
                                cur_x = x;
@@ -421,7 +421,15 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x)
                        wmove(win, cur_y, cur_x);
                        waddstr(win, word);
                        getyx(win, cur_y, cur_x);
-                       cur_x++;
+
+                       /* Move to the next line if the word separator was a newline */
+                       if (newline_separator) {
+                               cur_y++;
+                               cur_x = x;
+                               newline_separator = 0;
+                       } else
+                               cur_x++;
+
                        if (sp && *sp == ' ') {
                                cur_x++;        /* double space */
                                while (*++sp == ' ') ;
index 4e6e809..676fb2f 100644 (file)
@@ -45,14 +45,14 @@ int dialog_yesno(const char *title, const char *prompt, int height, int width)
        WINDOW *dialog;
 
 do_resize:
-       if (getmaxy(stdscr) < (height + 4))
+       if (getmaxy(stdscr) < (height + YESNO_HEIGTH_MIN))
                return -ERRDISPLAYTOOSMALL;
-       if (getmaxx(stdscr) < (width + 4))
+       if (getmaxx(stdscr) < (width + YESNO_WIDTH_MIN))
                return -ERRDISPLAYTOOSMALL;
 
        /* center dialog box on screen */
-       x = (COLS - width) / 2;
-       y = (LINES - height) / 2;
+       x = (getmaxx(stdscr) - width) / 2;
+       y = (getmaxy(stdscr) - height) / 2;
 
        draw_shadow(stdscr, y, x, height, width);
 
index a69cbd7..6c9c45f 100644 (file)
@@ -48,7 +48,7 @@ static const char mconf_readme[] = N_(
 "----------\n"
 "o  Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
 "   you wish to change or submenu wish to select and press <Enter>.\n"
-"   Submenus are designated by \"--->\".\n"
+"   Submenus are designated by \"--->\", empty ones by \"----\".\n"
 "\n"
 "   Shortcut: Press the option's highlighted letter (hotkey).\n"
 "             Pressing a hotkey more than once will sequence\n"
@@ -176,7 +176,7 @@ static const char mconf_readme[] = N_(
 "\n"),
 menu_instructions[] = N_(
        "Arrow keys navigate the menu.  "
-       "<Enter> selects submenus --->.  "
+       "<Enter> selects submenus ---> (or empty submenus ----).  "
        "Highlighted letters are hotkeys.  "
        "Pressing <Y> includes, <N> excludes, <M> modularizes features.  "
        "Press <Esc><Esc> to exit, <?> for Help, </> for Search.  "
@@ -401,7 +401,7 @@ static void search_conf(void)
        struct subtitle_part stpart;
 
        title = str_new();
-       str_printf( &title, _("Enter %s (sub)string to search for "
+       str_printf( &title, _("Enter %s (sub)string or regexp to search for "
                              "(with or without \"%s\")"), CONFIG_, CONFIG_);
 
 again:
@@ -498,8 +498,9 @@ static void build_conf(struct menu *menu)
                                                  menu->data ? "-->" : "++>",
                                                  indent + 1, ' ', prompt);
                                } else
-                                       item_make("   %*c%s  --->", indent + 1, ' ', prompt);
-
+                                       item_make("   %*c%s  %s",
+                                                 indent + 1, ' ', prompt,
+                                                 menu_is_empty(menu) ? "----" : "--->");
                                item_set_tag('m');
                                item_set_data(menu);
                                if (single_menu_mode && menu->data)
@@ -630,7 +631,7 @@ static void build_conf(struct menu *menu)
                          (sym_has_value(sym) || !sym_is_changable(sym)) ?
                          "" : _(" (NEW)"));
                if (menu->prompt->type == P_MENU) {
-                       item_add_str("  --->");
+                       item_add_str("  %s", menu_is_empty(menu) ? "----" : "--->");
                        return;
                }
        }
@@ -826,7 +827,9 @@ static void conf_choice(struct menu *menu)
                dialog_clear();
                res = dialog_checklist(prompt ? _(prompt) : _("Main Menu"),
                                        _(radiolist_instructions),
-                                        15, 70, 6);
+                                       MENUBOX_HEIGTH_MIN,
+                                       MENUBOX_WIDTH_MIN,
+                                       CHECKLIST_HEIGTH_MIN);
                selected = item_activate_selected();
                switch (res) {
                case 0:
@@ -957,8 +960,8 @@ static int handle_exit(void)
        dialog_clear();
        if (conf_get_changed())
                res = dialog_yesno(NULL,
-                                  _("Do you wish to save your new configuration ?\n"
-                                    "<ESC><ESC> to continue."),
+                                  _("Do you wish to save your new configuration?\n"
+                                    "(Press <ESC><ESC> to continue kernel configuration.)"),
                                   6, 60);
        else
                res = -1;
index fd3f018..7e233a6 100644 (file)
@@ -443,6 +443,22 @@ bool menu_has_prompt(struct menu *menu)
        return true;
 }
 
+/*
+ * Determine if a menu is empty.
+ * A menu is considered empty if it contains no entries,
+ * or only invisible ones.
+ */
+bool menu_is_empty(struct menu *menu)
+{
+       struct menu *child;
+
+       for (child = menu->list; child; child = child->next) {
+               if (menu_is_visible(child))
+                       return false;
+       }
+       return true;
+}
+
 bool menu_is_visible(struct menu *menu)
 {
        struct menu *child;
index dbf31ed..7975d8d 100644 (file)
@@ -45,8 +45,8 @@ static const char nconf_global_help[] = N_(
 "<n> to remove it.  You may press the <Space> key to cycle through the\n"
 "available options.\n"
 "\n"
-"A trailing \"--->\" designates a submenu.\n"
-"\n"
+"A trailing \"--->\" designates a submenu, a trailing \"----\" an\n"
+"empty submenu.\n"
 "\n"
 "Menu navigation keys\n"
 "----------------------------------------------------------------------\n"
@@ -131,7 +131,7 @@ static const char nconf_global_help[] = N_(
 "\n"),
 menu_no_f_instructions[] = N_(
 "Legend:  [*] built-in  [ ] excluded  <M> module  < > module capable.\n"
-"Submenus are designated by a trailing \"--->\".\n"
+"Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n"
 "\n"
 "Use the following keys to navigate the menus:\n"
 "Move up or down with <Up> and <Down>.\n"
@@ -148,7 +148,7 @@ menu_no_f_instructions[] = N_(
 "For help related to the current menu entry press <?> or <h>.\n"),
 menu_instructions[] = N_(
 "Legend:  [*] built-in  [ ] excluded  <M> module  < > module capable.\n"
-"Submenus are designated by a trailing \"--->\".\n"
+"Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n"
 "\n"
 "Use the following keys to navigate the menus:\n"
 "Move up or down with <Up> or <Down>.\n"
@@ -365,15 +365,16 @@ static void print_function_line(void)
        int i;
        int offset = 1;
        const int skip = 1;
+       int lines = getmaxy(stdscr);
 
        for (i = 0; i < function_keys_num; i++) {
                (void) wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]);
-               mvwprintw(main_window, LINES-3, offset,
+               mvwprintw(main_window, lines-3, offset,
                                "%s",
                                function_keys[i].key_str);
                (void) wattrset(main_window, attributes[FUNCTION_TEXT]);
                offset += strlen(function_keys[i].key_str);
-               mvwprintw(main_window, LINES-3,
+               mvwprintw(main_window, lines-3,
                                offset, "%s",
                                function_keys[i].func);
                offset += strlen(function_keys[i].func) + skip;
@@ -694,7 +695,7 @@ static void search_conf(void)
        int dres;
 
        title = str_new();
-       str_printf( &title, _("Enter %s (sub)string to search for "
+       str_printf( &title, _("Enter %s (sub)string or regexp to search for "
                              "(with or without \"%s\")"), CONFIG_, CONFIG_);
 
 again:
@@ -759,9 +760,9 @@ static void build_conf(struct menu *menu)
                                                indent + 1, ' ', prompt);
                                } else
                                        item_make(menu, 'm',
-                                               "   %*c%s  --->",
-                                               indent + 1,
-                                               ' ', prompt);
+                                                 "   %*c%s  %s",
+                                                 indent + 1, ' ', prompt,
+                                                 menu_is_empty(menu) ? "----" : "--->");
 
                                if (single_menu_mode && menu->data)
                                        goto conf_childs;
@@ -903,7 +904,7 @@ static void build_conf(struct menu *menu)
                                (sym_has_value(sym) || !sym_is_changable(sym)) ?
                                "" : _(" (NEW)"));
                if (menu->prompt && menu->prompt->type == P_MENU) {
-                       item_add_str("  --->");
+                       item_add_str("  %s", menu_is_empty(menu) ? "----" : "--->");
                        return;
                }
        }
@@ -954,7 +955,7 @@ static void show_menu(const char *prompt, const char *instructions,
 
        clear();
        (void) wattrset(main_window, attributes[NORMAL]);
-       print_in_middle(stdscr, 1, 0, COLS,
+       print_in_middle(stdscr, 1, 0, getmaxx(stdscr),
                        menu_backtitle,
                        attributes[MAIN_HEADING]);
 
@@ -1455,14 +1456,18 @@ static void conf_save(void)
 
 void setup_windows(void)
 {
+       int lines, columns;
+
+       getmaxyx(stdscr, lines, columns);
+
        if (main_window != NULL)
                delwin(main_window);
 
        /* set up the menu and menu window */
-       main_window = newwin(LINES-2, COLS-2, 2, 1);
+       main_window = newwin(lines-2, columns-2, 2, 1);
        keypad(main_window, TRUE);
-       mwin_max_lines = LINES-7;
-       mwin_max_cols = COLS-6;
+       mwin_max_lines = lines-7;
+       mwin_max_cols = columns-6;
 
        /* panels order is from bottom to top */
        new_panel(main_window);
@@ -1470,6 +1475,7 @@ void setup_windows(void)
 
 int main(int ac, char **av)
 {
+       int lines, columns;
        char *mode;
 
        setlocale(LC_ALL, "");
@@ -1495,7 +1501,8 @@ int main(int ac, char **av)
        keypad(stdscr, TRUE);
        curs_set(0);
 
-       if (COLS < 75 || LINES < 20) {
+       getmaxyx(stdscr, lines, columns);
+       if (columns < 75 || lines < 20) {
                endwin();
                printf("Your terminal should have at "
                        "least 20 lines and 75 columns\n");
index 9f8c44e..8275f0e 100644 (file)
@@ -276,8 +276,8 @@ int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...)
 
        total_width = max(msg_width, btns_width);
        /* place dialog in middle of screen */
-       y = (LINES-(msg_lines+4))/2;
-       x = (COLS-(total_width+4))/2;
+       y = (getmaxy(stdscr)-(msg_lines+4))/2;
+       x = (getmaxx(stdscr)-(total_width+4))/2;
 
 
        /* create the windows */
@@ -387,8 +387,8 @@ int dialog_inputbox(WINDOW *main_window,
                prompt_width = max(prompt_width, strlen(title));
 
        /* place dialog in middle of screen */
-       y = (LINES-(prompt_lines+4))/2;
-       x = (COLS-(prompt_width+4))/2;
+       y = (getmaxy(stdscr)-(prompt_lines+4))/2;
+       x = (getmaxx(stdscr)-(prompt_width+4))/2;
 
        strncpy(result, init, *result_len);
 
@@ -545,7 +545,7 @@ void show_scroll_win(WINDOW *main_window,
 {
        int res;
        int total_lines = get_line_no(text);
-       int x, y;
+       int x, y, lines, columns;
        int start_x = 0, start_y = 0;
        int text_lines = 0, text_cols = 0;
        int total_cols = 0;
@@ -556,6 +556,8 @@ void show_scroll_win(WINDOW *main_window,
        WINDOW *pad;
        PANEL *panel;
 
+       getmaxyx(stdscr, lines, columns);
+
        /* find the widest line of msg: */
        total_lines = get_line_no(text);
        for (i = 0; i < total_lines; i++) {
@@ -569,14 +571,14 @@ void show_scroll_win(WINDOW *main_window,
        (void) wattrset(pad, attributes[SCROLLWIN_TEXT]);
        fill_window(pad, text);
 
-       win_lines = min(total_lines+4, LINES-2);
-       win_cols = min(total_cols+2, COLS-2);
+       win_lines = min(total_lines+4, lines-2);
+       win_cols = min(total_cols+2, columns-2);
        text_lines = max(win_lines-4, 0);
        text_cols = max(win_cols-2, 0);
 
        /* place window in middle of screen */
-       y = (LINES-win_lines)/2;
-       x = (COLS-win_cols)/2;
+       y = (lines-win_lines)/2;
+       x = (columns-win_cols)/2;
 
        win = newwin(win_lines, win_cols, y, x);
        keypad(win, TRUE);
index ecc5aa5..d550300 100644 (file)
@@ -136,7 +136,7 @@ static struct property *sym_get_range_prop(struct symbol *sym)
        return NULL;
 }
 
-static int sym_get_range_val(struct symbol *sym, int base)
+static long sym_get_range_val(struct symbol *sym, int base)
 {
        sym_calc_value(sym);
        switch (sym->type) {
@@ -155,7 +155,7 @@ static int sym_get_range_val(struct symbol *sym, int base)
 static void sym_validate_range(struct symbol *sym)
 {
        struct property *prop;
-       int base, val, val2;
+       long base, val, val2;
        char str[64];
 
        switch (sym->type) {
@@ -179,9 +179,9 @@ static void sym_validate_range(struct symbol *sym)
                        return;
        }
        if (sym->type == S_INT)
-               sprintf(str, "%d", val2);
+               sprintf(str, "%ld", val2);
        else
-               sprintf(str, "0x%x", val2);
+               sprintf(str, "0x%lx", val2);
        sym->curr.val = strdup(str);
 }
 
@@ -300,6 +300,14 @@ void sym_calc_value(struct symbol *sym)
 
        if (sym->flags & SYMBOL_VALID)
                return;
+
+       if (sym_is_choice_value(sym) &&
+           sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) {
+               sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES;
+               prop = sym_get_choice_prop(sym);
+               sym_calc_value(prop_get_symbol(prop));
+       }
+
        sym->flags |= SYMBOL_VALID;
 
        oldval = sym->curr;
@@ -425,6 +433,9 @@ void sym_calc_value(struct symbol *sym)
 
        if (sym->flags & SYMBOL_AUTO)
                sym->flags &= ~SYMBOL_WRITE;
+
+       if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES)
+               set_all_choice_values(sym);
 }
 
 void sym_clear_all_valid(void)
@@ -583,7 +594,7 @@ bool sym_string_valid(struct symbol *sym, const char *str)
 bool sym_string_within_range(struct symbol *sym, const char *str)
 {
        struct property *prop;
-       int val;
+       long val;
 
        switch (sym->type) {
        case S_STRING:
@@ -943,38 +954,98 @@ const char *sym_escape_string_value(const char *in)
        return res;
 }
 
+struct sym_match {
+       struct symbol   *sym;
+       off_t           so, eo;
+};
+
+/* Compare matched symbols as follows:
+ * - first, symbols that match exactly
+ * - then, alphabetical sort
+ */
+static int sym_rel_comp( const void *sym1, const void *sym2 )
+{
+       struct sym_match *s1 = *(struct sym_match **)sym1;
+       struct sym_match *s2 = *(struct sym_match **)sym2;
+       int l1, l2;
+
+       /* Exact match:
+        * - if matched length on symbol s1 is the length of that symbol,
+        *   then this symbol should come first;
+        * - if matched length on symbol s2 is the length of that symbol,
+        *   then this symbol should come first.
+        * Note: since the search can be a regexp, both symbols may match
+        * exactly; if this is the case, we can't decide which comes first,
+        * and we fall back to sorting alphabetically.
+        */
+       l1 = s1->eo - s1->so;
+       l2 = s2->eo - s2->so;
+       if (l1 == strlen(s1->sym->name) && l2 != strlen(s2->sym->name))
+               return -1;
+       if (l1 != strlen(s1->sym->name) && l2 == strlen(s2->sym->name))
+               return 1;
+
+       /* As a fallback, sort symbols alphabetically */
+       return strcmp(s1->sym->name, s2->sym->name);
+}
+
 struct symbol **sym_re_search(const char *pattern)
 {
        struct symbol *sym, **sym_arr = NULL;
+       struct sym_match **sym_match_arr = NULL;
        int i, cnt, size;
        regex_t re;
+       regmatch_t match[1];
 
        cnt = size = 0;
        /* Skip if empty */
        if (strlen(pattern) == 0)
                return NULL;
-       if (regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB|REG_ICASE))
+       if (regcomp(&re, pattern, REG_EXTENDED|REG_ICASE))
                return NULL;
 
        for_all_symbols(i, sym) {
+               struct sym_match *tmp_sym_match;
                if (sym->flags & SYMBOL_CONST || !sym->name)
                        continue;
-               if (regexec(&re, sym->name, 0, NULL, 0))
+               if (regexec(&re, sym->name, 1, match, 0))
                        continue;
                if (cnt + 1 >= size) {
-                       void *tmp = sym_arr;
+                       void *tmp;
                        size += 16;
-                       sym_arr = realloc(sym_arr, size * sizeof(struct symbol *));
-                       if (!sym_arr) {
-                               free(tmp);
-                               return NULL;
+                       tmp = realloc(sym_match_arr, size * sizeof(struct sym_match *));
+                       if (!tmp) {
+                               goto sym_re_search_free;
                        }
+                       sym_match_arr = tmp;
                }
                sym_calc_value(sym);
-               sym_arr[cnt++] = sym;
+               tmp_sym_match = (struct sym_match*)malloc(sizeof(struct sym_match));
+               if (!tmp_sym_match)
+                       goto sym_re_search_free;
+               tmp_sym_match->sym = sym;
+               /* Since regexec returned 0, we know we have a match, so
+                * we can use match[0].rm_[se]o without further checks
+                */
+               tmp_sym_match->so = match[0].rm_so;
+               tmp_sym_match->eo = match[0].rm_eo;
+               sym_match_arr[cnt++] = tmp_sym_match;
        }
-       if (sym_arr)
+       if (sym_match_arr) {
+               qsort(sym_match_arr, cnt, sizeof(struct sym_match*), sym_rel_comp);
+               sym_arr = malloc((cnt+1) * sizeof(struct symbol *));
+               if (!sym_arr)
+                       goto sym_re_search_free;
+               for (i = 0; i < cnt; i++)
+                       sym_arr[i] = sym_match_arr[i]->sym;
                sym_arr[cnt] = NULL;
+       }
+sym_re_search_free:
+       if (sym_match_arr) {
+               for (i = 0; i < cnt; i++)
+                       free(sym_match_arr[i]);
+               free(sym_match_arr);
+       }
        regfree(&re);
 
        return sym_arr;
index 75d59fc..c11212f 100644 (file)
@@ -15,8 +15,8 @@ endef
 quiet_cmd_offsets = GEN     $@
 define cmd_offsets
        (set -e; \
-        echo "#ifndef __DEVICEVTABLE_OFFSETS_H__"; \
-        echo "#define __DEVICEVTABLE_OFFSETS_H__"; \
+        echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \
+        echo "#define __DEVICETABLE_OFFSETS_H__"; \
         echo "/*"; \
         echo " * DO NOT MODIFY."; \
         echo " *"; \
@@ -29,15 +29,10 @@ define cmd_offsets
         echo "#endif" ) > $@
 endef
 
-# We use internal kbuild rules to avoid the "is up to date" message from make
-scripts/mod/devicetable-offsets.s: scripts/mod/devicetable-offsets.c FORCE
-       $(Q)mkdir -p $(dir $@)
-       $(call if_changed_dep,cc_s_c)
+$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s
+       $(call if_changed,offsets)
 
-$(obj)/$(devicetable-offsets-file): scripts/mod/devicetable-offsets.s
-       $(call cmd,offsets)
-
-targets += $(devicetable-offsets-file)
+targets += $(devicetable-offsets-file) devicetable-offsets.s
 
 # dependencies on generated files need to be listed explicitly
 
index d9e67b7..2370863 100644 (file)
@@ -79,10 +79,12 @@ struct devtable **__start___devtable, **__stop___devtable;
 extern struct devtable *__start___devtable[], *__stop___devtable[];
 #endif /* __MACH__ */
 
-#if __GNUC__ == 3 && __GNUC_MINOR__ < 3
-# define __used                        __attribute__((__unused__))
-#else
-# define __used                        __attribute__((__used__))
+#if !defined(__used)
+# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
+#  define __used                       __attribute__((__unused__))
+# else
+#  define __used                       __attribute__((__used__))
+# endif
 #endif
 
 /* Define a variable f that holds the value of field f of struct devid
index fbbfd08..fdd3fbf 100755 (executable)
@@ -74,6 +74,7 @@ echo ""
 fi
 
 echo "%install"
+echo 'KBUILD_IMAGE=$(make image_name)'
 echo "%ifarch ia64"
 echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
index 84b88f1..d105a44 100755 (executable)
@@ -71,9 +71,6 @@ scm_version()
                        printf -- '-svn%s' "`git svn find-rev $head`"
                fi
 
-               # Update index only on r/w media
-               [ -w . ] && git update-index --refresh --unmerged > /dev/null
-
                # Check for uncommitted changes
                if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
                        printf '%s' -dirty
index f4912e2..84c17d8 100644 (file)
@@ -1,68 +1,68 @@
 #ifndef _TOOLS_BE_BYTESHIFT_H
 #define _TOOLS_BE_BYTESHIFT_H
 
-#include <linux/types.h>
+#include <stdint.h>
 
-static inline __u16 __get_unaligned_be16(const __u8 *p)
+static inline uint16_t __get_unaligned_be16(const uint8_t *p)
 {
        return p[0] << 8 | p[1];
 }
 
-static inline __u32 __get_unaligned_be32(const __u8 *p)
+static inline uint32_t __get_unaligned_be32(const uint8_t *p)
 {
        return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
 }
 
-static inline __u64 __get_unaligned_be64(const __u8 *p)
+static inline uint64_t __get_unaligned_be64(const uint8_t *p)
 {
-       return (__u64)__get_unaligned_be32(p) << 32 |
+       return (uint64_t)__get_unaligned_be32(p) << 32 |
               __get_unaligned_be32(p + 4);
 }
 
-static inline void __put_unaligned_be16(__u16 val, __u8 *p)
+static inline void __put_unaligned_be16(uint16_t val, uint8_t *p)
 {
        *p++ = val >> 8;
        *p++ = val;
 }
 
-static inline void __put_unaligned_be32(__u32 val, __u8 *p)
+static inline void __put_unaligned_be32(uint32_t val, uint8_t *p)
 {
        __put_unaligned_be16(val >> 16, p);
        __put_unaligned_be16(val, p + 2);
 }
 
-static inline void __put_unaligned_be64(__u64 val, __u8 *p)
+static inline void __put_unaligned_be64(uint64_t val, uint8_t *p)
 {
        __put_unaligned_be32(val >> 32, p);
        __put_unaligned_be32(val, p + 4);
 }
 
-static inline __u16 get_unaligned_be16(const void *p)
+static inline uint16_t get_unaligned_be16(const void *p)
 {
-       return __get_unaligned_be16((const __u8 *)p);
+       return __get_unaligned_be16((const uint8_t *)p);
 }
 
-static inline __u32 get_unaligned_be32(const void *p)
+static inline uint32_t get_unaligned_be32(const void *p)
 {
-       return __get_unaligned_be32((const __u8 *)p);
+       return __get_unaligned_be32((const uint8_t *)p);
 }
 
-static inline __u64 get_unaligned_be64(const void *p)
+static inline uint64_t get_unaligned_be64(const void *p)
 {
-       return __get_unaligned_be64((const __u8 *)p);
+       return __get_unaligned_be64((const uint8_t *)p);
 }
 
-static inline void put_unaligned_be16(__u16 val, void *p)
+static inline void put_unaligned_be16(uint16_t val, void *p)
 {
        __put_unaligned_be16(val, p);
 }
 
-static inline void put_unaligned_be32(__u32 val, void *p)
+static inline void put_unaligned_be32(uint32_t val, void *p)
 {
        __put_unaligned_be32(val, p);
 }
 
-static inline void put_unaligned_be64(__u64 val, void *p)
+static inline void put_unaligned_be64(uint64_t val, void *p)
 {
        __put_unaligned_be64(val, p);
 }
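With the header reduced to <stdint.h>, the big-endian helpers build in any plain userspace program. A small usage sketch (the include path is illustrative, not a guaranteed install location):

#include <stdio.h>
#include <stdint.h>
#include "be_byteshift.h"	/* illustrative path to the header above */

int main(void)
{
	/* 0x12345678 stored in network (big-endian) order at an odd offset. */
	uint8_t buf[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

	printf("0x%08x\n", (unsigned)get_unaligned_be32(buf + 1));	/* 0x12345678 */

	put_unaligned_be16(0xbeef, buf);
	printf("%02x %02x\n", buf[0], buf[1]);				/* be ef */
	return 0;
}
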
index c99d45a..8fe9f24 100644 (file)
@@ -1,68 +1,68 @@
 #ifndef _TOOLS_LE_BYTESHIFT_H
 #define _TOOLS_LE_BYTESHIFT_H
 
-#include <linux/types.h>
+#include <stdint.h>
 
-static inline __u16 __get_unaligned_le16(const __u8 *p)
+static inline uint16_t __get_unaligned_le16(const uint8_t *p)
 {
        return p[0] | p[1] << 8;
 }
 
-static inline __u32 __get_unaligned_le32(const __u8 *p)
+static inline uint32_t __get_unaligned_le32(const uint8_t *p)
 {
        return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
 }
 
-static inline __u64 __get_unaligned_le64(const __u8 *p)
+static inline uint64_t __get_unaligned_le64(const uint8_t *p)
 {
-       return (__u64)__get_unaligned_le32(p + 4) << 32 |
+       return (uint64_t)__get_unaligned_le32(p + 4) << 32 |
               __get_unaligned_le32(p);
 }
 
-static inline void __put_unaligned_le16(__u16 val, __u8 *p)
+static inline void __put_unaligned_le16(uint16_t val, uint8_t *p)
 {
        *p++ = val;
        *p++ = val >> 8;
 }
 
-static inline void __put_unaligned_le32(__u32 val, __u8 *p)
+static inline void __put_unaligned_le32(uint32_t val, uint8_t *p)
 {
        __put_unaligned_le16(val >> 16, p + 2);
        __put_unaligned_le16(val, p);
 }
 
-static inline void __put_unaligned_le64(__u64 val, __u8 *p)
+static inline void __put_unaligned_le64(uint64_t val, uint8_t *p)
 {
        __put_unaligned_le32(val >> 32, p + 4);
        __put_unaligned_le32(val, p);
 }
 
-static inline __u16 get_unaligned_le16(const void *p)
+static inline uint16_t get_unaligned_le16(const void *p)
 {
-       return __get_unaligned_le16((const __u8 *)p);
+       return __get_unaligned_le16((const uint8_t *)p);
 }
 
-static inline __u32 get_unaligned_le32(const void *p)
+static inline uint32_t get_unaligned_le32(const void *p)
 {
-       return __get_unaligned_le32((const __u8 *)p);
+       return __get_unaligned_le32((const uint8_t *)p);
 }
 
-static inline __u64 get_unaligned_le64(const void *p)
+static inline uint64_t get_unaligned_le64(const void *p)
 {
-       return __get_unaligned_le64((const __u8 *)p);
+       return __get_unaligned_le64((const uint8_t *)p);
 }
 
-static inline void put_unaligned_le16(__u16 val, void *p)
+static inline void put_unaligned_le16(uint16_t val, void *p)
 {
        __put_unaligned_le16(val, p);
 }
 
-static inline void put_unaligned_le32(__u32 val, void *p)
+static inline void put_unaligned_le32(uint32_t val, void *p)
 {
        __put_unaligned_le32(val, p);
 }
 
-static inline void put_unaligned_le64(__u64 val, void *p)
+static inline void put_unaligned_le64(uint64_t val, void *p)
 {
        __put_unaligned_le64(val, p);
 }
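The little-endian variant gets the same <linux/types.h> to <stdint.h> conversion; a round-trip sketch (again, the include path is only illustrative):

#include <assert.h>
#include <stdint.h>
#include "le_byteshift.h"	/* illustrative path to the header above */

int main(void)
{
	uint8_t buf[8];
	uint64_t val = 0x0102030405060708ULL;

	/* put/get are symmetric regardless of the host's own byte order. */
	put_unaligned_le64(val, buf);
	assert(buf[0] == 0x08 && buf[7] == 0x01);
	assert(get_unaligned_le64(buf) == val);
	return 0;
}
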
index 0ac3420..97bca48 100644 (file)
@@ -1,5 +1,4 @@
 # This creates the demonstration utility "lguest" which runs a Linux guest.
-# Missing headers?  Add "-I../../../include -I../../../arch/x86/include"
 CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
 
 all: lguest
index 07a0345..68f67cf 100644 (file)
 #include <pwd.h>
 #include <grp.h>
 
-#include <linux/virtio_config.h>
-#include <linux/virtio_net.h>
-#include <linux/virtio_blk.h>
-#include <linux/virtio_console.h>
-#include <linux/virtio_rng.h>
-#include <linux/virtio_ring.h>
-#include <asm/bootparam.h>
-#include "../../include/linux/lguest_launcher.h"
 /*L:110
  * We can ignore the 43 include files we need for this program, but I do want
  * to draw attention to the use of kernel-style types.
@@ -65,6 +57,15 @@ typedef uint16_t u16;
 typedef uint8_t u8;
 /*:*/
 
+#include <linux/virtio_config.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_blk.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_rng.h>
+#include <linux/virtio_ring.h>
+#include <asm/bootparam.h>
+#include "../../include/linux/lguest_launcher.h"
+
 #define BRIDGE_PFX "bridge:"
 #ifndef SIOCBRADDIF
 #define SIOCBRADDIF    0x89a2          /* add interface to bridge      */
@@ -177,7 +178,8 @@ static struct termios orig_term;
  * in precise order.
  */
 #define wmb() __asm__ __volatile__("" : : : "memory")
-#define mb() __asm__ __volatile__("" : : : "memory")
+#define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
+#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
 
 /* Wrapper for the last available index.  Makes it easier to change. */
 #define lg_last_avail(vq)      ((vq)->last_avail_idx)
@@ -676,6 +678,12 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
                errx(1, "Guest moved used index from %u to %u",
                     last_avail, vq->vring.avail->idx);
 
+       /* 
+        * Make sure we read the descriptor number *after* we read the ring
+        * update; don't let the cpu or compiler change the order.
+        */
+       rmb();
+
        /*
         * Grab the next descriptor number they're advertising, and increment
         * the index we've seen.
@@ -694,6 +702,12 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
        desc = vq->vring.desc;
        i = head;
 
+       /*
+        * We have to read the descriptor after we read the descriptor number,
+        * but there's a data dependency there so the CPU shouldn't reorder
+        * that: no rmb() required.
+        */
+
        /*
         * If this is an indirect entry, then this buffer contains a descriptor
         * table which we handle as if it's any normal descriptor chain.
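The launcher previously relied on plain compiler barriers; the hunks above turn mb()/rmb() into a locked add, which also orders the CPU on x86, and insert an rmb() so the descriptor number is only read after the avail index update has been observed. A stripped-down sketch of that consumer-side ordering, using simplified stand-in structures rather than the real vring layout, and assuming a 32-bit x86 build like the -m32 lguest Makefile above:

#include <stdint.h>
#include <stdio.h>

/* A locked read-modify-write doubles as a full memory barrier on x86. */
#define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")

struct demo_ring {
	volatile uint16_t idx;		/* bumped by the producer last */
	uint16_t entry[256];		/* filled in before idx is bumped */
};

static uint16_t consume(struct demo_ring *r, uint16_t last_seen)
{
	while (r->idx == last_seen)
		;			/* spin until the producer advances idx */

	/* Read the entry only after the index update has been observed. */
	rmb();
	return r->entry[last_seen % 256];
}

int main(void)
{
	struct demo_ring r = { .idx = 1, .entry = { 42 } };

	printf("%u\n", consume(&r, 0));	/* prints 42 */
	return 0;
}
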
index 3039a7e..28ce95a 100644 (file)
@@ -1 +1,6 @@
 #include <linux/export.h>
+
+#define MODULE_LICENSE(__MODULE_LICENSE_value) \
+       static __attribute__((unused)) const char *__MODULE_LICENSE_name = \
+               __MODULE_LICENSE_value
+
index cd80183..8447830 100644 (file)
@@ -45,9 +45,6 @@ struct virtqueue {
        void *priv;
 };
 
-#define MODULE_LICENSE(__MODULE_LICENSE_value) \
-       const char *__MODULE_LICENSE_name = __MODULE_LICENSE_value
-
 /* Interfaces exported by virtio_ring. */
 int virtqueue_add_sgs(struct virtqueue *vq,
                      struct scatterlist *sgs[],
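
The MODULE_LICENSE() stub moves out of the per-harness virtio.h into tools/virtio's linux/module.h and becomes a static, unused-tagged string, presumably so kernel sources pulled into the userspace test build compile cleanly (no duplicate definitions across objects, no unused-variable warnings). Roughly what a source file sees after expansion, as a hypothetical stand-alone example rather than a file from the tree:

#include <stdio.h>

/*
 * What the tools/virtio stub expands MODULE_LICENSE() into: an unused
 * static string instead of real module metadata.
 */
#define MODULE_LICENSE(__MODULE_LICENSE_value) \
	static __attribute__((unused)) const char *__MODULE_LICENSE_name = \
		__MODULE_LICENSE_value

MODULE_LICENSE("GPL");

int main(void)
{
	printf("MODULE_LICENSE stub compiled as plain userspace C\n");
	return 0;
}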