Merge branch 'hwmon-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/staging
author	Linus Torvalds <torvalds@linux-foundation.org>
Sun, 20 Feb 2011 18:15:22 +0000 (10:15 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Sun, 20 Feb 2011 18:15:22 +0000 (10:15 -0800)
* 'hwmon-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/staging:
  hwmon: (lm85) extend to support EMC6D103 chips
  MAINTAINERS: Remove stale hwmon quilt tree
  hwmon: (k10temp) add support for AMD Family 12h/14h CPUs
  hwmon: (jc42) do not allow writing to locked registers
  hwmon: (jc42) more helpful documentation
  hwmon: (jc42) fix type mismatch

104 files changed:
Documentation/networking/Makefile
Documentation/workqueue.txt
MAINTAINERS
arch/s390/crypto/sha_common.c
arch/sparc/include/asm/pcr.h
arch/sparc/kernel/iommu.c
arch/sparc/kernel/pcr.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/una_asm_32.S
arch/sparc/lib/bitext.c
drivers/atm/solos-pci.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/char/tpm/tpm.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm_tis.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_pm.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv40_graph.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/r300.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/isdn/hisax/isdnl2.c
drivers/memstick/core/memstick.c
drivers/message/fusion/mptbase.h
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptscsih.c
drivers/misc/tifm_core.c
drivers/misc/vmw_balloon.c
drivers/mtd/nand/r852.c
drivers/mtd/sm_ftl.c
drivers/net/can/mcp251x.c
drivers/net/can/softing/Kconfig
drivers/net/cxgb4vf/cxgb4vf_main.c
drivers/net/cxgb4vf/t4vf_hw.c
drivers/net/e1000e/netdev.c
drivers/net/forcedeth.c
drivers/net/ixgbe/ixgbe_fcoe.c
drivers/net/ixgbe/ixgbe_fcoe.h
drivers/net/ixgbe/ixgbe_main.c
drivers/net/pch_gbe/pch_gbe.h
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/r8169.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tg3.c
drivers/net/usb/hso.c
drivers/net/usb/usbnet.c
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/rtc/Kconfig
drivers/rtc/interface.c
drivers/rtc/rtc-dev.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_debug.c
drivers/spi/pxa2xx_spi_pci.c
drivers/target/Makefile
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_iblock.c
drivers/target/target_core_mib.c [deleted file]
drivers/target/target_core_mib.h [deleted file]
drivers/target/target_core_pscsi.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/tty/serial/max3100.c
drivers/tty/serial/max3107.c
drivers/xen/manage.c
fs/block_dev.c
fs/gfs2/glock.c
fs/gfs2/main.c
fs/namei.c
fs/nfsd/nfs4xdr.c
fs/partitions/mac.c
include/linux/freezer.h
include/linux/list.h
include/linux/rtc.h
include/linux/sched.h
include/linux/workqueue.h
include/target/target_core_base.h
include/target/target_core_transport.h
kernel/power/main.c
kernel/power/process.c
kernel/power/snapshot.c
kernel/workqueue.c
lib/list_debug.c
net/bluetooth/l2cap.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/core/dev.c
net/dcb/dcbnl.c
net/ipv4/devinet.c
net/ipv4/ip_gre.c
net/ipv4/route.c
net/ipv6/route.c
net/mac80211/util.c
net/netfilter/core.c
net/xfrm/xfrm_policy.c

diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
index 5aba7a3..24c308d 100644
@@ -4,6 +4,8 @@ obj- := dummy.o
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index 996a27d..01c513f 100644
@@ -190,9 +190,9 @@ resources, scheduled and executed.
        * Long running CPU intensive workloads which can be better
          managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-       A freezeable wq participates in the freeze phase of the system
+       A freezable wq participates in the freeze phase of the system
        suspend operations.  Work items on the wq are drained and no
        new work item starts execution until thawed.
 
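
For reference, a minimal sketch of a driver using the renamed freezable-workqueue API that the documentation hunk above describes (the example_wq/example_work/example_fn names and the "example" queue name are illustrative only, not taken from this merge; the call sites actually converted later in this diff are memstick, tifm, vmw_balloon, r852, sm_ftl and mcp251x):

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* illustrative name */

	static void example_fn(struct work_struct *work)
	{
		/* no new work item starts execution while the wq is frozen */
	}
	static DECLARE_WORK(example_work, example_fn);

	static int __init example_init(void)
	{
		/* was create_freezeable_workqueue() before this series */
		example_wq = create_freezable_workqueue("example");
		if (!example_wq)
			return -ENOMEM;
		queue_work(example_wq, &example_work);
		return 0;
	}
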
diff --git a/MAINTAINERS b/MAINTAINERS
index c0ae392..6f99e12 100644
@@ -885,7 +885,7 @@ S:  Supported
 
 ARM/QUALCOMM MSM MACHINE SUPPORT
 M:     David Brown <davidb@codeaurora.org>
-M:     Daniel Walker <dwalker@codeaurora.org>
+M:     Daniel Walker <dwalker@fifo99.com>
 M:     Bryan Huntsman <bryanh@codeaurora.org>
 L:     linux-arm-msm@vger.kernel.org
 F:     arch/arm/mach-msm/
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index f42dbab..48884f8 100644
@@ -38,6 +38,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
                BUG_ON(ret != bsize);
                data += bsize - index;
                len -= bsize - index;
+               index = 0;
        }
 
        /* process as many blocks as possible */
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
index a2f5c61..843e4fa 100644
@@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz)
 
 extern u64 pcr_enable;
 
+extern int pcr_arch_init(void);
+
 #endif /* __PCR_H */
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 47977a7..72509d0 100644
@@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
 static int iommu_alloc_ctx(struct iommu *iommu)
 {
        int lowest = iommu->ctx_lowest_free;
-       int sz = IOMMU_NUM_CTXS - lowest;
-       int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+       int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
 
-       if (unlikely(n == sz)) {
+       if (unlikely(n == IOMMU_NUM_CTXS)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index ae96cf5..7c2ced6 100644
@@ -167,5 +167,3 @@ out_unregister:
        unregister_perf_hsvc();
        return err;
 }
-
-early_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b6a2b8f..555a76d 100644
@@ -49,6 +49,7 @@
 #include <asm/mdesc.h>
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
+#include <asm/pcr.h>
 
 #include "cpumap.h"
 
@@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+       pcr_arch_init();
 }
 
 void smp_send_reschedule(int cpu)
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S
index 8cc0345..8f096e8 100644
@@ -24,9 +24,9 @@ retl_efault:
        .globl  __do_int_store
 __do_int_store:
        ld      [%o2], %g1
-       cmp     %1, 2
+       cmp     %o1, 2
        be      2f
-        cmp    %1, 4
+        cmp    %o1, 4
        be      1f
         srl    %g1, 24, %g2
        srl     %g1, 16, %g7
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
index 764b3eb..48d00e7 100644
@@ -10,7 +10,7 @@
  */
 
 #include <linux/string.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 
 #include <asm/bitext.h>
 
@@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
                while (test_bit(offset + i, t->map) == 0) {
                        i++;
                        if (i == len) {
-                               for (i = 0; i < len; i++)
-                                       __set_bit(offset + i, t->map);
+                               bitmap_set(t->map, offset, len);
                                if (offset == t->first_free)
                                        t->first_free = find_next_zero_bit
                                                        (t->map, t->size,
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4..25ef1a4 100644
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
        }
 
        skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-       if (!skb && net_ratelimit()) {
-               dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+       if (!skb) {
+               if (net_ratelimit())
+                       dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
                return -ENOMEM;
        }
        header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a126e61..333c212 100644
@@ -39,6 +39,8 @@ static struct usb_device_id ath3k_table[] = {
        /* Atheros AR3011 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3002) },
 
+       /* Atheros AR9285 Malbec with sflash firmware */
+       { USB_DEVICE(0x03F0, 0x311D) },
        { }     /* Terminating entry */
 };
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f..4cefa91 100644
@@ -102,6 +102,9 @@ static struct usb_device_id blacklist_table[] = {
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
 
+       /* Atheros AR9285 Malbec with sflash firmware */
+       { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
        /* Broadcom BCM2035 */
        { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
        { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index faf5a2c..36e0fa1 100644
@@ -577,11 +577,9 @@ duration:
        if (rc)
                return;
 
-       if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
-           be32_to_cpu(tpm_cmd.header.out.length)
-           != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
+       if (be32_to_cpu(tpm_cmd.header.out.return_code)
+           != 3 * sizeof(u32))
                return;
-
        duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
        chip->vendor.duration[TPM_SHORT] =
            usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -941,18 +939,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
 }
 EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
 
-ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct tpm_chip *chip = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%d %d %d\n",
-                      jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
-                      jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
-                      jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
-}
-EXPORT_SYMBOL_GPL(tpm_show_timeouts);
-
 ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index d84ff77..72ddb03 100644
@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
                                char *);
 extern ssize_t tpm_show_temp_deactivated(struct device *,
                                         struct device_attribute *attr, char *);
-extern ssize_t tpm_show_timeouts(struct device *,
-                                struct device_attribute *attr, char *);
 
 struct tpm_chip;
 
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0d1d38e..dd21df5 100644
@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
 
 static struct attribute *tis_attrs[] = {
        &dev_attr_pubek.attr,
@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = {
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
-       &dev_attr_cancel.attr,
-       &dev_attr_timeouts.attr, NULL,
+       &dev_attr_cancel.attr, NULL,
 };
 
 static struct attribute_group tis_attr_grp = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99..6bdab89 100644
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
                entry->tvconf.has_component_output = false;
                break;
        case OUTPUT_LVDS:
-               if ((conn & 0x00003f00) != 0x10)
+               if ((conn & 0x00003f00) >> 8 != 0x10)
                        entry->lvdsconf.use_straps_for_mode = true;
                entry->lvdsconf.use_power_scripts = true;
                break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26..d38a4d9 100644
@@ -128,6 +128,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
                }
        }
 
+       nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);
 
        nvbo->channel = chan;
@@ -166,17 +167,17 @@ static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+       int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
 
        if (dev_priv->card_type == NV_10 &&
-           nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+           nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+           nvbo->bo.mem.num_pages < vram_pages / 2) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
-               int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
@@ -785,7 +786,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
@@ -811,11 +812,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e000..390d82c 100644
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
        int high_w = 0, high_h = 0, high_v = 0;
 
        list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+               mode->vrefresh = drm_mode_vrefresh(mode);
                if (helper->mode_valid(connector, mode) != MODE_OK ||
                    (mode->flags & DRM_MODE_FLAG_INTERLACE))
                        continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index f05c0cd..4399e2f 100644
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
        struct nouveau_pm_level *perflvl;
 
-       if (pm->cur == &pm->boot)
+       if (!pm->cur || pm->cur == &pm->boot)
                return;
 
        perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550..c82db37 100644
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
        if (nv_encoder->dcb->type == OUTPUT_LVDS) {
                bool duallink, dummy;
 
-               nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
-                                             clock, &duallink, &dummy);
+               nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+                                             &duallink, &dummy);
                if (duallink)
                        regp->fp_control |= (8 << 28);
        } else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
                return;
 
        if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
-               struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
                /* when removing an output, crtc may not be set, but PANEL_OFF
                 * must still be run
                 */
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
                           nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
 
                if (mode == DRM_MODE_DPMS_ON) {
-                       if (!nv_connector->native_mode) {
-                               NV_ERROR(dev, "Not turning on LVDS without native mode\n");
-                               return;
-                       }
                        call_lvds_script(dev, nv_encoder->dcb, head,
-                                        LVDS_PANEL_ON, nv_connector->native_mode->clock);
+                                        LVDS_PANEL_ON, nv_encoder->mode.clock);
                } else
                        /* pxclk of 0 is fine for PANEL_OFF, and for a
                         * disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72..18d30c2 100644
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
        switch (dev_priv->chipset) {
+       case 0x40:
+       case 0x41: /* guess */
+       case 0x42:
+       case 0x43:
+       case 0x45: /* guess */
+       case 0x4e:
+               nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+               nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+               nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+               nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+               nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+               nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+               break;
        case 0x44:
        case 0x4a:
-       case 0x4e:
                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
                break;
-
        case 0x46:
        case 0x47:
        case 0x49:
        case 0x4b:
+       case 0x4c:
+       case 0x67:
+       default:
                nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
                break;
-
-       default:
-               nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
-               nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
-               nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-               nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-               nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-               nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
-               break;
        }
 }
 
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
                break;
        default:
                switch (dev_priv->chipset) {
-               case 0x46:
-               case 0x47:
-               case 0x49:
-               case 0x4b:
-                       nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
-                       nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
-                       break;
-               default:
+               case 0x41:
+               case 0x42:
+               case 0x43:
+               case 0x45:
+               case 0x4e:
+               case 0x44:
+               case 0x4a:
                        nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
                        nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
                        break;
+               default:
+                       nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+                       nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+                       break;
                }
                nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
                nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 095bc50..a4e5e53 100644
@@ -557,9 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 
                        /* use recommended ref_div for ss */
                        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                               pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                                if (ss_enabled) {
                                        if (ss->refdiv) {
+                                               pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                                                pll->flags |= RADEON_PLL_USE_REF_DIV;
                                                pll->reference_div = ss->refdiv;
                                                if (ASIC_IS_AVIVO(rdev))
@@ -662,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                                   index, (uint32_t *)&args);
                                adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
                                if (args.v3.sOutput.ucRefDiv) {
+                                       pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                                        pll->flags |= RADEON_PLL_USE_REF_DIV;
                                        pll->reference_div = args.v3.sOutput.ucRefDiv;
                                }
                                if (args.v3.sOutput.ucPostDiv) {
+                                       pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                                        pll->flags |= RADEON_PLL_USE_POST_DIV;
                                        pll->post_div = args.v3.sOutput.ucPostDiv;
                                }
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 768c60e..069efa8 100644
@@ -910,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_X16:
+               case R300_TX_FORMAT_FL_I16:
                case R300_TX_FORMAT_Y8X8:
                case R300_TX_FORMAT_Z5Y6X5:
                case R300_TX_FORMAT_Z6Y5X5:
@@ -922,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_Y16X16:
+               case R300_TX_FORMAT_FL_I16A16:
                case R300_TX_FORMAT_Z11Y11X10:
                case R300_TX_FORMAT_Z10Y11X11:
                case R300_TX_FORMAT_W8Z8Y8X8:
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd..08c1948 100644
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                        netif_carrier_on(nesvnic->netdev);
 
                                        spin_lock(&nesvnic->port_ibevent_lock);
-                                       if (nesdev->iw_status == 0) {
-                                               nesdev->iw_status = 1;
-                                               nes_port_ibevent(nesvnic);
+                                       if (nesvnic->of_device_registered) {
+                                               if (nesdev->iw_status == 0) {
+                                                       nesdev->iw_status = 1;
+                                                       nes_port_ibevent(nesvnic);
+                                               }
                                        }
                                        spin_unlock(&nesvnic->port_ibevent_lock);
                                }
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                        netif_carrier_off(nesvnic->netdev);
 
                                        spin_lock(&nesvnic->port_ibevent_lock);
-                                       if (nesdev->iw_status == 1) {
-                                               nesdev->iw_status = 0;
-                                               nes_port_ibevent(nesvnic);
+                                       if (nesvnic->of_device_registered) {
+                                               if (nesdev->iw_status == 1) {
+                                                       nesdev->iw_status = 0;
+                                                       nes_port_ibevent(nesvnic);
+                                               }
                                        }
                                        spin_unlock(&nesvnic->port_ibevent_lock);
                                }
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
                                netif_carrier_on(nesvnic->netdev);
 
                                spin_lock(&nesvnic->port_ibevent_lock);
-                               if (nesdev->iw_status == 0) {
-                                       nesdev->iw_status = 1;
-                                       nes_port_ibevent(nesvnic);
+                               if (nesvnic->of_device_registered) {
+                                       if (nesdev->iw_status == 0) {
+                                               nesdev->iw_status = 1;
+                                               nes_port_ibevent(nesvnic);
+                                       }
                                }
                                spin_unlock(&nesvnic->port_ibevent_lock);
                        }
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
                                netif_carrier_off(nesvnic->netdev);
 
                                spin_lock(&nesvnic->port_ibevent_lock);
-                               if (nesdev->iw_status == 1) {
-                                       nesdev->iw_status = 0;
-                                       nes_port_ibevent(nesvnic);
+                               if (nesvnic->of_device_registered) {
+                                       if (nesdev->iw_status == 1) {
+                                               nesdev->iw_status = 0;
+                                               nes_port_ibevent(nesvnic);
+                                       }
                                }
                                spin_unlock(&nesvnic->port_ibevent_lock);
                        }
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237..eca0c41 100644
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
         * there are still requests that haven't been acked.
         */
        if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-           !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+           !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+           (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
                start_timer(qp);
 
        while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
        }
 
        spin_lock_irqsave(&qp->s_lock, flags);
+       if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+               goto ack_done;
 
        /* Ignore invalid responses. */
        if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791..cfff0c4 100644
@@ -1247,10 +1247,10 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
        struct PStack *st = fi->userdata;
-       struct sk_buff *skb, *oskb;
+       struct sk_buff *skb;
        struct Layer2 *l2 = &st->l2;
        u_char header[MAX_HEADER_LEN];
-       int i;
+       int i, hdr_space_needed;
        int unsigned p1;
        u_long flags;
 
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
        if (!skb)
                return;
 
+       hdr_space_needed = l2headersize(l2, 0);
+       if (hdr_space_needed > skb_headroom(skb)) {
+               struct sk_buff *orig_skb = skb;
+
+               skb = skb_realloc_headroom(skb, hdr_space_needed);
+               if (!skb) {
+                       dev_kfree_skb(orig_skb);
+                       return;
+               }
+       }
        spin_lock_irqsave(&l2->lock, flags);
        if(test_bit(FLG_MOD128, &l2->flag))
                p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                l2->vs = (l2->vs + 1) % 8;
        }
        spin_unlock_irqrestore(&l2->lock, flags);
-       p1 = skb->data - skb->head;
-       if (p1 >= i)
-               memcpy(skb_push(skb, i), header, i);
-       else {
-               printk(KERN_WARNING
-               "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
-               oskb = skb;
-               skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
-               memcpy(skb_put(skb, i), header, i);
-               skb_copy_from_linear_data(oskb,
-                                         skb_put(skb, oskb->len), oskb->len);
-               dev_kfree_skb(oskb);
-       }
+       memcpy(skb_push(skb, i), header, i);
        st->l2.l2l1(st, PH_PULL | INDICATION, skb);
        test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
        if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab..8c1d85e 100644
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
 {
        int rc;
 
-       workqueue = create_freezeable_workqueue("kmemstick");
+       workqueue = create_freezable_workqueue("kmemstick");
        if (!workqueue)
                return -ENOMEM;
 
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f229..1735c84 100644
@@ -76,8 +76,8 @@
 #define COPYRIGHT      "Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON       "3.04.17"
-#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON       "3.04.18"
+#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.18"
 #define WHAT_MAGIC_STRING              "@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed..e8deb8e 100644
@@ -596,6 +596,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
        return 1;
 }
 
+static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+       fasync_helper(-1, filep, 0, &async_queue);
+       return 0;
+}
+
 static int
 mptctl_fasync(int fd, struct file *filep, int mode)
 {
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
        .llseek =       no_llseek,
        .fasync =       mptctl_fasync,
        .unlocked_ioctl = mptctl_ioctl,
+       .release =      mptctl_release,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = compat_mpctl_ioctl,
 #endif
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53..0d9b82a 100644
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
        }
 
  out:
-       printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-           ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+       printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+           ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+           SCpnt, SCpnt->serial_number);
 
        return retval;
 }
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
 
        vdevice = SCpnt->device->hostdata;
        if (!vdevice || !vdevice->vtarget) {
-               retval = SUCCESS;
+               retval = 0;
                goto out;
        }
 
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852d..44d4475 100644
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
 {
        int rc;
 
-       workqueue = create_freezeable_workqueue("tifm");
+       workqueue = create_freezable_workqueue("tifm");
        if (!workqueue)
                return -ENOMEM;
 
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e..6df5a55 100644
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
        if (x86_hyper != &x86_hyper_vmware)
                return -ENODEV;
 
-       vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+       vmballoon_wq = create_freezable_workqueue("vmmemctl");
        if (!vmballoon_wq) {
                pr_err("failed to create workqueue\n");
                return -ENOMEM;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efb..6322d1f 100644
@@ -930,7 +930,7 @@ int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 
        init_completion(&dev->dma_done);
 
-       dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+       dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
        if (!dev->card_workqueue)
                goto error9;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf..ac0d6a8 100644
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
 static __init int sm_module_init(void)
 {
        int error = 0;
-       cache_flush_workqueue = create_freezeable_workqueue("smflush");
+       cache_flush_workqueue = create_freezable_workqueue("smflush");
 
        if (IS_ERR(cache_flush_workqueue))
                return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534a..7513c45 100644
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
                goto open_unlock;
        }
 
-       priv->wq = create_freezeable_workqueue("mcp251x_wq");
+       priv->wq = create_freezable_workqueue("mcp251x_wq");
        INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
        INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 8ba81b3..5de46a9 100644
@@ -18,7 +18,7 @@ config CAN_SOFTING
 config CAN_SOFTING_CS
        tristate "Softing Gmbh CAN pcmcia cards"
        depends on PCMCIA
-       select CAN_SOFTING
+       depends on CAN_SOFTING
        ---help---
          Support for PCMCIA cards from Softing Gmbh & some cards
          from Vector Gmbh.
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae..6aad64d 100644
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
 {
        int i;
 
-       BUG_ON(adapter->debugfs_root == NULL);
+       BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
        /*
         * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-       BUG_ON(adapter->debugfs_root == NULL);
+       BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
        /*
         * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2488,17 +2488,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
        struct port_info *pi;
        struct net_device *netdev;
 
-       /*
-        * Vet our module parameters.
-        */
-       if (msi != MSI_MSIX && msi != MSI_MSI) {
-               dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-                       " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-                       MSI_MSI);
-               err = -EINVAL;
-               goto err_out;
-       }
-
        /*
         * Print our driver banner the first time we're called to initialize a
         * device.
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
        /*
         * Set up our debugfs entries.
         */
-       if (cxgb4vf_debugfs_root) {
+       if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
                adapter->debugfs_root =
                        debugfs_create_dir(pci_name(pdev),
                                           cxgb4vf_debugfs_root);
-               if (adapter->debugfs_root == NULL)
+               if (IS_ERR_OR_NULL(adapter->debugfs_root))
                        dev_warn(&pdev->dev, "could not create debugfs"
                                 " directory");
                else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
         */
 
 err_free_debugfs:
-       if (adapter->debugfs_root) {
+       if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
                cleanup_debugfs(adapter);
                debugfs_remove_recursive(adapter->debugfs_root);
        }
@@ -2802,7 +2791,6 @@ err_release_regions:
 err_disable_device:
        pci_disable_device(pdev);
 
-err_out:
        return err;
 }
 
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
                /*
                 * Tear down our debugfs entries.
                 */
-               if (adapter->debugfs_root) {
+               if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
                        cleanup_debugfs(adapter);
                        debugfs_remove_recursive(adapter->debugfs_root);
                }
@@ -2873,6 +2861,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
        pci_release_regions(pdev);
 }
 
+/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+       struct adapter *adapter;
+       int pidx;
+
+       adapter = pci_get_drvdata(pdev);
+       if (!adapter)
+               return;
+
+       /*
+        * Disable all Virtual Interfaces.  This will shut down the
+        * delivery of all ingress packets into the chip for these
+        * Virtual Interfaces.
+        */
+       for_each_port(adapter, pidx) {
+               struct net_device *netdev;
+               struct port_info *pi;
+
+               if (!test_bit(pidx, &adapter->registered_device_map))
+                       continue;
+
+               netdev = adapter->port[pidx];
+               if (!netdev)
+                       continue;
+
+               pi = netdev_priv(netdev);
+               t4vf_enable_vi(adapter, pi->viid, false, false);
+       }
+
+       /*
+        * Free up all Queues which will prevent further DMA and
+        * Interrupts allowing various internal pathways to drain.
+        */
+       t4vf_free_sge_resources(adapter);
+}
+
 /*
  * PCI Device registration data structures.
  */
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
        .id_table       = cxgb4vf_pci_tbl,
        .probe          = cxgb4vf_pci_probe,
        .remove         = __devexit_p(cxgb4vf_pci_remove),
+       .shutdown       = __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
 {
        int ret;
 
+       /*
+        * Vet our module parameters.
+        */
+       if (msi != MSI_MSIX && msi != MSI_MSI) {
+               printk(KERN_WARNING KBUILD_MODNAME
+                      ": bad module parameter msi=%d; must be %d"
+                      " (MSI-X or MSI) or %d (MSI)\n",
+                      msi, MSI_MSIX, MSI_MSI);
+               return -EINVAL;
+       }
+
        /* Debugfs support is optional, just warn if this fails */
        cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-       if (!cxgb4vf_debugfs_root)
+       if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
                printk(KERN_WARNING KBUILD_MODNAME ": could not create"
                       " debugfs entry, continuing\n");
 
        ret = pci_register_driver(&cxgb4vf_driver);
-       if (ret < 0)
+       if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
                debugfs_remove(cxgb4vf_debugfs_root);
        return ret;
 }
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80..192db22 100644
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
        delay_idx = 0;
        ms = delay[0];
 
-       for (i = 0; i < 500; i += ms) {
+       for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3065870..3fa110d 100644
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
        u16 phy_status, phy_1000t_status, phy_ext_status;
        u16 pci_status;
 
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        e1e_rphy(hw, PHY_STATUS, &phy_status);
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work,
                                        struct e1000_adapter, downshift_task);
 
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
 
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
        return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (!(adapter->flags2 & FLAG2_DMA_BURST))
+               return;
+
+       /* flush pending descriptor writebacks to memory */
+       ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+       ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+       /* execute the writes immediately */
+       e1e_flush();
+}
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
 
        if (!pci_channel_offline(adapter->pdev))
                e1000e_reset(adapter);
+
+       e1000e_flush_descriptors(adapter);
+
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);
 
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 {
        struct e1000_adapter *adapter = container_of(work,
                                        struct e1000_adapter, update_phy_task);
+
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        e1000_get_phy_info(&adapter->hw);
 }
 
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 static void e1000_update_phy_info(unsigned long data)
 {
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        schedule_work(&adapter->update_phy_task);
 }
 
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
        u32 link, tctl;
        int tx_pending = 0;
 
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        link = e1000e_has_link(adapter);
        if ((netif_carrier_ok(netdev)) && link) {
                /* Cancel scheduled suspend requests. */
@@ -4337,19 +4372,12 @@ link_up:
        else
                ew32(ICS, E1000_ICS_RXDMT0);
 
+       /* flush pending descriptors to memory before detecting Tx hang */
+       e1000e_flush_descriptors(adapter);
+
        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = 1;
 
-       /* flush partial descriptors to memory before detecting Tx hang */
-       if (adapter->flags2 & FLAG2_DMA_BURST) {
-               ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-               ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-               /*
-                * no need to flush the writes because the timeout code does
-                * an er32 first thing
-                */
-       }
-
        /*
         * With 82571 controllers, LAA may be overwritten due to controller
         * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
        struct e1000_adapter *adapter;
        adapter = container_of(work, struct e1000_adapter, reset_task);
 
+       /* don't run the task if already down */
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
              (adapter->flags & FLAG_RX_RESTART_NOW))) {
                e1000e_dump(adapter);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296..9c0b1ba 100644
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                goto out_error;
        }
 
+       netif_carrier_off(dev);
+
        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 8753980..c54a882 100644
@@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        struct scatterlist *sg;
        unsigned int i, j, dmacount;
        unsigned int len;
-       static const unsigned int bufflen = 4096;
+       static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
        unsigned int firstoff = 0;
        unsigned int lastsize;
        unsigned int thisoff = 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        /* only the last buffer may have non-full bufflen */
        lastsize = thisoff + thislen;
 
+       /*
+        * lastsize can not be buffer len.
+        * If it is then adding another buffer with lastsize = 1.
+        */
+       if (lastsize == bufflen) {
+               if (j >= IXGBE_BUFFCNT_MAX) {
+                       e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+                               "not enough user buffers. We need an extra "
+                               "buffer because lastsize is bufflen.\n",
+                               xid, i, j, dmacount, (u64)addr);
+                       goto out_noddp_free;
+               }
+
+               ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+               j++;
+               lastsize = 1;
+       }
+
        fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
        fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
        fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
                        e_err(drv, "failed to allocated FCoE DDP pool\n");
 
                spin_lock_init(&fcoe->lock);
+
+               /* Extra buffer to be shared by all DDPs for HW work around */
+               fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+               if (fcoe->extra_ddp_buffer == NULL) {
+                       e_err(drv, "failed to allocated extra DDP buffer\n");
+                       goto out_extra_ddp_buffer_alloc;
+               }
+
+               fcoe->extra_ddp_buffer_dma =
+                       dma_map_single(&adapter->pdev->dev,
+                                      fcoe->extra_ddp_buffer,
+                                      IXGBE_FCBUFF_MIN,
+                                      DMA_FROM_DEVICE);
+               if (dma_mapping_error(&adapter->pdev->dev,
+                                     fcoe->extra_ddp_buffer_dma)) {
+                       e_err(drv, "failed to map extra DDP buffer\n");
+                       goto out_extra_ddp_buffer_dma;
+               }
        }
 
        /* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
                }
        }
 #endif
+
+       return;
+
+out_extra_ddp_buffer_dma:
+       kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+       pci_pool_destroy(fcoe->pool);
+       fcoe->pool = NULL;
 }
 
 /**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
        if (fcoe->pool) {
                for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
                        ixgbe_fcoe_ddp_put(adapter->netdev, i);
+               dma_unmap_single(&adapter->pdev->dev,
+                                fcoe->extra_ddp_buffer_dma,
+                                IXGBE_FCBUFF_MIN,
+                                DMA_FROM_DEVICE);
+               kfree(fcoe->extra_ddp_buffer);
                pci_pool_destroy(fcoe->pool);
                fcoe->pool = NULL;
        }
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c55..65cc8fb 100644
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
        spinlock_t lock;
        struct pci_pool *pool;
        struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+       unsigned char *extra_ddp_buffer;
+       dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index fbae703..30f9ccf 100644
@@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
                         * We need to try and force an autonegotiation
                         * session, then bring up link.
                         */
-                       hw->mac.ops.setup_sfp(hw);
+                       if (hw->mac.ops.setup_sfp)
+                               hw->mac.ops.setup_sfp(hw);
                        if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
                                schedule_work(&adapter->multispeed_fiber_task);
                } else {
@@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
                unregister_netdev(adapter->netdev);
                return;
        }
-       hw->mac.ops.setup_sfp(hw);
+       if (hw->mac.ops.setup_sfp)
+               hw->mac.ops.setup_sfp(hw);
 
        if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
                /* This will also work for DA Twinax connections */
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a9..e1e33c8 100644
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
        struct pch_gbe_regs_mac_adr mac_adr[16];
        u32 ADDR_MASK;
        u32 MIIM;
-       u32 reserve2;
+       u32 MAC_ADDR_LOAD;
        u32 RGMII_ST;
        u32 RGMII_CTRL;
        u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 4c9a7d4..b99e90a 100644
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_SHORT_PKT              64
 #define DSC_INIT16                     0xC000
 #define PCH_GBE_DMA_ALIGN              0
+#define PCH_GBE_DMA_PADDING            2
 #define PCH_GBE_WATCHDOG_PERIOD                (1 * HZ)        /* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT      256
 #define PCH_GBE_PCI_BAR                        1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                               int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+       iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw:                    Pointer to the HW structure
@@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
        struct pch_gbe_buffer *buffer_info;
        struct pch_gbe_rx_desc *rx_desc;
        u32 length;
-       unsigned char tmp_packet[ETH_HLEN];
        unsigned int i;
        unsigned int cleaned_count = 0;
        bool cleaned = false;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *new_skb;
        u8 dma_status;
        u16 gbec_status;
        u32 tcp_ip_status;
-       u8 skb_copy_flag = 0;
-       u8 skb_padding_flag = 0;
 
        i = rx_ring->next_to_clean;
 
@@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        pr_err("Receive CRC Error\n");
                } else {
                        /* get receive length */
-                       /* length convert[-3], padding[-2] */
-                       length = (rx_desc->rx_words_eob) - 3 - 2;
+                       /* length convert[-3] */
+                       length = (rx_desc->rx_words_eob) - 3;
 
                        /* Decide the data conversion method */
                        if (!adapter->rx_csum) {
                                /* [Header:14][payload] */
-                               skb_padding_flag = 0;
-                               skb_copy_flag = 1;
+                               if (NET_IP_ALIGN) {
+                                       /* Because alignment differs,
+                                        * the new_skb is newly allocated,
+                                        * and data is copied to new_skb.*/
+                                       new_skb = netdev_alloc_skb(netdev,
+                                                        length + NET_IP_ALIGN);
+                                       if (!new_skb) {
+                                               /* dorrop error */
+                                               pr_err("New skb allocation "
+                                                       "Error\n");
+                                               goto dorrop;
+                                       }
+                                       skb_reserve(new_skb, NET_IP_ALIGN);
+                                       memcpy(new_skb->data, skb->data,
+                                              length);
+                                       skb = new_skb;
+                               } else {
+                                       /* DMA buffer is used as SKB as it is.*/
+                                       buffer_info->skb = NULL;
+                               }
                        } else {
                                /* [Header:14][padding:2][payload] */
-                               skb_padding_flag = 1;
-                               if (length < copybreak)
-                                       skb_copy_flag = 1;
-                               else
-                                       skb_copy_flag = 0;
-                       }
-
-                       /* Data conversion */
-                       if (skb_copy_flag) {    /* recycle  skb */
-                               struct sk_buff *new_skb;
-                               new_skb =
-                                   netdev_alloc_skb(netdev,
-                                                    length + NET_IP_ALIGN);
-                               if (new_skb) {
-                                       if (!skb_padding_flag) {
-                                               skb_reserve(new_skb,
-                                                               NET_IP_ALIGN);
+                               /* The length includes padding length */
+                               length = length - PCH_GBE_DMA_PADDING;
+                               if ((length < copybreak) ||
+                                   (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+                                       /* Because alignment differs,
+                                        * the new_skb is newly allocated,
+                                        * and data is copied to new_skb.
+                                        * Padding data is deleted
+                                        * at the time of a copy.*/
+                                       new_skb = netdev_alloc_skb(netdev,
+                                                        length + NET_IP_ALIGN);
+                                       if (!new_skb) {
+                                               /* dorrop error */
+                                               pr_err("New skb allocation "
+                                                       "Error\n");
+                                               goto dorrop;
                                        }
+                                       skb_reserve(new_skb, NET_IP_ALIGN);
                                        memcpy(new_skb->data, skb->data,
-                                               length);
-                                       /* save the skb
-                                        * in buffer_info as good */
+                                              ETH_HLEN);
+                                       memcpy(&new_skb->data[ETH_HLEN],
+                                              &skb->data[ETH_HLEN +
+                                              PCH_GBE_DMA_PADDING],
+                                              length - ETH_HLEN);
                                        skb = new_skb;
-                               } else if (!skb_padding_flag) {
-                                       /* dorrop error */
-                                       pr_err("New skb allocation Error\n");
-                                       goto dorrop;
+                               } else {
+                                       /* Padding data is deleted
+                                        * by moving header data.*/
+                                       memmove(&skb->data[PCH_GBE_DMA_PADDING],
+                                               &skb->data[0], ETH_HLEN);
+                                       skb_reserve(skb, NET_IP_ALIGN);
+                                       buffer_info->skb = NULL;
                                }
-                       } else {
-                               buffer_info->skb = NULL;
                        }
-                       if (skb_padding_flag) {
-                               memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-                               memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-                                       ETH_HLEN);
-                               skb_reserve(skb, NET_IP_ALIGN);
-
-                       }
-
+                       /* The length includes FCS length */
+                       length = length - ETH_FCS_LEN;
                        /* update status of driver */
                        adapter->stats.rx_bytes += length;
                        adapter->stats.rx_packets++;
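The copy/memmove split above boils down to removing a fixed amount of controller-inserted padding that sits between the Ethernet header and the payload. A minimal sketch of the in-place variant, with a hypothetical helper name and the pad width passed in as a parameter:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* Sketch only: slide the 14-byte Ethernet header forward so it abuts the
 * payload, overwriting the 'pad' bytes the DMA engine inserted after it,
 * then advance skb->data past the gap that is now at the front. */
static void strip_rx_padding(struct sk_buff *skb, unsigned int pad)
{
	memmove(skb->data + pad, skb->data, ETH_HLEN);
	skb_reserve(skb, pad);
}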
@@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
        netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
        pch_gbe_set_ethtool_ops(netdev);
 
+       pch_gbe_mac_load_mac_addr(&adapter->hw);
        pch_gbe_mac_reset_hw(&adapter->hw);
 
        /* setup the private structure */
index 59ccf0c..469ab0b 100644 (file)
@@ -3190,6 +3190,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_dev_run_wake(pdev))
                pm_runtime_put_noidle(&pdev->dev);
 
+       netif_carrier_off(dev);
+
 out:
        return rc;
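The single added call keeps userspace from seeing a stale "link up" operstate between probe and the first link-change interrupt. A hedged sketch of where such a call typically sits in a probe path (illustrative function name, not the r8169 code):

#include <linux/netdevice.h>

/* Illustrative probe tail: once the netdev is registered, report the
 * carrier as off until the hardware actually reports a link. */
static int example_probe_tail(struct net_device *dev)
{
	int rc = register_netdev(dev);

	if (rc)
		return rc;
	netif_carrier_off(dev);
	return 0;
}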
 
index 34a0af3..0e5f031 100644 (file)
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 
        priv->hw = device;
 
-       if (device_can_wakeup(priv->device))
+       if (device_can_wakeup(priv->device)) {
                priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+               enable_irq_wake(dev->irq);
+       }
 
        return 0;
 }
index 93b32d3..06c0e50 100644 (file)
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+               if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+                   ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+                    !netif_running(dev)))
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+               if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+                   ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+                    !netif_running(dev)))
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
index bed8fce..6d83812 100644 (file)
@@ -2628,15 +2628,15 @@ exit:
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-       struct hso_tiocmget *tiocmget = serial->tiocmget;
+       struct hso_tiocmget *tiocmget;
+       if (!serial)
+               return;
+       tiocmget = serial->tiocmget;
        if (tiocmget) {
-               if (tiocmget->urb) {
-                       usb_free_urb(tiocmget->urb);
-                       tiocmget->urb = NULL;
-               }
+               usb_free_urb(tiocmget->urb);
+               tiocmget->urb = NULL;
                serial->tiocmget = NULL;
                kfree(tiocmget);
-
        }
 }
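The simplification works because usb_free_urb() is a no-op when passed NULL, so only the containing structures need explicit guards. A reduced sketch of the same shape, with hypothetical types:

#include <linux/usb.h>
#include <linux/slab.h>

/* Hypothetical container holding an optional URB. */
struct urb_box {
	struct urb *urb;
};

static void free_urb_box(struct urb_box **pbox)
{
	struct urb_box *box;

	if (!pbox || !*pbox)
		return;
	box = *pbox;
	usb_free_urb(box->urb);	/* safe even when box->urb is NULL */
	kfree(box);
	*pbox = NULL;
}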
 
index ed9a416..95c41d5 100644 (file)
@@ -931,8 +931,10 @@ fail_halt:
                if (urb != NULL) {
                        clear_bit (EVENT_RX_MEMORY, &dev->flags);
                        status = usb_autopm_get_interface(dev->intf);
-                       if (status < 0)
+                       if (status < 0) {
+                               usb_free_urb(urb);
                                goto fail_lowmem;
+                       }
                        if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
                                resched = 0;
                        usb_autopm_put_interface(dev->intf);
index a9b852b..39b6f16 100644 (file)
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt)
-{
-       bool rc = true;
-       struct iwl3945_notif_statistics current_stat;
-       int combined_plcp_delta;
-       unsigned int plcp_msec;
-       unsigned long plcp_received_jiffies;
-
-       if (priv->cfg->base_params->plcp_delta_threshold ==
-           IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-               IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-               return rc;
-       }
-       memcpy(&current_stat, pkt->u.raw, sizeof(struct
-                       iwl3945_notif_statistics));
-       /*
-        * check for plcp_err and trigger radio reset if it exceeds
-        * the plcp error threshold plcp_delta.
-        */
-       plcp_received_jiffies = jiffies;
-       plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-                                       (long) priv->plcp_jiffies);
-       priv->plcp_jiffies = plcp_received_jiffies;
-       /*
-        * check to make sure plcp_msec is not 0 to prevent division
-        * by zero.
-        */
-       if (plcp_msec) {
-               combined_plcp_delta =
-                       (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-                       le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-               if ((combined_plcp_delta > 0) &&
-                       ((combined_plcp_delta * 100) / plcp_msec) >
-                       priv->cfg->base_params->plcp_delta_threshold) {
-                       /*
-                        * if plcp_err exceed the threshold, the following
-                        * data is printed in csv format:
-                        *    Text: plcp_err exceeded %d,
-                        *    Received ofdm.plcp_err,
-                        *    Current ofdm.plcp_err,
-                        *    combined_plcp_delta,
-                        *    plcp_msec
-                        */
-                       IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-                               "%u, %d, %u mSecs\n",
-                               priv->cfg->base_params->plcp_delta_threshold,
-                               le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-                               combined_plcp_delta, plcp_msec);
-                       /*
-                        * Reset the RF radio due to the high plcp
-                        * error rate
-                        */
-                       rc = false;
-               }
-       }
-       return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
                struct iwl_rx_mem_buffer *rxb)
 {
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
        .isr_ops = {
                .isr = iwl_isr_legacy,
        },
-       .check_plcp_health = iwl3945_good_plcp_health,
 
        .debugfs_ops = {
                .rx_stats_read = iwl3945_ucode_rx_stats_read,
index cdd9719..4941cad 100644 (file)
@@ -97,6 +97,18 @@ config RTC_INTF_DEV
 
          If unsure, say Y.
 
+config RTC_INTF_DEV_UIE_EMUL
+       bool "RTC UIE emulation on dev interface"
+       depends on RTC_INTF_DEV
+       help
+         Provides an emulation for RTC_UIE if the underlying rtc chip
+         driver does not expose RTC_UIE ioctls. Those requests generate
+         once-per-second update interrupts, used for synchronization.
+
+         The emulation code will read the time from the hardware
+         clock several times per second, please enable this option
+         only if you know that you really need it.
+
 config RTC_DRV_TEST
        tristate "Test driver/device"
        help
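Whether the update interrupt is real or emulated, userspace consumes it the same way through the dev interface. A small test along these lines (assuming the device node is /dev/rtc0) exercises it:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int i, fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}
	if (ioctl(fd, RTC_UIE_ON, 0) < 0) {	/* enable once-per-second irqs */
		perror("RTC_UIE_ON");
		return 1;
	}
	for (i = 0; i < 5; i++) {
		/* blocks until the next update interrupt (real or emulated) */
		if (read(fd, &data, sizeof(data)) < 0)
			break;
		printf("update irq %d, data 0x%lx\n", i, data);
	}
	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}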
index a0c0196..cb2f072 100644 (file)
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
        }
 
        if (err)
-               return err;
-
-       if (!rtc->ops)
+               /* nothing */;
+       else if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->alarm_irq_enable)
                err = -EINVAL;
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
        if (err)
                return err;
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       if (enabled == 0 && rtc->uie_irq_active) {
+               mutex_unlock(&rtc->ops_lock);
+               return rtc_dev_update_irq_enable_emul(rtc, 0);
+       }
+#endif
        /* make sure we're changing state */
        if (rtc->uie_rtctimer.enabled == enabled)
                goto out;
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 
 out:
        mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       /*
+        * Enable emulation if the driver did not provide
+        * the update_irq_enable function pointer or if returned
+        * -EINVAL to signal that it has been configured without
+        * interrupts or that are not available at the moment.
+        */
+       if (err == -EINVAL)
+               err = rtc_dev_update_irq_enable_emul(rtc, enabled);
+#endif
        return err;
 
 }
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
  *
  * Triggers the registered irq_task function callback.
  */
-static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
 {
        unsigned long flags;
 
index 37c3cc1..d0e06ed 100644 (file)
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
        return err;
 }
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+/*
+ * Routine to poll RTC seconds field for change as often as possible,
+ * after first RTC_UIE use timer to reduce polling
+ */
+static void rtc_uie_task(struct work_struct *work)
+{
+       struct rtc_device *rtc =
+               container_of(work, struct rtc_device, uie_task);
+       struct rtc_time tm;
+       int num = 0;
+       int err;
+
+       err = rtc_read_time(rtc, &tm);
+
+       spin_lock_irq(&rtc->irq_lock);
+       if (rtc->stop_uie_polling || err) {
+               rtc->uie_task_active = 0;
+       } else if (rtc->oldsecs != tm.tm_sec) {
+               num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
+               rtc->oldsecs = tm.tm_sec;
+               rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
+               rtc->uie_timer_active = 1;
+               rtc->uie_task_active = 0;
+               add_timer(&rtc->uie_timer);
+       } else if (schedule_work(&rtc->uie_task) == 0) {
+               rtc->uie_task_active = 0;
+       }
+       spin_unlock_irq(&rtc->irq_lock);
+       if (num)
+               rtc_handle_legacy_irq(rtc, num, RTC_UF);
+}
+static void rtc_uie_timer(unsigned long data)
+{
+       struct rtc_device *rtc = (struct rtc_device *)data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rtc->irq_lock, flags);
+       rtc->uie_timer_active = 0;
+       rtc->uie_task_active = 1;
+       if ((schedule_work(&rtc->uie_task) == 0))
+               rtc->uie_task_active = 0;
+       spin_unlock_irqrestore(&rtc->irq_lock, flags);
+}
+
+static int clear_uie(struct rtc_device *rtc)
+{
+       spin_lock_irq(&rtc->irq_lock);
+       if (rtc->uie_irq_active) {
+               rtc->stop_uie_polling = 1;
+               if (rtc->uie_timer_active) {
+                       spin_unlock_irq(&rtc->irq_lock);
+                       del_timer_sync(&rtc->uie_timer);
+                       spin_lock_irq(&rtc->irq_lock);
+                       rtc->uie_timer_active = 0;
+               }
+               if (rtc->uie_task_active) {
+                       spin_unlock_irq(&rtc->irq_lock);
+                       flush_scheduled_work();
+                       spin_lock_irq(&rtc->irq_lock);
+               }
+               rtc->uie_irq_active = 0;
+       }
+       spin_unlock_irq(&rtc->irq_lock);
+       return 0;
+}
+
+static int set_uie(struct rtc_device *rtc)
+{
+       struct rtc_time tm;
+       int err;
+
+       err = rtc_read_time(rtc, &tm);
+       if (err)
+               return err;
+       spin_lock_irq(&rtc->irq_lock);
+       if (!rtc->uie_irq_active) {
+               rtc->uie_irq_active = 1;
+               rtc->stop_uie_polling = 0;
+               rtc->oldsecs = tm.tm_sec;
+               rtc->uie_task_active = 1;
+               if (schedule_work(&rtc->uie_task) == 0)
+                       rtc->uie_task_active = 0;
+       }
+       rtc->irq_data = 0;
+       spin_unlock_irq(&rtc->irq_lock);
+       return 0;
+}
+
+int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
+{
+       if (enabled)
+               return set_uie(rtc);
+       else
+               return clear_uie(rtc);
+}
+EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
+
+#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
 
 static ssize_t
 rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -387,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc)
 
        rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       INIT_WORK(&rtc->uie_task, rtc_uie_task);
+       setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+#endif
+
        cdev_init(&rtc->char_dev, &rtc_dev_fops);
        rtc->char_dev.owner = rtc->owner;
 }
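The uie_task/uie_timer pair above implements a poll-fast-then-back-off loop: keep re-queuing the work item until the seconds value changes, then arm a timer for just under a second before polling again. Stripped to a skeleton with hypothetical names (sample_value() stands in for reading the RTC):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical state for the poll-then-back-off pattern. */
struct poller {
	struct work_struct task;
	struct timer_list timer;
	int last;
};

static int sample_value(void)
{
	return (int)(jiffies / HZ);	/* stand-in for reading the hardware */
}

static void poller_task(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, task);
	int v = sample_value();

	if (v != p->last) {
		p->last = v;
		/* value ticked over: sleep ~0.9 s before the next poll */
		mod_timer(&p->timer, jiffies + HZ - HZ / 10);
	} else {
		/* not yet: poll again as soon as the workqueue allows */
		schedule_work(&p->task);
	}
}

static void poller_timer(unsigned long data)
{
	struct poller *p = (struct poller *)data;

	schedule_work(&p->task);
}

static void poller_init(struct poller *p)
{
	p->last = sample_value();
	INIT_WORK(&p->task, poller_task);
	setup_timer(&p->timer, poller_timer, (unsigned long)p);
	schedule_work(&p->task);
}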
index 44578b5..d3e58d7 100644 (file)
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 {
        struct Scsi_Host *host = rport_to_shost(rport);
        fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+       unsigned long flags;
 
        if (!fcport)
                return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
         * Transport has effectively 'deleted' the rport, clear
         * all local references.
         */
-       spin_lock_irq(host->host_lock);
+       spin_lock_irqsave(host->host_lock, flags);
        fcport->rport = fcport->drport = NULL;
        *((fc_port_t **)rport->dd_data) = NULL;
-       spin_unlock_irq(host->host_lock);
+       spin_unlock_irqrestore(host->host_lock, flags);
 
        if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
                return;
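Every qla2xxx hunk in this series makes the same conversion: spin_unlock_irq() unconditionally re-enables interrupts, which is unsafe if the caller entered with interrupts already disabled, while the irqsave/irqrestore pair preserves whatever state the caller had. A minimal sketch around a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void clear_shared_slot(void **slot)
{
	unsigned long flags;

	/* Save the caller's interrupt state, disable IRQs, take the lock. */
	spin_lock_irqsave(&example_lock, flags);
	*slot = NULL;
	/* Restore the saved state rather than blindly re-enabling IRQs,
	 * so this is safe from both process and irq-disabled context. */
	spin_unlock_irqrestore(&example_lock, flags);
}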
index f948e1a..d9479c3 100644 (file)
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
 {
        fc_port_t *fcport = data;
        struct fc_rport *rport;
+       unsigned long flags;
 
-       spin_lock_irq(fcport->vha->host->host_lock);
+       spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        rport = fcport->drport ? fcport->drport: fcport->rport;
        fcport->drport = NULL;
-       spin_unlock_irq(fcport->vha->host->host_lock);
+       spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
        if (rport)
                fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
        struct fc_rport_identifiers rport_ids;
        struct fc_rport *rport;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
 
        qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
                    "Unable to allocate fc remote port!\n");
                return;
        }
-       spin_lock_irq(fcport->vha->host->host_lock);
+       spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        *((fc_port_t **)rport->dd_data) = fcport;
-       spin_unlock_irq(fcport->vha->host->host_lock);
+       spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
        rport->supported_classes = fcport->supported_classes;
 
index c194c23..f27724d 100644 (file)
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
        }
        if (atomic_read(&fcport->state) != FCS_ONLINE) {
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-                       atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
                        cmd->result = DID_NO_CONNECT << 16;
                        goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 {
        struct fc_rport *rport;
        scsi_qla_host_t *base_vha;
+       unsigned long flags;
 
        if (!fcport->rport)
                return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
        rport = fcport->rport;
        if (defer) {
                base_vha = pci_get_drvdata(vha->hw->pdev);
-               spin_lock_irq(vha->host->host_lock);
+               spin_lock_irqsave(vha->host->host_lock, flags);
                fcport->drport = rport;
-               spin_unlock_irq(vha->host->host_lock);
+               spin_unlock_irqrestore(vha->host->host_lock, flags);
                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                qla2xxx_wake_dpc(base_vha);
        } else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
 
        set_user_nice(current, -20);
 
+       set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-               set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                __set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
                qla2x00_do_dpc_all_vps(base_vha);
 
                ha->dpc_active = 0;
+               set_current_state(TASK_INTERRUPTIBLE);
        } /* End of while(1) */
+       __set_current_state(TASK_RUNNING);
 
        DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
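The set_current_state() moves close a lost-wakeup window in the DPC thread: the task has to mark itself TASK_INTERRUPTIBLE before it checks for more work or kthread_should_stop(), so a wake_up_process() racing with that check still takes effect. The resulting loop shape, sketched with an illustrative thread function:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Sketch of the sleep/work loop used by a kthread after this fix. */
static int example_dpc_thread(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* sleep until woken */
		__set_current_state(TASK_RUNNING);

		/* ... process whatever work was queued for us ... */

		/* Re-arm the sleep state *before* the next should_stop/work
		 * check; a wakeup arriving now will cancel the sleep. */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}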
 
index 7b31093..a6b2d72 100644 (file)
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
                            unsigned long long lba, unsigned int num, int write)
 {
        int ret;
-       unsigned int block, rest = 0;
+       unsigned long long block, rest = 0;
        int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
        func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
index 351d8a3..19752b0 100644 (file)
@@ -7,10 +7,9 @@
 #include <linux/of_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 
-struct awesome_struct {
+struct ce4100_info {
        struct ssp_device ssp;
-       struct platform_device spi_pdev;
-       struct pxa2xx_spi_master spi_pdata;
+       struct platform_device *spi_pdev;
 };
 
 static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
 }
 EXPORT_SYMBOL_GPL(pxa_ssp_free);
 
-static void plat_dev_release(struct device *dev)
-{
-       struct awesome_struct *as = container_of(dev,
-                       struct awesome_struct, spi_pdev.dev);
-
-       of_device_node_put(&as->spi_pdev.dev);
-}
-
 static int __devinit ce4100_spi_probe(struct pci_dev *dev,
                const struct pci_device_id *ent)
 {
        int ret;
        resource_size_t phys_beg;
        resource_size_t phys_len;
-       struct awesome_struct *spi_info;
+       struct ce4100_info *spi_info;
        struct platform_device *pdev;
-       struct pxa2xx_spi_master *spi_pdata;
+       struct pxa2xx_spi_master spi_pdata;
        struct ssp_device *ssp;
 
        ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
                return ret;
        }
 
+       pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
        spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
-       if (!spi_info) {
+       if (!pdev || !spi_info ) {
                ret = -ENOMEM;
-               goto err_kz;
+               goto err_nomem;
        }
-       ssp = &spi_info->ssp;
-       pdev = &spi_info->spi_pdev;
-       spi_pdata =  &spi_info->spi_pdata;
+       memset(&spi_pdata, 0, sizeof(spi_pdata));
+       spi_pdata.num_chipselect = dev->devfn;
 
-       pdev->name = "pxa2xx-spi";
-       pdev->id = dev->devfn;
-       pdev->dev.parent = &dev->dev;
-       pdev->dev.platform_data = &spi_info->spi_pdata;
+       ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+       if (ret)
+               goto err_nomem;
 
+       pdev->dev.parent = &dev->dev;
 #ifdef CONFIG_OF
        pdev->dev.of_node = dev->dev.of_node;
 #endif
-       pdev->dev.release = plat_dev_release;
-
-       spi_pdata->num_chipselect = dev->devfn;
-
+       ssp = &spi_info->ssp;
        ssp->phys_base = pci_resource_start(dev, 0);
        ssp->mmio_base = ioremap(phys_beg, phys_len);
        if (!ssp->mmio_base) {
                dev_err(&pdev->dev, "failed to ioremap() registers\n");
                ret = -EIO;
-               goto err_remap;
+               goto err_nomem;
        }
        ssp->irq = dev->irq;
        ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 
        pci_set_drvdata(dev, spi_info);
 
-       ret = platform_device_register(pdev);
+       ret = platform_device_add(pdev);
        if (ret)
                goto err_dev_add;
 
@@ -135,27 +123,21 @@ err_dev_add:
        mutex_unlock(&ssp_lock);
        iounmap(ssp->mmio_base);
 
-err_remap:
-       kfree(spi_info);
-
-err_kz:
+err_nomem:
        release_mem_region(phys_beg, phys_len);
-
+       platform_device_put(pdev);
+       kfree(spi_info);
        return ret;
 }
 
 static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 {
-       struct awesome_struct *spi_info;
-       struct platform_device *pdev;
+       struct ce4100_info *spi_info;
        struct ssp_device *ssp;
 
        spi_info = pci_get_drvdata(dev);
-
        ssp = &spi_info->ssp;
-       pdev = &spi_info->spi_pdev;
-
-       platform_device_unregister(pdev);
+       platform_device_unregister(spi_info->spi_pdev);
 
        iounmap(ssp->mmio_base);
        release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 }
 
 static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
        { },
 };
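The rework swaps a statically embedded struct platform_device (with a hand-rolled release hook) for the dynamic allocation API, which manages the device's lifetime itself. A reduced sketch of that registration flow, using illustrative names and platform data:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_pdata {
	int num_chipselect;
};

static struct platform_device *register_child(struct device *parent, int id)
{
	struct example_pdata pdata = { .num_chipselect = id };
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-child", id);
	if (!pdev)
		return NULL;

	/* Copies pdata into memory owned by the platform device. */
	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (ret)
		goto err;

	pdev->dev.parent = parent;

	ret = platform_device_add(pdev);
	if (ret)
		goto err;

	return pdev;

err:
	/* A device that was never added is released by dropping its
	 * reference, not by platform_device_unregister(). */
	platform_device_put(pdev);
	return NULL;
}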
index 5cfd708..973bb19 100644 (file)
@@ -13,8 +13,7 @@ target_core_mod-y             := target_core_configfs.o \
                                   target_core_transport.o \
                                   target_core_cdb.o \
                                   target_core_ua.o \
-                                  target_core_rd.o \
-                                  target_core_mib.o
+                                  target_core_rd.o
 
 obj-$(CONFIG_TARGET_CORE)      += target_core_mod.o
 
index 2764510..caf8dc1 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/parser.h>
 #include <linux/syscalls.h>
 #include <linux/configfs.h>
-#include <linux/proc_fs.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
 {
        struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
                                struct se_subsystem_dev, se_dev_group);
-       struct config_group *dev_cg;
-
-       if (!(se_dev))
-               return;
+       struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+       struct se_subsystem_api *t = hba->transport;
+       struct config_group *dev_cg = &se_dev->se_dev_group;
 
-       dev_cg = &se_dev->se_dev_group;
        kfree(dev_cg->default_groups);
+       /*
+        * This pointer will set when the storage is enabled with:
+        *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+        */
+       if (se_dev->se_dev_ptr) {
+               printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+                       "virtual_device() for se_dev_ptr: %p\n",
+                       se_dev->se_dev_ptr);
+
+               se_free_virtual_device(se_dev->se_dev_ptr, hba);
+       } else {
+               /*
+                * Release struct se_subsystem_dev->se_dev_su_ptr..
+                */
+               printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+                       "device() for se_dev_su_ptr: %p\n",
+                       se_dev->se_dev_su_ptr);
+
+               t->free_device(se_dev->se_dev_su_ptr);
+       }
+
+       printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+                       "_dev_t: %p\n", se_dev);
+       kfree(se_dev);
 }
 
 static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
        NULL,
 };
 
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+       struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+                       struct t10_alua_lu_gp, lu_gp_group);
+
+       core_alua_free_lu_gp(lu_gp);
+}
+
 static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+       .release                = target_core_alua_lu_gp_release,
        .show_attribute         = target_core_alua_lu_gp_attr_show,
        .store_attribute        = target_core_alua_lu_gp_attr_store,
 };
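This hunk and the ones that follow all apply the same fix: the object backing a config_group is freed from a configfs ->release() callback, which only runs once the last reference taken through config_item_put() is dropped, instead of synchronously in the drop_item path. The shape of such a callback, with a hypothetical object type:

#include <linux/configfs.h>
#include <linux/slab.h>

/* Hypothetical object wrapped in a config_group. */
struct example_obj {
	struct config_group group;
	/* ... object state ... */
};

static void example_obj_release(struct config_item *item)
{
	struct example_obj *obj = container_of(to_config_group(item),
					       struct example_obj, group);

	/* Runs when the final reference is dropped, not when the directory
	 * is rmdir'd, so in-flight users of the object stay safe. */
	kfree(obj);
}

static struct configfs_item_operations example_obj_ops = {
	.release	= example_obj_release,
};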
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
        printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
                " Group: core/alua/lu_gps/%s, ID: %hu\n",
                config_item_name(item), lu_gp->lu_gp_id);
-
+       /*
+        * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+        * -> target_core_alua_lu_gp_release()
+        */
        config_item_put(item);
-       core_alua_free_lu_gp(lu_gp);
 }
 
 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
        NULL,
 };
 
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+       struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+                       struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+       core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+       .release                = target_core_alua_tg_pt_gp_release,
        .show_attribute         = target_core_alua_tg_pt_gp_attr_show,
        .store_attribute        = target_core_alua_tg_pt_gp_attr_store,
 };
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
        printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
                " Group: alua/tg_pt_gps/%s, ID: %hu\n",
                config_item_name(item), tg_pt_gp->tg_pt_gp_id);
-
+       /*
+        * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+        * -> target_core_alua_tg_pt_gp_release().
+        */
        config_item_put(item);
-       core_alua_free_tg_pt_gp(tg_pt_gp);
 }
 
 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
        struct se_subsystem_api *t;
        struct config_item *df_item;
        struct config_group *dev_cg, *tg_pt_gp_cg;
-       int i, ret;
+       int i;
 
        hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
 
-       if (mutex_lock_interruptible(&hba->hba_access_mutex))
-               goto out;
-
+       mutex_lock(&hba->hba_access_mutex);
        t = hba->transport;
 
        spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev(
                config_item_put(df_item);
        }
        kfree(tg_pt_gp_cg->default_groups);
-       core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+       /*
+        * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
+        * directly from target_core_alua_tg_pt_gp_release().
+        */
        T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
 
        dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
                dev_cg->default_groups[i] = NULL;
                config_item_put(df_item);
        }
-
-       config_item_put(item);
        /*
-        * This pointer will set when the storage is enabled with:
-        * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+        * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+        * from target_core_dev_item_ops->release() ->target_core_dev_release().
         */
-       if (se_dev->se_dev_ptr) {
-               printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
-                       "virtual_device() for se_dev_ptr: %p\n",
-                               se_dev->se_dev_ptr);
-
-               ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
-               if (ret < 0)
-                       goto hba_out;
-       } else {
-               /*
-                * Release struct se_subsystem_dev->se_dev_su_ptr..
-                */
-               printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
-                       "device() for se_dev_su_ptr: %p\n",
-                       se_dev->se_dev_su_ptr);
-
-               t->free_device(se_dev->se_dev_su_ptr);
-       }
-
-       printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
-               "_dev_t: %p\n", se_dev);
-
-hba_out:
+       config_item_put(item);
        mutex_unlock(&hba->hba_access_mutex);
-out:
-       kfree(se_dev);
 }
 
 static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
 
+static void target_core_hba_release(struct config_item *item)
+{
+       struct se_hba *hba = container_of(to_config_group(item),
+                               struct se_hba, hba_group);
+       core_delete_hba(hba);
+}
+
 static struct configfs_attribute *target_core_hba_attrs[] = {
        &target_core_hba_hba_info.attr,
        &target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
 };
 
 static struct configfs_item_operations target_core_hba_item_ops = {
+       .release                = target_core_hba_release,
        .show_attribute         = target_core_hba_attr_show,
        .store_attribute        = target_core_hba_attr_store,
 };
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_hba *hba = item_to_hba(item);
-
+       /*
+        * core_delete_hba() is called from target_core_hba_item_ops->release()
+        * -> target_core_hba_release()
+        */
        config_item_put(item);
-       core_delete_hba(hba);
 }
 
 static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
        struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
        struct config_group *lu_gp_cg = NULL;
        struct configfs_subsystem *subsys;
-       struct proc_dir_entry *scsi_target_proc = NULL;
        struct t10_alua_lu_gp *lu_gp;
        int ret;
 
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
        if (core_dev_setup_virtual_lun0() < 0)
                goto out;
 
-       scsi_target_proc = proc_mkdir("scsi_target", 0);
-       if (!(scsi_target_proc)) {
-               printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
-               goto out;
-       }
-       ret = init_scsi_target_mib();
-       if (ret < 0)
-               goto out;
-
        return 0;
 
 out:
        configfs_unregister_subsystem(subsys);
-       if (scsi_target_proc)
-               remove_proc_entry("scsi_target", 0);
        core_dev_release_virtual_lun0();
        rd_module_exit();
 out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
                config_item_put(item);
        }
        kfree(lu_gp_cg->default_groups);
-       core_alua_free_lu_gp(se_global->default_lu_gp);
-       se_global->default_lu_gp = NULL;
+       lu_gp_cg->default_groups = NULL;
 
        alua_cg = &se_global->alua_group;
        for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
                config_item_put(item);
        }
        kfree(alua_cg->default_groups);
+       alua_cg->default_groups = NULL;
 
        hba_cg = &se_global->target_core_hbagroup;
        for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
                config_item_put(item);
        }
        kfree(hba_cg->default_groups);
-
-       for (i = 0; subsys->su_group.default_groups[i]; i++) {
-               item = &subsys->su_group.default_groups[i]->cg_item;
-               subsys->su_group.default_groups[i] = NULL;
-               config_item_put(item);
-       }
+       hba_cg->default_groups = NULL;
+       /*
+        * We expect subsys->su_group.default_groups to be released
+        * by configfs subsystem provider logic..
+        */
+       configfs_unregister_subsystem(subsys);
        kfree(subsys->su_group.default_groups);
 
-       configfs_unregister_subsystem(subsys);
+       core_alua_free_lu_gp(se_global->default_lu_gp);
+       se_global->default_lu_gp = NULL;
+
        printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
                        " Infrastructure\n");
 
-       remove_scsi_target_mib();
-       remove_proc_entry("scsi_target", 0);
        core_dev_release_virtual_lun0();
        rd_module_exit();
        release_se_global();
index 317ce58..5da051a 100644 (file)
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
                /*
                 * deve->se_lun_acl will be NULL for demo-mode created LUNs
                 * that have not been explictly concerted to MappedLUNs ->
-                * struct se_lun_acl.
+                * struct se_lun_acl, but we remove deve->alua_port_list from
+                * port->sep_alua_list. This also means that active UAs and
+                * NodeACL context specific PR metadata for demo-mode
+                * MappedLUN *deve will be released below..
                 */
-               if (!(deve->se_lun_acl))
-                       return 0;
-
                spin_lock_bh(&port->sep_alua_lock);
                list_del(&deve->alua_port_list);
                spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
                                printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
                                        " already set for demo mode -> explict"
                                        " LUN ACL transition\n");
+                               spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        if (deve->se_lun != lun) {
                                printk(KERN_ERR "struct se_dev_entry->se_lun does"
                                        " match passed struct se_lun for demo mode"
                                        " -> explict LUN ACL transition\n");
+                               spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        deve->se_lun_acl = lun_acl;
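The two added spin_unlock_irq() calls fix early error returns that previously left nacl->device_list_lock held with interrupts disabled. The general shape, sketched around a hypothetical lock and check:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_list_lock);

static int update_entry(int *entry, int expected, int actual)
{
	spin_lock_irq(&example_list_lock);
	if (expected != actual) {
		/* Error path: drop the lock before returning, or the lock
		 * (and the disabled IRQs) leak out of this function. */
		spin_unlock_irq(&example_list_lock);
		return -1;
	}
	*entry = actual;
	spin_unlock_irq(&example_list_lock);
	return 0;
}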
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
                }
        }
        spin_unlock(&hba->device_lock);
-
-       while (atomic_read(&hba->dev_mib_access_count))
-               cpu_relax();
 }
 
 int se_dev_check_online(struct se_device *dev)
index 32b148d..b65d1c8 100644 (file)
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
 
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+       struct se_lun_acl *lacl = container_of(to_config_group(item),
+                               struct se_lun_acl, se_lun_group);
+       struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+       core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
        &target_fabric_mappedlun_write_protect.attr,
        NULL,
 };
 
 static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+       .release                = target_fabric_mappedlun_release,
        .show_attribute         = target_fabric_mappedlun_attr_show,
        .store_attribute        = target_fabric_mappedlun_attr_store,
        .allow_link             = target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_lun_acl *lacl = container_of(to_config_group(item),
-                       struct se_lun_acl, se_lun_group);
-       struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
-
        config_item_put(item);
-       core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+       struct se_node_acl *se_nacl = container_of(to_config_group(item),
+                       struct se_node_acl, acl_group);
+       struct se_portal_group *se_tpg = se_nacl->se_tpg;
+       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+       .release                = target_fabric_nacl_base_release,
        .show_attribute         = target_fabric_nacl_base_attr_show,
        .store_attribute        = target_fabric_nacl_base_attr_store,
 };
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_portal_group *se_tpg = container_of(group,
-                       struct se_portal_group, tpg_acl_group);
-       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
        struct se_node_acl *se_nacl = container_of(to_config_group(item),
                        struct se_node_acl, acl_group);
        struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
                nacl_cg->default_groups[i] = NULL;
                config_item_put(df_item);
        }
-
+       /*
+        * struct se_node_acl free is done in target_fabric_nacl_base_release()
+        */
        config_item_put(item);
-       tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
 
 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
 
+static void target_fabric_np_base_release(struct config_item *item)
+{
+       struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+                               struct se_tpg_np, tpg_np_group);
+       struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
 static struct configfs_item_operations target_fabric_np_base_item_ops = {
+       .release                = target_fabric_np_base_release,
        .show_attribute         = target_fabric_np_base_attr_show,
        .store_attribute        = target_fabric_np_base_attr_store,
 };
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
        if (!(se_tpg_np) || IS_ERR(se_tpg_np))
                return ERR_PTR(-EINVAL);
 
+       se_tpg_np->tpg_np_parent = se_tpg;
        config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
                        &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
 
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_portal_group *se_tpg = container_of(group,
-                               struct se_portal_group, tpg_np_group);
-       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
-       struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
-                               struct se_tpg_np, tpg_np_group);
-
+       /*
+        * struct se_tpg_np is released via target_fabric_np_base_release()
+        */
        config_item_put(item);
-       tf->tf_ops.fabric_drop_np(se_tpg_np);
 }
 
 static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
  */
 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
 
+static void target_fabric_tpg_release(struct config_item *item)
+{
+       struct se_portal_group *se_tpg = container_of(to_config_group(item),
+                       struct se_portal_group, tpg_group);
+       struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+       struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
 static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+       .release                = target_fabric_tpg_release,
        .show_attribute         = target_fabric_tpg_attr_show,
        .store_attribute        = target_fabric_tpg_attr_store,
 };
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
-       struct target_fabric_configfs *tf = wwn->wwn_tf;
        struct se_portal_group *se_tpg = container_of(to_config_group(item),
                                struct se_portal_group, tpg_group);
        struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
        }
 
        config_item_put(item);
-       tf->tf_ops.fabric_drop_tpg(se_tpg);
 }
 
+static void target_fabric_release_wwn(struct config_item *item)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item),
+                               struct se_wwn, wwn_group);
+       struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+       .release        = target_fabric_release_wwn,
+};
+
 static struct configfs_group_operations target_fabric_tpg_group_ops = {
        .make_group     = target_fabric_make_tpg,
        .drop_item      = target_fabric_drop_tpg,
 };
 
-TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+               NULL);
 
 /* End of tfc_tpg_cit */
 
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
        struct config_group *group,
        struct config_item *item)
 {
-       struct target_fabric_configfs *tf = container_of(group,
-                               struct target_fabric_configfs, tf_group);
-       struct se_wwn *wwn = container_of(to_config_group(item),
-                               struct se_wwn, wwn_group);
-
        config_item_put(item);
-       tf->tf_ops.fabric_drop_wwn(wwn);
 }
 
 static struct configfs_group_operations target_fabric_wwn_group_ops = {
index c6e0d75..67f0c09 100644 (file)
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
 
        bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
-       if (!(bd))
+       if (IS_ERR(bd))
                goto failed;
        /*
         * Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
 {
        struct iblock_dev *ib_dev = p;
 
-       blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-       bioset_free(ib_dev->ibd_bio_set);
+       if (ib_dev->ibd_bd != NULL)
+               blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+       if (ib_dev->ibd_bio_set != NULL)
+               bioset_free(ib_dev->ibd_bio_set);
        kfree(ib_dev);
 }
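blkdev_get_by_path() reports failure with an ERR_PTR-encoded errno rather than NULL, so the old !(bd) test could never trip; the fix tests IS_ERR() instead, and the free path gains NULL checks for the partially-constructed case. A sketch of the open side, with an assumed wrapper name:

#include <linux/fs.h>
#include <linux/err.h>

static int open_backing_bdev(const char *path, void *holder,
			     struct block_device **out)
{
	struct block_device *bd;

	bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL,
				holder);
	if (IS_ERR(bd))
		return PTR_ERR(bd);	/* errors come back as ERR_PTR(), never NULL */

	*out = bd;
	return 0;
}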
 
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644 (file)
index d5a48aa..0000000
+++ /dev/null
@@ -1,1078 +0,0 @@
-/*******************************************************************************
- * Filename:  target_core_mib.c
- *
- * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <generated/utsrelease.h>
-#include <linux/utsname.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_configfs.h>
-
-#include "target_core_hba.h"
-#include "target_core_mib.h"
-
-/* SCSI mib table index */
-static struct scsi_index_table scsi_index_table;
-
-#ifndef INITIAL_JIFFIES
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-#endif
-
-/* SCSI Instance Table */
-#define SCSI_INST_SW_INDEX             1
-#define SCSI_TRANSPORT_INDEX           1
-
-#define NONE           "None"
-#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
-
-static inline int list_is_first(const struct list_head *list,
-                               const struct list_head *head)
-{
-       return list->prev == head;
-}
-
-static void *locate_hba_start(
-       struct seq_file *seq,
-       loff_t *pos)
-{
-       spin_lock(&se_global->g_device_lock);
-       return seq_list_start(&se_global->g_se_dev_list, *pos);
-}
-
-static void *locate_hba_next(
-       struct seq_file *seq,
-       void *v,
-       loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_se_dev_list, pos);
-}
-
-static void locate_hba_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock(&se_global->g_device_lock);
-}
-
-/****************************************************************************
- * SCSI MIB Tables
- ****************************************************************************/
-
-/*
- * SCSI Instance Table
- */
-static void *scsi_inst_seq_start(
-       struct seq_file *seq,
-       loff_t *pos)
-{
-       spin_lock(&se_global->hba_lock);
-       return seq_list_start(&se_global->g_hba_list, *pos);
-}
-
-static void *scsi_inst_seq_next(
-       struct seq_file *seq,
-       void *v,
-       loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_hba_list, pos);
-}
-
-static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock(&se_global->hba_lock);
-}
-
-static int scsi_inst_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
-
-       if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
-               seq_puts(seq, "inst sw_indx\n");
-
-       seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
-       seq_printf(seq, "plugin: %s version: %s\n",
-                       hba->transport->name, TARGET_CORE_VERSION);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_inst_seq_ops = {
-       .start  = scsi_inst_seq_start,
-       .next   = scsi_inst_seq_next,
-       .stop   = scsi_inst_seq_stop,
-       .show   = scsi_inst_seq_show
-};
-
-static int scsi_inst_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_inst_seq_ops);
-}
-
-static const struct file_operations scsi_inst_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_inst_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Device Table
- */
-static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_dev_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       char str[28];
-       int k;
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst indx role ports\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
-                  dev->dev_index, "Target", dev->dev_port_count);
-
-       memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-
-       /* vendor */
-       for (k = 0; k < 8; k++)
-               str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
-                               DEV_T10_WWN(dev)->vendor[k] : 0x20;
-       str[k] = 0x20;
-
-       /* model */
-       for (k = 0; k < 16; k++)
-               str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
-                               DEV_T10_WWN(dev)->model[k] : 0x20;
-       str[k + 9] = 0;
-
-       seq_printf(seq, "dev_alias: %s\n", str);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_dev_seq_ops = {
-       .start  = scsi_dev_seq_start,
-       .next   = scsi_dev_seq_next,
-       .stop   = scsi_dev_seq_stop,
-       .show   = scsi_dev_seq_show
-};
-
-static int scsi_dev_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_dev_seq_ops);
-}
-
-static const struct file_operations scsi_dev_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_dev_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Port Table
- */
-static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_port_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_port_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       struct se_port *sep, *sep_tmp;
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst device indx role busy_count\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       /* FIXME: scsiPortBusyStatuses count */
-       spin_lock(&dev->se_port_lock);
-       list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-               seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
-                       dev->dev_index, sep->sep_index, "Device",
-                       dev->dev_index, 0);
-       }
-       spin_unlock(&dev->se_port_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_port_seq_ops = {
-       .start  = scsi_port_seq_start,
-       .next   = scsi_port_seq_next,
-       .stop   = scsi_port_seq_stop,
-       .show   = scsi_port_seq_show
-};
-
-static int scsi_port_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_port_seq_ops);
-}
-
-static const struct file_operations scsi_port_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_port_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Transport Table
- */
-static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_transport_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       struct se_port *se, *se_tmp;
-       struct se_portal_group *tpg;
-       struct t10_wwn *wwn;
-       char buf[64];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst device indx dev_name\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       wwn = DEV_T10_WWN(dev);
-
-       spin_lock(&dev->se_port_lock);
-       list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
-               tpg = se->sep_tpg;
-               sprintf(buf, "scsiTransport%s",
-                               TPG_TFO(tpg)->get_fabric_name());
-
-               seq_printf(seq, "%u %s %u %s+%s\n",
-                       hba->hba_index, /* scsiTransportIndex */
-                       buf,  /* scsiTransportType */
-                       (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
-                       TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
-                       0,
-                       TPG_TFO(tpg)->tpg_get_wwn(tpg),
-                       (strlen(wwn->unit_serial)) ?
-                       /* scsiTransportDevName */
-                       wwn->unit_serial : wwn->vendor);
-       }
-       spin_unlock(&dev->se_port_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_transport_seq_ops = {
-       .start  = scsi_transport_seq_start,
-       .next   = scsi_transport_seq_next,
-       .stop   = scsi_transport_seq_stop,
-       .show   = scsi_transport_seq_show
-};
-
-static int scsi_transport_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_transport_seq_ops);
-}
-
-static const struct file_operations scsi_transport_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_transport_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Target Device Table
- */
-static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-
-#define LU_COUNT       1  /* for now */
-static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       int non_accessible_lus = 0;
-       char status[16];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst indx num_LUs status non_access_LUs"
-                       " resets\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       switch (dev->dev_status) {
-       case TRANSPORT_DEVICE_ACTIVATED:
-               strcpy(status, "activated");
-               break;
-       case TRANSPORT_DEVICE_DEACTIVATED:
-               strcpy(status, "deactivated");
-               non_accessible_lus = 1;
-               break;
-       case TRANSPORT_DEVICE_SHUTDOWN:
-               strcpy(status, "shutdown");
-               non_accessible_lus = 1;
-               break;
-       case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-       case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-               strcpy(status, "offline");
-               non_accessible_lus = 1;
-               break;
-       default:
-               sprintf(status, "unknown(%d)", dev->dev_status);
-               non_accessible_lus = 1;
-       }
-
-       seq_printf(seq, "%u %u %u %s %u %u\n",
-                  hba->hba_index, dev->dev_index, LU_COUNT,
-                  status, non_accessible_lus, dev->num_resets);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_tgt_dev_seq_ops = {
-       .start  = scsi_tgt_dev_seq_start,
-       .next   = scsi_tgt_dev_seq_next,
-       .stop   = scsi_tgt_dev_seq_stop,
-       .show   = scsi_tgt_dev_seq_show
-};
-
-static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_tgt_dev_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_dev_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_tgt_dev_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Target Port Table
- */
-static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       struct se_port *sep, *sep_tmp;
-       struct se_portal_group *tpg;
-       u32 rx_mbytes, tx_mbytes;
-       unsigned long long num_cmds;
-       char buf[64];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst device indx name port_index in_cmds"
-                       " write_mbytes read_mbytes hs_in_cmds\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       spin_lock(&dev->se_port_lock);
-       list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-               tpg = sep->sep_tpg;
-               sprintf(buf, "%sPort#",
-                       TPG_TFO(tpg)->get_fabric_name());
-
-               seq_printf(seq, "%u %u %u %s%d %s%s%d ",
-                    hba->hba_index,
-                    dev->dev_index,
-                    sep->sep_index,
-                    buf, sep->sep_index,
-                    TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
-                    TPG_TFO(tpg)->tpg_get_tag(tpg));
-
-               spin_lock(&sep->sep_lun->lun_sep_lock);
-               num_cmds = sep->sep_stats.cmd_pdus;
-               rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
-               tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
-               spin_unlock(&sep->sep_lun->lun_sep_lock);
-
-               seq_printf(seq, "%llu %u %u %u\n", num_cmds,
-                       rx_mbytes, tx_mbytes, 0);
-       }
-       spin_unlock(&dev->se_port_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_tgt_port_seq_ops = {
-       .start  = scsi_tgt_port_seq_start,
-       .next   = scsi_tgt_port_seq_next,
-       .stop   = scsi_tgt_port_seq_stop,
-       .show   = scsi_tgt_port_seq_show
-};
-
-static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_tgt_port_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_port_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_tgt_port_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Authorized Initiator Table:
- * It contains the SCSI Initiators authorized to be attached to one of the
- * local Target ports.
- * Iterates through all active TPGs and extracts the info from the ACLs
- */
-static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       spin_lock_bh(&se_global->se_tpg_lock);
-       return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
-                                        loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-                                               se_tpg_list);
-       struct se_dev_entry *deve;
-       struct se_lun *lun;
-       struct se_node_acl *se_nacl;
-       int j;
-
-       if (list_is_first(&se_tpg->se_tpg_list,
-                         &se_global->g_se_tpg_list))
-               seq_puts(seq, "inst dev port indx dev_or_port intr_name "
-                        "map_indx att_count num_cmds read_mbytes "
-                        "write_mbytes hs_num_cmds creation_time row_status\n");
-
-       if (!(se_tpg))
-               return 0;
-
-       spin_lock(&se_tpg->acl_node_lock);
-       list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
-
-               atomic_inc(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_inc();
-               spin_unlock(&se_tpg->acl_node_lock);
-
-               spin_lock_irq(&se_nacl->device_list_lock);
-               for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-                       deve = &se_nacl->device_list[j];
-                       if (!(deve->lun_flags &
-                                       TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-                           (!deve->se_lun))
-                               continue;
-                       lun = deve->se_lun;
-                       if (!lun->lun_se_dev)
-                               continue;
-
-                       seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
-                                       " %u %s\n",
-                               /* scsiInstIndex */
-                               (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-                               TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-                               0,
-                               /* scsiDeviceIndex */
-                               lun->lun_se_dev->dev_index,
-                               /* scsiAuthIntrTgtPortIndex */
-                               TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-                               /* scsiAuthIntrIndex */
-                               se_nacl->acl_index,
-                               /* scsiAuthIntrDevOrPort */
-                               1,
-                               /* scsiAuthIntrName */
-                               se_nacl->initiatorname[0] ?
-                                       se_nacl->initiatorname : NONE,
-                               /* FIXME: scsiAuthIntrLunMapIndex */
-                               0,
-                               /* scsiAuthIntrAttachedTimes */
-                               deve->attach_count,
-                               /* scsiAuthIntrOutCommands */
-                               deve->total_cmds,
-                               /* scsiAuthIntrReadMegaBytes */
-                               (u32)(deve->read_bytes >> 20),
-                               /* scsiAuthIntrWrittenMegaBytes */
-                               (u32)(deve->write_bytes >> 20),
-                               /* FIXME: scsiAuthIntrHSOutCommands */
-                               0,
-                               /* scsiAuthIntrLastCreation */
-                               (u32)(((u32)deve->creation_time -
-                                           INITIAL_JIFFIES) * 100 / HZ),
-                               /* FIXME: scsiAuthIntrRowStatus */
-                               "Ready");
-               }
-               spin_unlock_irq(&se_nacl->device_list_lock);
-
-               spin_lock(&se_tpg->acl_node_lock);
-               atomic_dec(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_dec();
-       }
-       spin_unlock(&se_tpg->acl_node_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_auth_intr_seq_ops = {
-       .start  = scsi_auth_intr_seq_start,
-       .next   = scsi_auth_intr_seq_next,
-       .stop   = scsi_auth_intr_seq_stop,
-       .show   = scsi_auth_intr_seq_show
-};
-
-static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_auth_intr_seq_ops);
-}
-
-static const struct file_operations scsi_auth_intr_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_auth_intr_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Attached Initiator Port Table:
- * It lists the SCSI Initiators attached to one of the local Target ports.
- * Iterates through all active TPGs and uses active sessions from each TPG
- * to list the info for this table.
- */
-static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       spin_lock_bh(&se_global->se_tpg_lock);
-       return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
-                                        loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-                                               se_tpg_list);
-       struct se_dev_entry *deve;
-       struct se_lun *lun;
-       struct se_node_acl *se_nacl;
-       struct se_session *se_sess;
-       unsigned char buf[64];
-       int j;
-
-       if (list_is_first(&se_tpg->se_tpg_list,
-                         &se_global->g_se_tpg_list))
-               seq_puts(seq, "inst dev port indx port_auth_indx port_name"
-                       " port_ident\n");
-
-       if (!(se_tpg))
-               return 0;
-
-       spin_lock(&se_tpg->session_lock);
-       list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
-               if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
-                   (!se_sess->se_node_acl) ||
-                   (!se_sess->se_node_acl->device_list))
-                       continue;
-
-               atomic_inc(&se_sess->mib_ref_count);
-               smp_mb__after_atomic_inc();
-               se_nacl = se_sess->se_node_acl;
-               atomic_inc(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_inc();
-               spin_unlock(&se_tpg->session_lock);
-
-               spin_lock_irq(&se_nacl->device_list_lock);
-               for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-                       deve = &se_nacl->device_list[j];
-                       if (!(deve->lun_flags &
-                                       TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-                          (!deve->se_lun))
-                               continue;
-
-                       lun = deve->se_lun;
-                       if (!lun->lun_se_dev)
-                               continue;
-
-                       memset(buf, 0, 64);
-                       if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
-                               TPG_TFO(se_tpg)->sess_get_initiator_sid(
-                                       se_sess, (unsigned char *)&buf[0], 64);
-
-                       seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
-                               /* scsiInstIndex */
-                               (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-                               TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-                               0,
-                               /* scsiDeviceIndex */
-                               lun->lun_se_dev->dev_index,
-                               /* scsiPortIndex */
-                               TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-                               /* scsiAttIntrPortIndex */
-                               (TPG_TFO(se_tpg)->sess_get_index != NULL) ?
-                               TPG_TFO(se_tpg)->sess_get_index(se_sess) :
-                               0,
-                               /* scsiAttIntrPortAuthIntrIdx */
-                               se_nacl->acl_index,
-                               /* scsiAttIntrPortName */
-                               se_nacl->initiatorname[0] ?
-                                       se_nacl->initiatorname : NONE,
-                               /* scsiAttIntrPortIdentifier */
-                               buf);
-               }
-               spin_unlock_irq(&se_nacl->device_list_lock);
-
-               spin_lock(&se_tpg->session_lock);
-               atomic_dec(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_dec();
-               atomic_dec(&se_sess->mib_ref_count);
-               smp_mb__after_atomic_dec();
-       }
-       spin_unlock(&se_tpg->session_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_att_intr_port_seq_ops = {
-       .start  = scsi_att_intr_port_seq_start,
-       .next   = scsi_att_intr_port_seq_next,
-       .stop   = scsi_att_intr_port_seq_stop,
-       .show   = scsi_att_intr_port_seq_show
-};
-
-static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_att_intr_port_seq_ops);
-}
-
-static const struct file_operations scsi_att_intr_port_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_att_intr_port_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Logical Unit Table
- */
-static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-#define SCSI_LU_INDEX          1
-static int scsi_lu_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       int j;
-       char str[28];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
-               " dev_type status state-bit num_cmds read_mbytes"
-               " write_mbytes resets full_stat hs_num_cmds creation_time\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       /* Fix LU state, if we can read it from the device */
-       seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
-                       dev->dev_index, SCSI_LU_INDEX,
-                       (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
-                       (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
-                       /* scsiLuWwnName */
-                       (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
-                       "None");
-
-       memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-       /* scsiLuVendorId */
-       for (j = 0; j < 8; j++)
-               str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
-                       DEV_T10_WWN(dev)->vendor[j] : 0x20;
-       str[8] = 0;
-       seq_printf(seq, " %s", str);
-
-       /* scsiLuProductId */
-       for (j = 0; j < 16; j++)
-               str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
-                       DEV_T10_WWN(dev)->model[j] : 0x20;
-       str[16] = 0;
-       seq_printf(seq, " %s", str);
-
-       /* scsiLuRevisionId */
-       for (j = 0; j < 4; j++)
-               str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
-                       DEV_T10_WWN(dev)->revision[j] : 0x20;
-       str[4] = 0;
-       seq_printf(seq, " %s", str);
-
-       seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
-               /* scsiLuPeripheralType */
-                  TRANSPORT(dev)->get_device_type(dev),
-                  (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
-               "available" : "notavailable", /* scsiLuStatus */
-               "exposed",      /* scsiLuState */
-               (unsigned long long)dev->num_cmds,
-               /* scsiLuReadMegaBytes */
-               (u32)(dev->read_bytes >> 20),
-               /* scsiLuWrittenMegaBytes */
-               (u32)(dev->write_bytes >> 20),
-               dev->num_resets, /* scsiLuInResets */
-               0, /* scsiLuOutTaskSetFullStatus */
-               0, /* scsiLuHSInCommands */
-               (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
-                                                       100 / HZ));
-
-       return 0;
-}
-
-static const struct seq_operations scsi_lu_seq_ops = {
-       .start  = scsi_lu_seq_start,
-       .next   = scsi_lu_seq_next,
-       .stop   = scsi_lu_seq_stop,
-       .show   = scsi_lu_seq_show
-};
-
-static int scsi_lu_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_lu_seq_ops);
-}
-
-static const struct file_operations scsi_lu_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_lu_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/****************************************************************************/
-
-/*
- * Remove proc fs entries
- */
-void remove_scsi_target_mib(void)
-{
-       remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_port", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
-       remove_proc_entry("scsi_target/mib", NULL);
-}
-
-/*
- * Create proc fs entries for the mib tables
- */
-int init_scsi_target_mib(void)
-{
-       struct proc_dir_entry *dir_entry;
-       struct proc_dir_entry *scsi_inst_entry;
-       struct proc_dir_entry *scsi_dev_entry;
-       struct proc_dir_entry *scsi_port_entry;
-       struct proc_dir_entry *scsi_transport_entry;
-       struct proc_dir_entry *scsi_tgt_dev_entry;
-       struct proc_dir_entry *scsi_tgt_port_entry;
-       struct proc_dir_entry *scsi_auth_intr_entry;
-       struct proc_dir_entry *scsi_att_intr_port_entry;
-       struct proc_dir_entry *scsi_lu_entry;
-
-       dir_entry = proc_mkdir("scsi_target/mib", NULL);
-       if (!(dir_entry)) {
-               printk(KERN_ERR "proc_mkdir() failed.\n");
-               return -1;
-       }
-
-       scsi_inst_entry =
-               create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
-       if (scsi_inst_entry)
-               scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
-       else
-               goto error;
-
-       scsi_dev_entry =
-               create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
-       if (scsi_dev_entry)
-               scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
-       else
-               goto error;
-
-       scsi_port_entry =
-               create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
-       if (scsi_port_entry)
-               scsi_port_entry->proc_fops = &scsi_port_seq_fops;
-       else
-               goto error;
-
-       scsi_transport_entry =
-               create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
-       if (scsi_transport_entry)
-               scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
-       else
-               goto error;
-
-       scsi_tgt_dev_entry =
-               create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
-       if (scsi_tgt_dev_entry)
-               scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
-       else
-               goto error;
-
-       scsi_tgt_port_entry =
-               create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
-       if (scsi_tgt_port_entry)
-               scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
-       else
-               goto error;
-
-       scsi_auth_intr_entry =
-               create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
-       if (scsi_auth_intr_entry)
-               scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
-       else
-               goto error;
-
-       scsi_att_intr_port_entry =
-             create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
-       if (scsi_att_intr_port_entry)
-               scsi_att_intr_port_entry->proc_fops =
-                               &scsi_att_intr_port_seq_fops;
-       else
-               goto error;
-
-       scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
-       if (scsi_lu_entry)
-               scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
-       else
-               goto error;
-
-       return 0;
-
-error:
-       printk(KERN_ERR "create_proc_entry() failed.\n");
-       remove_scsi_target_mib();
-       return -1;
-}
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables
- */
-void init_scsi_index_table(void)
-{
-       memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
-       spin_lock_init(&scsi_index_table.lock);
-}
-
-/*
- * Allocate a new row index for the entry type specified
- */
-u32 scsi_get_new_index(scsi_index_t type)
-{
-       u32 new_index;
-
-       if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
-               printk(KERN_ERR "Invalid index type %d\n", type);
-               return -1;
-       }
-
-       spin_lock(&scsi_index_table.lock);
-       new_index = ++scsi_index_table.scsi_mib_index[type];
-       if (new_index == 0)
-               new_index = ++scsi_index_table.scsi_mib_index[type];
-       spin_unlock(&scsi_index_table.lock);
-
-       return new_index;
-}
-EXPORT_SYMBOL(scsi_get_new_index);
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644 (file)
index 2772046..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_MIB_H
-#define TARGET_CORE_MIB_H
-
-typedef enum {
-       SCSI_INST_INDEX,
-       SCSI_DEVICE_INDEX,
-       SCSI_AUTH_INTR_INDEX,
-       SCSI_INDEX_TYPE_MAX
-} scsi_index_t;
-
-struct scsi_index_table {
-       spinlock_t      lock;
-       u32             scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
-/* SCSI Port stats */
-struct scsi_port_stats {
-       u64     cmd_pdus;
-       u64     tx_data_octets;
-       u64     rx_data_octets;
-} ____cacheline_aligned;
-
-extern int init_scsi_target_mib(void);
-extern void remove_scsi_target_mib(void);
-extern void init_scsi_index_table(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-
-#endif   /*** TARGET_CORE_MIB_H ***/
index 742d246..f2a0847 100644 (file)
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk(
         */
        bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
-       if (!(bd)) {
-               printk("pSCSI: blkdev_get_by_path() failed\n");
+       if (IS_ERR(bd)) {
+               printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
                scsi_device_put(sd);
                return NULL;
        }
index abfa81a..c26f674 100644 (file)
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
-       atomic_set(&acl->mib_ref_count, 0);
        acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
                cpu_relax();
 }
 
-void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
-{
-       while (atomic_read(&nacl->mib_ref_count) != 0)
-               cpu_relax();
-}
-
 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
 {
        int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
        spin_unlock_bh(&tpg->session_lock);
 
        core_tpg_wait_for_nacl_pr_ref(acl);
-       core_tpg_wait_for_mib_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);
 
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
 
 int core_tpg_deregister(struct se_portal_group *se_tpg)
 {
+       struct se_node_acl *nacl, *nacl_tmp;
+
        printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 
        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
+       /*
+        * Release any remaining demo-mode generated se_node_acl that have
+        * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+        * in transport_deregister_session().
+        */
+       spin_lock_bh(&se_tpg->acl_node_lock);
+       list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+                       acl_list) {
+               list_del(&nacl->acl_list);
+               se_tpg->num_node_acls--;
+               spin_unlock_bh(&se_tpg->acl_node_lock);
+
+               core_tpg_wait_for_nacl_pr_ref(nacl);
+               core_free_device_list_for_node(nacl, se_tpg);
+               TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+               spin_lock_bh(&se_tpg->acl_node_lock);
+       }
+       spin_unlock_bh(&se_tpg->acl_node_lock);
 
        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);
index 28b6292..236e22d 100644 (file)
@@ -379,6 +379,40 @@ void release_se_global(void)
        se_global = NULL;
 }
 
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+       memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+       spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+       u32 new_index;
+
+       if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+               printk(KERN_ERR "Invalid index type %d\n", type);
+               return -EINVAL;
+       }
+
+       spin_lock(&scsi_index_table.lock);
+       new_index = ++scsi_index_table.scsi_mib_index[type];
+       if (new_index == 0)
+               new_index = ++scsi_index_table.scsi_mib_index[type];
+       spin_unlock(&scsi_index_table.lock);
+
+       return new_index;
+}
+
 void transport_init_queue_obj(struct se_queue_obj *qobj)
 {
        atomic_set(&qobj->queue_cnt, 0);
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void)
        }
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
-       atomic_set(&se_sess->mib_ref_count, 0);
 
        return se_sess;
 }
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess)
                transport_free_session(se_sess);
                return;
        }
-       /*
-        * Wait for possible reference in drivers/target/target_core_mib.c:
-        * scsi_att_intr_port_seq_show()
-        */
-       while (atomic_read(&se_sess->mib_ref_count) != 0)
-               cpu_relax();
 
        spin_lock_bh(&se_tpg->session_lock);
        list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess)
                                spin_unlock_bh(&se_tpg->acl_node_lock);
 
                                core_tpg_wait_for_nacl_pr_ref(se_nacl);
-                               core_tpg_wait_for_mib_ref(se_nacl);
                                core_free_device_list_for_node(se_nacl, se_tpg);
                                TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
                                                se_nacl);
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map(
 
                return ret;
        }
+
+       BUG_ON(list_empty(se_mem_list));
        /*
         * This is the normal path for all normal non BIDI and BIDI-COMMAND
         * WRITE payloads..  If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
                struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
                u32 se_mem_cnt = 0, task_offset = 0;
 
-               BUG_ON(list_empty(cmd->t_task->t_mem_list));
+               if (!list_empty(T_TASK(cmd)->t_mem_list))
+                       se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+                                       struct se_mem, se_list);
 
                ret = transport_do_se_mem_map(dev, task,
                                cmd->t_task->t_mem_list, NULL, se_mem,
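
For context, the statistics index allocator now lives in target_core_transport.c
and is declared in target_core_transport.h. A minimal illustrative caller (not
part of this patch; the wrapper function below is hypothetical) might look like:

    #include <target/target_core_base.h>
    #include <target/target_core_transport.h>

    /* Allocate a unique, non-zero row index for a new target instance. */
    static u32 example_new_inst_index(void)
    {
            /* scsi_get_new_index() skips 0 on wrap, so 0 is never handed out */
            return scsi_get_new_index(SCSI_INST_INDEX);
    }
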
index beb1afa..7b951ad 100644 (file)
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
        s->rts = 0;
 
        sprintf(b, "max3100-%d", s->minor);
-       s->workqueue = create_freezeable_workqueue(b);
+       s->workqueue = create_freezable_workqueue(b);
        if (!s->workqueue) {
                dev_warn(&s->spi->dev, "cannot create workqueue\n");
                return -EBUSY;
index 910870e..750b4f6 100644 (file)
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
        struct max3107_port *s = container_of(port, struct max3107_port, port);
 
        /* Initialize work queue */
-       s->workqueue = create_freezeable_workqueue("max3107");
+       s->workqueue = create_freezable_workqueue("max3107");
        if (!s->workqueue) {
                dev_err(&s->spi->dev, "Workqueue creation failed\n");
                return -EBUSY;
index db8c4c4..2417727 100644 (file)
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
 #ifdef CONFIG_PM_SLEEP
 static int xen_hvm_suspend(void *data)
 {
+       int err;
        struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
        int *cancelled = data;
 
        BUG_ON(!irqs_disabled());
 
+       err = sysdev_suspend(PMSG_SUSPEND);
+       if (err) {
+               printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
+                      err);
+               return err;
+       }
+
        *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
 
        xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data)
                xen_timer_resume();
        }
 
+       sysdev_resume();
+
        return 0;
 }
 
index 333a7bb..4fb8a34 100644 (file)
@@ -1215,12 +1215,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 
        res = __blkdev_get(bdev, mode, 0);
 
-       /* __blkdev_get() may alter read only status, check it afterwards */
-       if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
-               __blkdev_put(bdev, mode, 0);
-               res = -EACCES;
-       }
-
        if (whole) {
                /* finish claiming */
                mutex_lock(&bdev->bd_mutex);
@@ -1298,6 +1292,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
        if (err)
                return ERR_PTR(err);
 
+       if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
+               blkdev_put(bdev, mode);
+               return ERR_PTR(-EACCES);
+       }
+
        return bdev;
 }
 EXPORT_SYMBOL(blkdev_get_by_path);
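
blkdev_get_by_path() reports failure as an ERR_PTR() value, never NULL, and with
the read-only check moved here a writable open of a read-only device now yields
ERR_PTR(-EACCES); callers therefore test the result with IS_ERR(), as the pSCSI
hunk above now does. An illustrative sketch, with path and holder standing in
for caller-supplied values:

    struct block_device *bd;

    bd = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
    if (IS_ERR(bd))
            return PTR_ERR(bd);     /* e.g. -EACCES for a read-only device */
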
index 08a8beb..7cd9a5a 100644 (file)
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void)
 #endif
 
        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-                                         WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+                                         WQ_HIGHPRI | WQ_FREEZABLE, 0);
        if (IS_ERR(glock_workqueue))
                return PTR_ERR(glock_workqueue);
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-                                               WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+                                               WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
        if (IS_ERR(gfs2_delete_workqueue)) {
                destroy_workqueue(glock_workqueue);
index ebef7ab..85ba027 100644 (file)
@@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void)
 
        error = -ENOMEM;
        gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-                                         WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+                                         WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
        if (!gfs_recovery_wq)
                goto fail_wq;
 
index 9e701e2..0087cf9 100644 (file)
@@ -795,7 +795,7 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p)
  * Without that kind of total limit, nasty chains of consecutive
  * symlinks can cause almost arbitrarily long lookups. 
  */
-static inline int do_follow_link(struct path *path, struct nameidata *nd)
+static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd)
 {
        void *cookie;
        int err = -ELOOP;
@@ -803,6 +803,7 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
        /* We drop rcu-walk here */
        if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
                return -ECHILD;
+       BUG_ON(inode != path->dentry->d_inode);
 
        if (current->link_count >= MAX_NESTED_LINKS)
                goto loop;
@@ -1413,8 +1414,7 @@ exec_again:
                        goto out_dput;
 
                if (inode->i_op->follow_link) {
-                       BUG_ON(inode != next.dentry->d_inode);
-                       err = do_follow_link(&next, nd);
+                       err = do_follow_link(inode, &next, nd);
                        if (err)
                                goto return_err;
                        nd->inode = nd->path.dentry->d_inode;
@@ -1458,8 +1458,7 @@ last_component:
                        break;
                if (inode && unlikely(inode->i_op->follow_link) &&
                    (lookup_flags & LOOKUP_FOLLOW)) {
-                       BUG_ON(inode != next.dentry->d_inode);
-                       err = do_follow_link(&next, nd);
+                       err = do_follow_link(inode, &next, nd);
                        if (err)
                                goto return_err;
                        nd->inode = nd->path.dentry->d_inode;
index 956629b..1275b86 100644 (file)
@@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                READ_BUF(dummy32);
                len += (XDR_QUADLEN(dummy32) << 2);
                READMEM(buf, dummy32);
-               if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
-                       goto out_nfserr;
+               if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
+                       return status;
                iattr->ia_valid |= ATTR_UID;
        }
        if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
@@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                READ_BUF(dummy32);
                len += (XDR_QUADLEN(dummy32) << 2);
                READMEM(buf, dummy32);
-               if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
-                       goto out_nfserr;
+               if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
+                       return status;
                iattr->ia_valid |= ATTR_GID;
        }
        if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
index 68d6a21..11f688b 100644 (file)
@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len)
 
 int mac_partition(struct parsed_partitions *state)
 {
-       int slot = 1;
        Sector sect;
        unsigned char *data;
-       int blk, blocks_in_map;
+       int slot, blocks_in_map;
        unsigned secsize;
 #ifdef CONFIG_PPC_PMAC
        int found_root = 0;
@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state)
                put_dev_sector(sect);
                return 0;               /* not a MacOS disk */
        }
-       strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
        blocks_in_map = be32_to_cpu(part->map_count);
-       for (blk = 1; blk <= blocks_in_map; ++blk) {
-               int pos = blk * secsize;
+       if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
+               put_dev_sector(sect);
+               return 0;
+       }
+       strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
+       for (slot = 1; slot <= blocks_in_map; ++slot) {
+               int pos = slot * secsize;
                put_dev_sector(sect);
                data = read_part_sector(state, pos/512, &sect);
                if (!data)
@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state)
                        }
 
                        if (goodness > found_root_goodness) {
-                               found_root = blk;
+                               found_root = slot;
                                found_root_goodness = goodness;
                        }
                }
 #endif /* CONFIG_PPC_PMAC */
-
-               ++slot;
        }
 #ifdef CONFIG_PPC_PMAC
        if (found_root_goodness)
index da7e52b..1effc8b 100644 (file)
@@ -109,7 +109,7 @@ static inline void freezer_count(void)
 }
 
 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
index 9a5f8a7..3a54266 100644 (file)
@@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
  * in an undefined state.
  */
 #ifndef CONFIG_DEBUG_LIST
+static inline void __list_del_entry(struct list_head *entry)
+{
+       __list_del(entry->prev, entry->next);
+}
+
 static inline void list_del(struct list_head *entry)
 {
        __list_del(entry->prev, entry->next);
@@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry)
        entry->prev = LIST_POISON2;
 }
 #else
+extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
@@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old,
  */
 static inline void list_del_init(struct list_head *entry)
 {
-       __list_del(entry->prev, entry->next);
+       __list_del_entry(entry);
        INIT_LIST_HEAD(entry);
 }
 
@@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry)
  */
 static inline void list_move(struct list_head *list, struct list_head *head)
 {
-       __list_del(list->prev, list->next);
+       __list_del_entry(list);
        list_add(list, head);
 }
 
@@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head)
 static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
 {
-       __list_del(list->prev, list->next);
+       __list_del_entry(list);
        list_add_tail(list, head);
 }
 
index a0b639f..89c3e51 100644 (file)
@@ -203,6 +203,18 @@ struct rtc_device
        struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
        int pie_enabled;
        struct work_struct irqwork;
+
+
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       struct work_struct uie_task;
+       struct timer_list uie_timer;
+       /* Those fields are protected by rtc->irq_lock */
+       unsigned int oldsecs;
+       unsigned int uie_irq_active:1;
+       unsigned int stop_uie_polling:1;
+       unsigned int uie_task_active:1;
+       unsigned int uie_timer_active:1;
+#endif
 };
 #define to_rtc_device(d) container_of(d, struct rtc_device, dev)
 
@@ -235,7 +247,10 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc,
                                struct rtc_task *task, int freq);
 extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled);
 extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
+extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
+                                               unsigned int enabled);
 
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
 void rtc_aie_update_irq(void *private);
 void rtc_uie_update_irq(void *private);
 enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
index d747f94..777d8a5 100644 (file)
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MEMPOLICY   0x10000000      /* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER        0x20000000      /* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP        0x40000000      /* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP        0x40000000      /* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000    /* Freezer won't send signals to it */
 
 /*
index 1ac1158..f7998a3 100644 (file)
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
        WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
-       WQ_FREEZEABLE           = 1 << 2, /* freeze during suspend */
+       WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
        WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags)
 
 #define create_workqueue(name)                                 \
        alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)                      \
-       alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)                       \
+       alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)                    \
        alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
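
The WQ_FREEZEABLE flag and the create_freezeable_workqueue() helper are renamed
to the correct spelling with unchanged semantics. The caller-side sketch below
is illustrative only; the workqueue name "example" is a placeholder:

    struct workqueue_struct *wq;

    /* explicit flags ... */
    wq = alloc_workqueue("example", WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);

    /* ... or the renamed convenience macro, which also sets WQ_UNBOUND */
    wq = create_freezable_workqueue("example");
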
index 07fdfb6..0828b6c 100644 (file)
@@ -8,7 +8,6 @@
 #include <scsi/scsi_cmnd.h>
 #include <net/sock.h>
 #include <net/tcp.h>
-#include "target_core_mib.h"
 
 #define TARGET_CORE_MOD_VERSION                "v4.0.0-rc6"
 #define SHUTDOWN_SIGS  (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
@@ -195,6 +194,21 @@ typedef enum {
        SAM_TASK_ATTR_EMULATED
 } t10_task_attr_index_t;
 
+/*
+ * Used for target SCSI statistics
+ */
+typedef enum {
+       SCSI_INST_INDEX,
+       SCSI_DEVICE_INDEX,
+       SCSI_AUTH_INTR_INDEX,
+       SCSI_INDEX_TYPE_MAX
+} scsi_index_t;
+
+struct scsi_index_table {
+       spinlock_t      lock;
+       u32             scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+} ____cacheline_aligned;
+
 struct se_cmd;
 
 struct t10_alua {
@@ -578,8 +592,6 @@ struct se_node_acl {
        spinlock_t              stats_lock;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                acl_pr_ref_count;
-       /* Used for MIB access */
-       atomic_t                mib_ref_count;
        struct se_dev_entry     *device_list;
        struct se_session       *nacl_sess;
        struct se_portal_group *se_tpg;
@@ -595,8 +607,6 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
-       /* Used for MIB access */
-       atomic_t                mib_ref_count;
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
@@ -806,7 +816,6 @@ struct se_hba {
        /* Virtual iSCSI devices attached. */
        u32                     dev_count;
        u32                     hba_index;
-       atomic_t                dev_mib_access_count;
        atomic_t                load_balance_queue;
        atomic_t                left_queue_depth;
        /* Maximum queue depth the HBA can handle. */
@@ -845,6 +854,12 @@ struct se_lun {
 
 #define SE_LUN(c)              ((struct se_lun *)(c)->se_lun)
 
+struct scsi_port_stats {
+       u64     cmd_pdus;
+       u64     tx_data_octets;
+       u64     rx_data_octets;
+} ____cacheline_aligned;
+
 struct se_port {
        /* RELATIVE TARGET PORT IDENTIFIER */
        u16             sep_rtpi;
@@ -867,6 +882,7 @@ struct se_port {
 } ____cacheline_aligned;
 
 struct se_tpg_np {
+       struct se_portal_group *tpg_np_parent;
        struct config_group     tpg_np_group;
 } ____cacheline_aligned;
 
index 66f44e5..2469405 100644 (file)
@@ -111,6 +111,8 @@ struct se_subsystem_api;
 
 extern int init_se_global(void);
 extern void release_se_global(void);
+extern void init_scsi_index_table(void);
+extern u32 scsi_get_new_index(scsi_index_t);
 extern void transport_init_queue_obj(struct se_queue_obj *);
 extern int transport_subsystem_check_init(void);
 extern int transport_subsystem_register(struct se_subsystem_api *);
index 7b5db6a..7018530 100644 (file)
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
 
 static int __init pm_start_workqueue(void)
 {
-       pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+       pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
        return pm_wq ? 0 : -ENOMEM;
 }
index d6d2a10..0cf3a27 100644 (file)
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT        (20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
        if ((p == current) ||
            (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
                todo = 0;
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
-                       if (frozen(p) || !freezeable(p))
+                       if (frozen(p) || !freezable(p))
                                continue;
 
                        if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only)
 
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
-               if (!freezeable(p))
+               if (!freezable(p))
                        continue;
 
                if (nosig_only && should_send_signal(p))
index 0dac75e..64db648 100644 (file)
@@ -1519,11 +1519,8 @@ static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
                unsigned int nr_pages, unsigned int nr_highmem)
 {
-       int error = 0;
-
        if (nr_highmem > 0) {
-               error = get_highmem_buffer(PG_ANY);
-               if (error)
+               if (get_highmem_buffer(PG_ANY))
                        goto err_out;
                if (nr_highmem > alloc_highmem) {
                        nr_highmem -= alloc_highmem;
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 
  err_out:
        swsusp_free();
-       return error;
+       return -ENOMEM;
 }
 
 asmlinkage int swsusp_save(void)
index 11869fa..ee6578b 100644 (file)
@@ -79,7 +79,9 @@ enum {
        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
 
-       MAYDAY_INITIAL_TIMEOUT  = HZ / 100,     /* call for help after 10ms */
+       MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+                                               /* call for help after 10ms
+                                                  (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
        TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
@@ -2047,6 +2049,15 @@ repeat:
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
+
+               /*
+                * Leave this gcwq.  If keep_working() is %true, notify a
+                * regular worker; otherwise, we end up with 0 concurrency
+                * and stalling the execution.
+                */
+               if (keep_working(gcwq))
+                       wake_up_worker(gcwq);
+
                spin_unlock_irq(&gcwq->lock);
        }
 
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
         */
        spin_lock(&workqueue_lock);
 
-       if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+       if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
                for_each_cwq_cpu(cpu, wq)
                        get_cwq(cpu, wq)->max_active = 0;
 
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
                spin_lock_irq(&gcwq->lock);
 
-               if (!(wq->flags & WQ_FREEZEABLE) ||
+               if (!(wq->flags & WQ_FREEZABLE) ||
                    !(gcwq->flags & GCWQ_FREEZING))
                        get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
         * want to get it over with ASAP - spam rescuers, wake up as
         * many idlers as necessary and create new ones till the
         * worklist is empty.  Note that if the gcwq is frozen, there
-        * may be frozen works in freezeable cwqs.  Don't declare
+        * may be frozen works in freezable cwqs.  Don't declare
         * completion while frozen.
         */
        while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (cwq && wq->flags & WQ_FREEZEABLE)
+                       if (cwq && wq->flags & WQ_FREEZABLE)
                                cwq->max_active = 0;
                }
 
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        BUG_ON(cwq->nr_active < 0);
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        /* restore max_active and repopulate worklist */
index 344c710..b8029a5 100644 (file)
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new,
 }
 EXPORT_SYMBOL(__list_add);
 
+void __list_del_entry(struct list_head *entry)
+{
+       struct list_head *prev, *next;
+
+       prev = entry->prev;
+       next = entry->next;
+
+       if (WARN(next == LIST_POISON1,
+               "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+               entry, LIST_POISON1) ||
+           WARN(prev == LIST_POISON2,
+               "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+               entry, LIST_POISON2) ||
+           WARN(prev->next != entry,
+               "list_del corruption. prev->next should be %p, "
+               "but was %p\n", entry, prev->next) ||
+           WARN(next->prev != entry,
+               "list_del corruption. next->prev should be %p, "
+               "but was %p\n", entry, next->prev))
+               return;
+
+       __list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add);
  */
 void list_del(struct list_head *entry)
 {
-       WARN(entry->next == LIST_POISON1,
-               "list_del corruption, next is LIST_POISON1 (%p)\n",
-               LIST_POISON1);
-       WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
-               "list_del corruption, prev is LIST_POISON2 (%p)\n",
-               LIST_POISON2);
-       WARN(entry->prev->next != entry,
-               "list_del corruption. prev->next should be %p, "
-               "but was %p\n", entry, entry->prev->next);
-       WARN(entry->next->prev != entry,
-               "list_del corruption. next->prev should be %p, "
-               "but was %p\n", entry, entry->next->prev);
-       __list_del(entry->prev, entry->next);
+       __list_del_entry(entry);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
 }
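
Factoring the sanity checks into __list_del_entry() means the CONFIG_DEBUG_LIST
corruption warnings now also cover list_del_init(), list_move() and
list_move_tail(), which previously unlinked through __list_del() unchecked. A
deliberately buggy, illustrative sequence (assuming CONFIG_DEBUG_LIST=y) that
the new checks catch:

    LIST_HEAD(src);
    LIST_HEAD(dst);
    struct list_head item;

    list_add(&item, &src);
    list_del(&item);          /* poisons item.next / item.prev */
    list_move(&item, &dst);   /* now WARNs on LIST_POISON1/2 and skips the unlink */
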
index 7550abb..675614e 100644 (file)
@@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
                                result = L2CAP_CR_SEC_BLOCK;
                        else
                                result = L2CAP_CR_BAD_PSM;
+                       sk->sk_state = BT_DISCONN;
 
                        rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
                        rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f6d8e1..88e4aa9 100644
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
        if (is_multicast_ether_addr(dest)) {
                mdst = br_mdb_get(br, skb);
                if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-                       if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+                       if ((mdst && mdst->mglist) ||
                            br_multicast_is_router(br))
                                skb2 = skb;
                        br_multicast_forward(mdst, skb, skb2);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f701a21..09d5c09 100644
@@ -232,8 +232,7 @@ static void br_multicast_group_expired(unsigned long data)
        if (!netif_running(br->dev) || timer_pending(&mp->timer))
                goto out;
 
-       if (!hlist_unhashed(&mp->mglist))
-               hlist_del_init(&mp->mglist);
+       mp->mglist = false;
 
        if (mp->ports)
                goto out;
@@ -276,7 +275,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
                del_timer(&p->query_timer);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-               if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
 
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data)
        struct net_bridge *br = mp->br;
 
        spin_lock(&br->multicast_lock);
-       if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+       if (!netif_running(br->dev) || !mp->mglist ||
            mp->queries_sent >= br->multicast_last_member_count)
                goto out;
 
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br,
                goto err;
 
        if (!port) {
-               hlist_add_head(&mp->mglist, &br->mglist);
+               mp->mglist = true;
                mod_timer(&mp->timer, now + br->multicast_membership_interval);
                goto out;
        }
@@ -1165,7 +1164,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
        max_delay *= br->multicast_last_member_count;
 
-       if (!hlist_unhashed(&mp->mglist) &&
+       if (mp->mglist &&
            (timer_pending(&mp->timer) ?
             time_after(mp->timer.expires, now + max_delay) :
             try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1176,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
                if (timer_pending(&p->timer) ?
                    time_after(p->timer.expires, now + max_delay) :
                    try_to_del_timer_sync(&p->timer) >= 0)
-                       mod_timer(&mp->timer, now + max_delay);
+                       mod_timer(&p->timer, now + max_delay);
        }
 
 out:
@@ -1236,7 +1235,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                goto out;
 
        max_delay *= br->multicast_last_member_count;
-       if (!hlist_unhashed(&mp->mglist) &&
+       if (mp->mglist &&
            (timer_pending(&mp->timer) ?
             time_after(mp->timer.expires, now + max_delay) :
             try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1247,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                if (timer_pending(&p->timer) ?
                    time_after(p->timer.expires, now + max_delay) :
                    try_to_del_timer_sync(&p->timer) >= 0)
-                       mod_timer(&mp->timer, now + max_delay);
+                       mod_timer(&p->timer, now + max_delay);
        }
 
 out:
@@ -1283,7 +1282,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     br->multicast_last_member_interval;
 
        if (!port) {
-               if (!hlist_unhashed(&mp->mglist) &&
+               if (mp->mglist &&
                    (timer_pending(&mp->timer) ?
                     time_after(mp->timer.expires, time) :
                     try_to_del_timer_sync(&mp->timer) >= 0)) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac77..4e1b620 100644
@@ -84,13 +84,13 @@ struct net_bridge_port_group {
 struct net_bridge_mdb_entry
 {
        struct hlist_node               hlist[2];
-       struct hlist_node               mglist;
        struct net_bridge               *br;
        struct net_bridge_port_group __rcu *ports;
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct timer_list               query_timer;
        struct br_ip                    addr;
+       bool                            mglist;
        u32                             queries_sent;
 };
 
@@ -238,7 +238,6 @@ struct net_bridge
        spinlock_t                      multicast_lock;
        struct net_bridge_mdb_htable __rcu *mdb;
        struct hlist_head               router_list;
-       struct hlist_head               mglist;
 
        struct timer_list               multicast_router_timer;
        struct timer_list               multicast_querier_timer;
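
Editorial note: taken together, the three bridge hunks replace the per-group hlist_node mglist (and the br->mglist head it hung off) with a plain bool in struct net_bridge_mdb_entry; the only operations ever performed were add, delete and "am I on the list", so a flag suffices. Independently, the query handling now rearms the per-port-group timer p->timer instead of the group-wide mp->timer inside the port loop. Condensed from the hunks above, not a verbatim copy of the surrounding functions:

	/* group joined by the bridge device itself: mark it and start the timer */
	if (!port) {
		mp->mglist = true;	/* was: hlist_add_head(&mp->mglist, &br->mglist) */
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
	}

	/* group expiry: clear the flag instead of unhashing a node */
	mp->mglist = false;		/* was: hlist_del_init(&mp->mglist) */
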
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e726cb..8ae6631 100644
@@ -1280,10 +1280,13 @@ static int __dev_close_many(struct list_head *head)
 
 static int __dev_close(struct net_device *dev)
 {
+       int retval;
        LIST_HEAD(single);
 
        list_add(&dev->unreg_list, &single);
-       return __dev_close_many(&single);
+       retval = __dev_close_many(&single);
+       list_del(&single);
+       return retval;
 }
 
 int dev_close_many(struct list_head *head)
@@ -1325,7 +1328,7 @@ int dev_close(struct net_device *dev)
 
        list_add(&dev->unreg_list, &single);
        dev_close_many(&single);
-
+       list_del(&single);
        return 0;
 }
 EXPORT_SYMBOL(dev_close);
@@ -5063,6 +5066,7 @@ static void rollback_registered(struct net_device *dev)
 
        list_add(&dev->unreg_list, &single);
        rollback_registered_many(&single);
+       list_del(&single);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -6216,6 +6220,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
                }
        }
        unregister_netdevice_many(&dev_kill_list);
+       list_del(&dev_kill_list);
        rtnl_unlock();
 }
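
Editorial note: all four call sites above use the same idiom: an on-stack list head, the device's unreg_list node spliced onto it, and a *_many() helper that walks the list. The added list_del() unhooks dev->unreg_list from the stack head before the function returns; without it the node keeps pointing into a dead stack frame, and a later list_add() of the same node trips the list-debug checks. The pattern, condensed from the __dev_close() hunk:

	int retval;
	LIST_HEAD(single);			/* list head lives on the stack */

	list_add(&dev->unreg_list, &single);	/* splice this one device onto it */
	retval = __dev_close_many(&single);	/* helper operates on the list */
	list_del(&single);			/* leaves dev->unreg_list self-linked,
						 * not pointing into the dead frame */
	return retval;
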
 
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 6b03f56..d5074a5 100644
@@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
        dcb->cmd = DCB_CMD_GAPP;
 
        app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+       if (!app_nest)
+               goto out_cancel;
+
        ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
        if (ret)
                goto out_cancel;
@@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp);
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
        struct dcb_app_type *itr;
+       struct dcb_app_type event;
+
+       memcpy(&event.name, dev->name, sizeof(event.name));
+       memcpy(&event.app, new, sizeof(event.app));
 
        spin_lock(&dcb_lock);
        /* Search for existing match and replace */
@@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
        }
 out:
        spin_unlock(&dcb_lock);
-       call_dcbevent_notifiers(DCB_APP_EVENT, new);
+       call_dcbevent_notifiers(DCB_APP_EVENT, &event);
        return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
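
Editorial note: two separate fixes in net/dcb/dcbnl.c: dcbnl_getapp() now bails out when nla_nest_start() fails instead of handing a NULL nest to nla_put_u8(), and dcb_setapp() snapshots the device name and app entry into a stack-local struct dcb_app_type so the DCB_APP_EVENT notifiers receive a stable copy rather than the caller's struct dcb_app pointer. The notifier side, condensed from the hunk (the locked search/replace of the stored entry is elided):

	struct dcb_app_type event;		/* stack snapshot for the notifier chain */

	memcpy(&event.name, dev->name, sizeof(event.name));
	memcpy(&event.app, new, sizeof(event.app));

	/* ... existing search/replace of the app entry under dcb_lock ... */

	call_dcbevent_notifiers(DCB_APP_EVENT, &event);	/* was: , new */
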
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 748cb5b..df4616f 100644
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
        return mtu >= 68;
 }
 
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+                                       struct in_device *in_dev)
+
+{
+       struct in_ifaddr *ifa = in_dev->ifa_list;
+
+       if (!ifa)
+               return;
+
+       arp_send(ARPOP_REQUEST, ETH_P_ARP,
+                ifa->ifa_address, dev,
+                ifa->ifa_address, NULL,
+                dev->dev_addr, NULL);
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
                }
                ip_mc_up(in_dev);
                /* fall through */
-       case NETDEV_NOTIFY_PEERS:
        case NETDEV_CHANGEADDR:
+               if (!IN_DEV_ARP_NOTIFY(in_dev))
+                       break;
+               /* fall through */
+       case NETDEV_NOTIFY_PEERS:
                /* Send gratuitous ARP to notify of link change */
-               if (IN_DEV_ARP_NOTIFY(in_dev)) {
-                       struct in_ifaddr *ifa = in_dev->ifa_list;
-
-                       if (ifa)
-                               arp_send(ARPOP_REQUEST, ETH_P_ARP,
-                                        ifa->ifa_address, dev,
-                                        ifa->ifa_address, NULL,
-                                        dev->dev_addr, NULL);
-               }
+               inetdev_send_gratuitous_arp(dev, in_dev);
                break;
        case NETDEV_DOWN:
                ip_mc_down(in_dev);
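
Editorial note: the gratuitous-ARP transmission moves into the new inetdev_send_gratuitous_arp() helper, and the case labels are reordered so that NETDEV_CHANGEADDR only announces the address when the arp_notify sysctl is enabled, while NETDEV_NOTIFY_PEERS announces unconditionally; both paths fall through to the same helper. The resulting control flow, condensed from the hunk:

	case NETDEV_CHANGEADDR:
		if (!IN_DEV_ARP_NOTIFY(in_dev))
			break;			/* only announce if arp_notify is set */
		/* fall through */
	case NETDEV_NOTIFY_PEERS:
		/* gratuitous ARP: sender == target == our primary address */
		inetdev_send_gratuitous_arp(dev, in_dev);
		break;
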
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eb68a0e..6613edf 100644
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        .fl4_dst = dst,
                        .fl4_src = tiph->saddr,
                        .fl4_tos = RT_TOS(tos),
+                       .proto = IPPROTO_GRE,
                        .fl_gre_key = tunnel->parms.o_key
                };
                if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 788a3e7..6ed6603 100644
@@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .destroy                =       ipv4_dst_destroy,
        .check                  =       ipv4_blackhole_dst_check,
        .default_mtu            =       ipv4_blackhole_default_mtu,
+       .default_advmss         =       ipv4_default_advmss,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
 };
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1c29f95..a998db6 100644
@@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .destroy                =       ip6_dst_destroy,
        .check                  =       ip6_dst_check,
        .default_mtu            =       ip6_blackhole_default_mtu,
+       .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
 };
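
Editorial note: both the IPv4 and IPv6 blackhole dst_ops tables gain a default_advmss hook, reusing the regular ipv4_default_advmss()/ip6_default_advmss() helpers. With the lazy metrics scheme in this kernel, the advertised MSS is computed on demand: when RTAX_ADVMSS is unset, the generic helper falls back to dst->ops->default_advmss(dst), so a blackhole dst with a NULL hook would oops the first time a socket on such a route needed its MSS. Roughly, and paraphrased from memory of include/net/dst.h of this era rather than from the diff above:

	static inline u32 dst_metric_advmss(const struct dst_entry *dst)
	{
		u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

		if (!advmss)
			advmss = dst->ops->default_advmss(dst);	/* NULL hook => oops */
		return advmss;
	}
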
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index cf68700..d036597 100644
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
                        changed |= BSS_CHANGED_ASSOC;
+                       mutex_lock(&sdata->u.mgd.mtx);
                        ieee80211_bss_info_change_notify(sdata, changed);
+                       mutex_unlock(&sdata->u.mgd.mtx);
                        break;
                case NL80211_IFTYPE_ADHOC:
                        changed |= BSS_CHANGED_IBSS;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 32fcbe2..4aa614b 100644
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head,
 
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
+repeat:
                verdict = elem->hook(hook, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
 #endif
                        if (verdict != NF_REPEAT)
                                return verdict;
-                       *i = (*i)->prev;
+                       goto repeat;
                }
        }
        return NF_ACCEPT;
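
Editorial note: for an NF_REPEAT verdict the old code rewound the caller's cursor with *i = (*i)->prev and relied on the surrounding list walk stepping forward onto the same hook again; the new repeat: label simply invokes the same hook function once more without touching the iterator. The inner body after the change (the enclosing walk over the hook list is unchanged and omitted here):

	repeat:
		verdict = elem->hook(hook, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;	/* NF_REPEAT: run this same hook again */
		}
		/* NF_ACCEPT: the loop advances to the next hook */
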
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8b3ef40..6459588 100644
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
        default:
                BUG();
        }
-       xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+       xdst = dst_alloc(dst_ops);
        xfrm_policy_put_afinfo(afinfo);
 
-       xdst->flo.ops = &xfrm_bundle_fc_ops;
+       if (likely(xdst))
+               xdst->flo.ops = &xfrm_bundle_fc_ops;
+       else
+               xdst = ERR_PTR(-ENOBUFS);
 
        return xdst;
 }
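
Editorial note: the old dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS) expression stored an error pointer on allocation failure but then dereferenced xdst->flo.ops unconditionally on the next statement. The rewrite checks the allocation first and substitutes the ERR_PTR only afterwards, so the failure path no longer writes through an error-pointer value. The resulting shape, as in the hunk, with comments added:

	xdst = dst_alloc(dst_ops);
	xfrm_policy_put_afinfo(afinfo);

	if (likely(xdst))
		xdst->flo.ops = &xfrm_bundle_fc_ops;	/* only touch a real allocation */
	else
		xdst = ERR_PTR(-ENOBUFS);		/* caller gets -ENOBUFS, not an oops */

	return xdst;
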