Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
- mac-address : See ethernet.txt file in the same directory
+- phy-handle : See ethernet.txt file in the same directory
Note: "ti,hwmods" field is used to fetch the base address and irq
resources from TI, omap hwmod data base during device registration.
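
Example (a minimal sketch of a slave sub-node showing the optional
properties above; the node name, unit address and the &ethphy0 label
are illustrative, not taken from the binding):

	cpsw_emac0: slave@200 {
		phy-handle = <&ethphy0>;
		phy-mode = "rgmii-txid";
		dual_emac_res_vlan = <1>;
	};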
--- /dev/null
+SMSC LAN87xx Ethernet PHY
+
+Some boards require special tuning values. Configure them
+through an Ethernet OF device node.
+
+Optional properties:
+
+- smsc,disable-energy-detect:
+ If set, do not enable energy detect mode for the SMSC phy.
+ default: enable energy detect mode
+
+Examples:
+SMSC PHY with disabled energy detect mode on an AM335x-based board.
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&davinci_mdio_default>;
+ pinctrl-1 = <&davinci_mdio_sleep>;
+ status = "okay";
+
+ ethernetphy0: ethernet-phy@0 {
+ reg = <0>;
+ smsc,disable-energy-detect;
+ };
+};
--- /dev/null
+Broadcom BCM2835 auxiliary SPI1/2 controller
+
+The BCM2835 contains two forms of SPI master controller: one known simply as
+SPI0, and the other known as the "Universal SPI Master", which is part of the
+auxiliary block. This binding applies to the SPI1/2 controller.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-aux-spi".
+- reg: Should contain the register location and length for the SPI block
+- interrupts: Should contain the shared interrupt of the aux block
+- clocks: The clock feeding the SPI controller - needs to
+ point to the auxiliary clock driver of the bcm2835,
+ as this clock will enable the output gate for the specific
+ clock.
+- cs-gpios: the GPIOs to use as chip selects (native chip select is NOT
+ supported); see also spi-bus.txt
+
+Example:
+
+spi1@7e215080 {
+ compatible = "brcm,bcm2835-aux-spi";
+ reg = <0x7e215080 0x40>;
+ interrupts = <1 29>;
+ clocks = <&aux_clocks BCM2835_AUX_CLOCK_SPI1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <&gpio 18>, <&gpio 17>, <&gpio 16>;
+};
+
+spi2@7e2150c0 {
+ compatible = "brcm,bcm2835-aux-spi";
+ reg = <0x7e2150c0 0x40>;
+ interrupts = <1 29>;
+ clocks = <&aux_clocks BCM2835_AUX_CLOCK_SPI2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <&gpio 43>, <&gpio 44>, <&gpio 45>;
+};
S: Maintained
F: drivers/net/ethernet/freescale/ucc_geth*
+FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
+M: Claudiu Manoil <claudiu.manoil@freescale.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/gianfar*
+X: drivers/net/ethernet/freescale/gianfar_ptp.c
+F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+
FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi <timur@tabi.org>
L: linuxppc-dev@lists.ozlabs.org
VERSION = 4
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Blurry Fish Butt
# *DOCUMENTATION*
config ARCH_RPC
bool "RiscPC"
+ depends on MMU
select ARCH_ACORN
select ARCH_MAY_HAVE_PC_FDC
select ARCH_SPARSEMEM_ENABLE
button@1 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
label = "DSW2-1";
linux,code = <KEY_1>;
gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
};
button@2 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
label = "DSW2-2";
linux,code = <KEY_2>;
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
button@3 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
label = "DSW2-3";
linux,code = <KEY_3>;
gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
};
button@4 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
label = "DSW2-4";
linux,code = <KEY_4>;
gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
button@1 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <2>;
label = "userpb";
gpios = <&gpio1 0 0x4>;
};
button@2 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <3>;
label = "extkb1";
gpios = <&gpio4 23 0x4>;
};
button@3 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <4>;
label = "extkb2";
gpios = <&gpio4 24 0x4>;
};
button@4 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <5>;
label = "extkb3";
gpios = <&gpio5 1 0x4>;
};
button@5 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <6>;
label = "extkb4";
gpios = <&gpio5 2 0x4>;
* it does.
*/
-#include <byteswap.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
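+
+/*
+ * Local byte-swap helpers: defined here so the tool does not depend on
+ * <byteswap.h>, which is a glibc extension and may be absent from other
+ * host C libraries.
+ */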
+#define swab16(x) \
+ ((((x) & 0x00ff) << 8) | \
+ (((x) & 0xff00) >> 8))
+
+#define swab32(x) \
+ ((((x) & 0x000000ff) << 24) | \
+ (((x) & 0x0000ff00) << 8) | \
+ (((x) & 0x00ff0000) >> 8) | \
+ (((x) & 0xff000000) >> 24))
+
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HOST_ORDER ELFDATA2LSB
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
{
- return swap ? bswap_32(word) : word;
+ return swap ? swab32(word) : word;
}
static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
{
- return swap ? bswap_16(half) : half;
+ return swap ? swab16(half) : half;
}
static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
{
- *dst = swap ? bswap_32(val) : val;
+ *dst = swap ? swab32(val) : val;
}
int main(int argc, char **argv)
button@1 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <116>;
label = "POWER";
gpios = <&iofpga_gpio0 0 0x4>;
};
button@2 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <102>;
label = "HOME";
gpios = <&iofpga_gpio0 1 0x4>;
};
button@3 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <152>;
label = "RLOCK";
gpios = <&iofpga_gpio0 2 0x4>;
};
button@4 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <115>;
label = "VOL+";
gpios = <&iofpga_gpio0 3 0x4>;
};
button@5 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <114>;
label = "VOL-";
gpios = <&iofpga_gpio0 4 0x4>;
};
button@6 {
debounce_interval = <50>;
- wakeup = <1>;
+ wakeup-source;
linux,code = <99>;
label = "NMI";
gpios = <&iofpga_gpio0 5 0x4>;
__asm__ __volatile__( \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- " mov %w2, %w1\n" \
- "0: ldxr"B" %w1, [%3]\n" \
- "1: stxr"B" %w0, %w2, [%3]\n" \
+ "0: ldxr"B" %w2, [%3]\n" \
+ "1: stxr"B" %w0, %w1, [%3]\n" \
" cbz %w0, 2f\n" \
" mov %w0, %w4\n" \
+ " b 3f\n" \
"2:\n" \
+ " mov %w1, %w2\n" \
+ "3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
- "3: mov %w0, %w5\n" \
- " b 2b\n" \
+ "4: mov %w0, %w5\n" \
+ " b 3b\n" \
" .popsection" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
- " .quad 0b, 3b\n" \
- " .quad 1b, 3b\n" \
+ " .quad 0b, 4b\n" \
+ " .quad 1b, 4b\n" \
" .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
unsigned long kernel_size, kernel_memsize = 0;
unsigned long nr_pages;
void *old_image_addr = (void *)*image_addr;
+ unsigned long preferred_offset;
+
+ /*
+ * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
+ * a 2 MB aligned base, which itself may be lower than dram_base, as
+ * long as the resulting offset equals or exceeds it.
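+ * For example, with dram_base at 0x80100000 and the default TEXT_OFFSET
+ * of 0x80000, the first candidate (0x80080000) lies below dram_base, so
+ * it is bumped by 2 MB to 0x80280000.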
+ */
+ preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+ if (preferred_offset < dram_base)
+ preferred_offset += SZ_2M;
/* Relocate the image, if required. */
kernel_size = _edata - _text;
- if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ if (*image_addr != preferred_offset) {
kernel_memsize = kernel_size + (_end - _edata);
/*
* Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'.
*/
- *image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+ *image_addr = *reserve_addr = preferred_offset;
nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
EFI_PAGE_SIZE;
status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- /*
- * -4 here because we care about the PC at time of bl,
- * not where the return will go.
- */
- frame->pc = *(unsigned long *)(fp + 8) - 4;
+ frame->pc = *(unsigned long *)(fp + 8);
return 0;
}
if (ret == 0) {
/*
* We are resuming from reset with TTBR0_EL1 set to the
- * idmap to enable the MMU; restore the active_mm mappings in
- * TTBR0_EL1 unless the active_mm == &init_mm, in which case
- * the thread entered cpu_suspend with TTBR0_EL1 set to
- * reserved TTBR0 page tables and should be restored as such.
+ * idmap to enable the MMU; set the TTBR0 to the reserved
+ * page tables to prevent speculative TLB allocations, flush
+ * the local tlb and set the default tcr_el1.t0sz so that
+ * the TTBR0 address space set-up is properly restored.
+ * If the current active_mm != &init_mm we entered cpu_suspend
+ * with mappings in TTBR0 that must be restored, so we switch
+ * them back to complete the address space configuration
+ * restoration before returning.
*/
- if (mm == &init_mm)
- cpu_set_reserved_ttbr0();
- else
- cpu_switch_mm(mm->pgd, mm);
-
+ cpu_set_reserved_ttbr0();
flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
+
+ if (mm != &init_mm)
+ cpu_switch_mm(mm->pgd, mm);
/*
* Restore per-cpu offset before any kernel
-#define NR_syscalls 321 /* length of syscall table */
+#define NR_syscalls 322 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
#define __NR_execveat 1342
#define __NR_userfaultfd 1343
#define __NR_membarrier 1344
+#define __NR_kcmp 1345
#endif /* _UAPI_ASM_IA64_UNISTD_H */
data8 sys_execveat
data8 sys_userfaultfd
data8 sys_membarrier
+ data8 sys_kcmp // 1345
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#include <bcm63xx_dev_spi.h>
#include <bcm63xx_regs.h>
-/*
- * register offsets
- */
-static const unsigned long bcm6348_regs_spi[] = {
- __GEN_SPI_REGS_TABLE(6348)
-};
-
-static const unsigned long bcm6358_regs_spi[] = {
- __GEN_SPI_REGS_TABLE(6358)
-};
-
-const unsigned long *bcm63xx_regs_spi;
-EXPORT_SYMBOL(bcm63xx_regs_spi);
-
-static __init void bcm63xx_spi_regs_init(void)
-{
- if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
- bcm63xx_regs_spi = bcm6348_regs_spi;
- if (BCMCPU_IS_3368() || BCMCPU_IS_6358() ||
- BCMCPU_IS_6362() || BCMCPU_IS_6368())
- bcm63xx_regs_spi = bcm6358_regs_spi;
-}
-
static struct resource spi_resources[] = {
{
.start = -1, /* filled at runtime */
},
};
-static struct bcm63xx_spi_pdata spi_pdata = {
- .bus_num = 0,
- .num_chipselect = 8,
-};
-
static struct platform_device bcm63xx_spi_device = {
- .name = "bcm63xx-spi",
.id = -1,
.num_resources = ARRAY_SIZE(spi_resources),
.resource = spi_resources,
- .dev = {
- .platform_data = &spi_pdata,
- },
};
int __init bcm63xx_spi_register(void)
spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
+ bcm63xx_spi_device.name = "bcm6348-spi",
spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
- spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
- spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
- spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
}
if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6362() ||
BCMCPU_IS_6368()) {
+ bcm63xx_spi_device.name = "bcm6358-spi",
spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
- spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
- spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
- spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
}
- bcm63xx_spi_regs_init();
-
return platform_device_register(&bcm63xx_spi_device);
}
int __init bcm63xx_spi_register(void);
-struct bcm63xx_spi_pdata {
- unsigned int fifo_size;
- unsigned int msg_type_shift;
- unsigned int msg_ctl_width;
- int bus_num;
- int num_chipselect;
-};
-
-enum bcm63xx_regs_spi {
- SPI_CMD,
- SPI_INT_STATUS,
- SPI_INT_MASK_ST,
- SPI_INT_MASK,
- SPI_ST,
- SPI_CLK_CFG,
- SPI_FILL_BYTE,
- SPI_MSG_TAIL,
- SPI_RX_TAIL,
- SPI_MSG_CTL,
- SPI_MSG_DATA,
- SPI_RX_DATA,
-};
-
-#define __GEN_SPI_REGS_TABLE(__cpu) \
- [SPI_CMD] = SPI_## __cpu ##_CMD, \
- [SPI_INT_STATUS] = SPI_## __cpu ##_INT_STATUS, \
- [SPI_INT_MASK_ST] = SPI_## __cpu ##_INT_MASK_ST, \
- [SPI_INT_MASK] = SPI_## __cpu ##_INT_MASK, \
- [SPI_ST] = SPI_## __cpu ##_ST, \
- [SPI_CLK_CFG] = SPI_## __cpu ##_CLK_CFG, \
- [SPI_FILL_BYTE] = SPI_## __cpu ##_FILL_BYTE, \
- [SPI_MSG_TAIL] = SPI_## __cpu ##_MSG_TAIL, \
- [SPI_RX_TAIL] = SPI_## __cpu ##_RX_TAIL, \
- [SPI_MSG_CTL] = SPI_## __cpu ##_MSG_CTL, \
- [SPI_MSG_DATA] = SPI_## __cpu ##_MSG_DATA, \
- [SPI_RX_DATA] = SPI_## __cpu ##_RX_DATA,
-
-static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
-{
- extern const unsigned long *bcm63xx_regs_spi;
-
- return bcm63xx_regs_spi[reg];
-}
-
#endif /* BCM63XX_DEV_SPI_H */
dev->coherent_dma_mask = mask;
return 0;
}
-EXPORT_SYMBOL_GPL(dma_set_coherent_mask);
+EXPORT_SYMBOL(dma_set_coherent_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
mask = apic->target_cpus();
chip = irq_data_get_irq_chip(idata);
- chip->irq_set_affinity(idata, mask, false);
+ /* Might be lapic_chip for irq 0 */
+ if (chip->irq_set_affinity)
+ chip->irq_set_affinity(idata, mask, false);
}
}
#endif
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
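+ /* Substitute the fallback device before *dev is used below. */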
+ if (!*dev)
+ *dev = &x86_dma_fallback_dev;
+
*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
- if (!*dev)
- *dev = &x86_dma_fallback_dev;
if (!is_device_dma_capable(*dev))
return false;
return true;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
memcpy(dst, src, arch_task_struct_size);
+#ifdef CONFIG_VM86
+ dst->thread.vm86 = NULL;
+#endif
return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
bio_put(bio);
}
-/*
- * Ensure that max discard sectors doesn't overflow bi_size and hopefully
- * it is of the proper granularity as long as the granularity is a power
- * of two.
- */
-#define MAX_BIO_SECTORS ((1U << 31) >> 9)
-
/**
* blkdev_issue_discard - queue a discard
* @bdev: blockdev to issue discard for
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
+ unsigned int granularity;
+ int alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP;
blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
- sector_t end_sect;
+ sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
break;
}
- req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
+ /* Make sure bi_size doesn't overflow */
+ req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+ /*
+ * If splitting a request, and the next starting sector would be
+ * misaligned, stop the discard at the previous aligned sector.
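+ * For example, with granularity 8 and alignment 2, an end sector of 21
+ * would be pulled back to 18 (2 * 8 + 2).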
+ */
end_sect = sector + req_sects;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
+ req_sects = end_sect - sector;
+ }
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
err:
if (err != -EAGAIN)
break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
crypto_alg_tested(larval->alg.cra_driver_name, 0);
}
- err = wait_for_completion_interruptible(&larval->completion);
+ err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
out:
struct crypto_larval *larval = (void *)alg;
long timeout;
- timeout = wait_for_completion_interruptible_timeout(
+ timeout = wait_for_completion_killable_timeout(
&larval->completion, 60 * HZ);
alg = larval->adult;
err:
if (err != -EAGAIN)
break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
err:
if (err != -EAGAIN)
break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
err = PTR_ERR(alg);
if (err != -EAGAIN)
break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
q->limits.discard_zeroes_data = 1;
+ if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
+ q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+
disk->queue = q;
q->queuedata = rbd_dev;
if (IS_ERR(r))
return PTR_ERR(r);
- l = clkdev_create(r, alias, "%s", alias_dev_name);
+ l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
+ alias_dev_name);
clk_put(r);
return l ? 0 : -ENODEV;
* different to the 32-bit upper value read previously, go back to step 2.
* Otherwise the 64-bit timer counter value is correct.
*/
-static u64 gt_counter_read(void)
+static u64 notrace _gt_counter_read(void)
{
u64 counter;
u32 lower;
return counter;
}
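+
+/*
+ * Traceable wrapper for the clocksource ->read() path; sched_clock uses
+ * the notrace variant directly so that ftrace cannot recurse into it.
+ */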
+static u64 gt_counter_read(void)
+{
+ return _gt_counter_read();
+}
+
/**
* To ensure that updates to comparator value register do not set the
* Interrupt Status Register proceed as follows:
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
static u64 notrace gt_sched_clock_read(void)
{
- return gt_counter_read();
+ return _gt_counter_read();
}
#endif
ftm_writel(0x00, base + FTM_CNT);
}
-static u64 ftm_read_sched_clock(void)
+static u64 notrace ftm_read_sched_clock(void)
{
return ftm_readl(priv->clksrc_base + FTM_CNT);
}
samsung_time_start(pwm.source_id, true);
}
-static cycle_t samsung_clocksource_read(struct clocksource *c)
+static cycle_t notrace samsung_clocksource_read(struct clocksource *c)
{
return ~readl_relaxed(pwm.source_reg);
}
{
struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
- sh_mtu2_disable(ch);
+ if (clockevent_state_periodic(ced))
+ sh_mtu2_disable(ch);
+
return 0;
}
writel(value, base + 0x20 * gpt_id + offset);
}
-static cycle_t pistachio_clocksource_read_cycles(struct clocksource *cs)
+static cycle_t notrace
+pistachio_clocksource_read_cycles(struct clocksource *cs)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
u32 counter, overflw;
return IRQ_HANDLED;
}
-static u64 digicolor_timer_sched_read(void)
+static u64 notrace digicolor_timer_sched_read(void)
{
return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
}
}
/* read 64-bit timer counter */
-static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs)
{
u64 cycles;
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}
-static u64 pit_read_sched_clock(void)
+static u64 notrace pit_read_sched_clock(void)
{
return ~__raw_readl(clksrc_base + PITCVAL);
}
u8 fan_max_rpm;
/* dpm */
bool dpm_enabled;
+ bool sysfs_initialized;
struct amdgpu_dpm dpm;
const struct firmware *fw; /* SMC firmware */
uint32_t fw_version;
goto cleanup;
}
- fence_get(work->excl);
- for (i = 0; i < work->shared_count; ++i)
- fence_get(work->shared[i]);
-
amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
amdgpu_bo_unreserve(new_rbo);
{
int ret;
+ if (adev->pm.sysfs_initialized)
+ return 0;
+
if (adev->pm.funcs->get_temperature == NULL)
return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
return ret;
}
+ adev->pm.sysfs_initialized = true;
+
return 0;
}
struct drm_property_blob *blob;
int ret;
- if (!length)
+ if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
* not associated with any file_priv. */
mutex_lock(&dev->mode_config.blob_lock);
out_resp->blob_id = blob->base.id;
- list_add_tail(&file_priv->blobs, &blob->head_file);
+ list_add_tail(&blob->head_file, &file_priv->blobs);
mutex_unlock(&dev->mode_config.blob_lock);
return 0;
backlight_update_status(bd);
DRM_INFO("radeon atom DIG backlight initialized\n");
+ rdev->mode_info.bl_encoder = radeon_encoder;
return;
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+ if (rdev->mode_info.bl_encoder) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+ } else {
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
}
break;
case DRM_MODE_DPMS_STANDBY:
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (rdev->mode_info.bl_encoder)
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+ else
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ }
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
u8 fan_max_rpm;
/* dpm */
bool dpm_enabled;
+ bool sysfs_initialized;
struct radeon_dpm dpm;
};
radeon_atom_backlight_init(radeon_encoder, connector);
else
radeon_legacy_backlight_init(radeon_encoder, connector);
- rdev->mode_info.bl_encoder = radeon_encoder;
}
}
backlight_update_status(bd);
DRM_INFO("radeon legacy LVDS backlight initialized\n");
+ rdev->mode_info.bl_encoder = radeon_encoder;
return;
if (rdev->pm.pm_method == PM_METHOD_DPM) {
if (rdev->pm.dpm_enabled) {
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- /* XXX: these are noops for dpm but are here for backwards compat */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
+ if (!rdev->pm.sysfs_initialized) {
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ /* XXX: these are noops for dpm but are here for backwards compat */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+ if (!ret)
+ rdev->pm.sysfs_initialized = true;
+ }
mutex_lock(&rdev->pm.mutex);
ret = radeon_dpm_late_enable(rdev);
}
}
} else {
- if (rdev->pm.num_power_states > 1) {
+ if ((rdev->pm.num_power_states > 1) &&
+ (!rdev->pm.sysfs_initialized)) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
if (ret)
ret = device_create_file(rdev->dev, &dev_attr_power_method);
if (ret)
DRM_ERROR("failed to create device file for power method\n");
+ if (!ret)
+ rdev->pm.sysfs_initialized = true;
}
}
return ret;
*
* Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
- * IRQ handling is turned on. Otherwise, make sure it's turned off. This
- * function may return -EAGAIN to indicate it should be rerun due to
- * possibly missed IRQs if IRQs has just been turned on.
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
*/
-static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
- int notempty = 0;
+ int notempty;
struct vmw_cmdbuf_context *ctx;
int i;
+retry:
+ notempty = 0;
for_each_cmdbuf_ctx(man, i, ctx)
vmw_cmdbuf_ctx_process(man, ctx, ¬empty);
man->irq_on = true;
/* Rerun in case we just missed an irq. */
- return -EAGAIN;
+ goto retry;
}
-
- return 0;
}
/**
header->cb_context = cb_context;
list_add_tail(&header->list, &man->ctx[cb_context].submitted);
- if (vmw_cmdbuf_man_process(man) == -EAGAIN)
- vmw_cmdbuf_man_process(man);
+ vmw_cmdbuf_man_process(man);
}
/**
struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
spin_lock(&man->lock);
- if (vmw_cmdbuf_man_process(man) == -EAGAIN)
- (void) vmw_cmdbuf_man_process(man);
+ vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
}
struct vmw_cmdbuf_man *man =
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
+ uint32_t dummy;
bool restart = false;
spin_lock_bh(&man->lock);
if (restart && vmw_cmdbuf_startstop(man, true))
DRM_ERROR("Failed restarting command buffer context 0.\n");
+ /* Send a new fence in case one was removed */
+ vmw_fifo_send_fence(man->dev_priv, &dummy);
}
/**
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
- (void) vmw_cmdbuf_man_process(man);
+ vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node_generic(&man->mm, info->node,
info->page_size, 0, 0,
DRM_MM_SEARCH_DEFAULT,
drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
man->has_pool = true;
- man->default_size = default_size;
+
+ /*
+ * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+ * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+ * needs to wait for space and we block on further command
+ * submissions to be able to free up space.
+ */
+ man->default_size = VMW_CMDBUF_INLINE_SIZE;
DRM_INFO("Using command buffers with %s pool.\n",
(man->using_mob) ? "MOB" : "DMA");
struct i2c_msg *msgs = drv_data->msgs;
int num = drv_data->num_msgs;
- return false;
-
if (!drv_data->offload_enabled)
return false;
{
struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
- clk_disable(alg_data->clk);
+ clk_disable_unprepare(alg_data->clk);
return 0;
}
{
struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
- return clk_enable(alg_data->clk);
+ return clk_prepare_enable(alg_data->clk);
}
static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
if (IS_ERR(alg_data->ioaddr))
return PTR_ERR(alg_data->ioaddr);
- ret = clk_enable(alg_data->clk);
+ ret = clk_prepare_enable(alg_data->clk);
if (ret)
return ret;
return 0;
out_clock:
- clk_disable(alg_data->clk);
+ clk_disable_unprepare(alg_data->clk);
return ret;
}
struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
i2c_del_adapter(&alg_data->adapter);
- clk_disable(alg_data->clk);
+ clk_disable_unprepare(alg_data->clk);
return 0;
}
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
6-byte ALPS packet */
-#define ALPS_DELL 0x100 /* device is a Dell laptop */
+#define ALPS_STICK_BITS 0x100 /* separate stick button bits */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
static const struct alps_model_info alps_model_data[] = {
ALPS_PROTO_V8, 0x18, 0x18, 0
};
+/*
+ * Some v2 models report the stick buttons in separate bits
+ */
+static const struct dmi_system_id alps_dmi_has_separate_stick_buttons[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Extrapolated from other entries */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D420"),
+ },
+ },
+ {
+ /* Reported-by: Hans de Bruin <jmdebruin@xmsnet.nl> */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D430"),
+ },
+ },
+ {
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D620"),
+ },
+ },
+ {
+ /* Extrapolated from other entries */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D630"),
+ },
+ },
+#endif
+ { }
+};
+
static void alps_set_abs_params_st(struct alps_data *priv,
struct input_dev *dev1);
static void alps_set_abs_params_semi_mt(struct alps_data *priv,
return;
}
- /* Dell non interleaved V2 dualpoint has separate stick button bits */
- if (priv->proto_version == ALPS_PROTO_V2 &&
- priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
+ /* Some models have separate stick button bits */
+ if (priv->flags & ALPS_STICK_BITS) {
left |= packet[0] & 1;
right |= packet[0] & 2;
middle |= packet[0] & 4;
priv->byte0 = protocol->byte0;
priv->mask0 = protocol->mask0;
priv->flags = protocol->flags;
- if (dmi_name_in_vendors("Dell"))
- priv->flags |= ALPS_DELL;
priv->x_max = 2000;
priv->y_max = 1400;
priv->set_abs_params = alps_set_abs_params_st;
priv->x_max = 1023;
priv->y_max = 767;
+ if (dmi_check_system(alps_dmi_has_separate_stick_buttons))
+ priv->flags |= ALPS_STICK_BITS;
break;
case ALPS_PROTO_V3:
config TOUCHSCREEN_SUR40
tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
depends on USB && MEDIA_USB_SUPPORT && HAS_DMA
+ depends on VIDEO_V4L2
select INPUT_POLLDEV
select VIDEOBUF2_DMA_SG
help
tsc_readl(tsc, LPC32XX_TSC_CON) &
~LPC32XX_TSC_ADCCON_AUTO_EN);
- clk_disable(tsc->clk);
+ clk_disable_unprepare(tsc->clk);
}
static void lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc)
{
u32 tmp;
- clk_enable(tsc->clk);
+ clk_prepare_enable(tsc->clk);
tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP;
static void clear_dte_entry(u16 devid)
{
/* remove entry from the device table seen by the hardware */
- amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
- amd_iommu_dev_table[devid].data[1] = 0;
+ amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
+ amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
amd_iommu_apply_erratum_63(devid);
}
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
+#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DTE_FLAG_IOTLB (0x01UL << 32)
#define DTE_FLAG_GV (0x01ULL << 55)
#define DTE_GLX_SHIFT (56)
goto out;
}
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
+ /* handle_mm_fault would BUG_ON() */
+ up_read(&mm->mmap_sem);
+ handle_fault_error(fault);
+ goto out;
+ }
+
ret = handle_mm_fault(mm, vma, address, write);
if (ret & VM_FAULT_ERROR) {
/* failed to service fault */
handle_level_irq);
}
irq_set_probe(virq);
+ irq_clear_status_flags(virq, IRQ_NOAUTOEN);
return 0;
}
.irq_unmask = tegra_unmask,
.irq_retrigger = tegra_retrigger,
.irq_set_wake = tegra_set_wake,
+ .irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
- struct sk_buff *skb;
+ struct sk_buff *skb, *nskb;
struct Layer2 *l2 = &st->l2;
u_char header[MAX_HEADER_LEN];
int i, hdr_space_needed;
return;
hdr_space_needed = l2headersize(l2, 0);
- if (hdr_space_needed > skb_headroom(skb)) {
- struct sk_buff *orig_skb = skb;
-
- skb = skb_realloc_headroom(skb, hdr_space_needed);
- if (!skb) {
- dev_kfree_skb(orig_skb);
- return;
- }
+ nskb = skb_realloc_headroom(skb, hdr_space_needed);
+ if (!nskb) {
+ skb_queue_head(&l2->i_queue, skb);
+ return;
}
spin_lock_irqsave(&l2->lock, flags);
if (test_bit(FLG_MOD128, &l2->flag))
p1);
dev_kfree_skb(l2->windowar[p1]);
}
- l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
+ l2->windowar[p1] = skb;
i = sethdraddr(&st->l2, header, CMD);
l2->vs = (l2->vs + 1) % 8;
}
spin_unlock_irqrestore(&l2->lock, flags);
- memcpy(skb_push(skb, i), header, i);
- st->l2.l2l1(st, PH_PULL | INDICATION, skb);
+ memcpy(skb_push(nskb, i), header, i);
+ st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
FsmDelTimer(&st->l2.t203, 13);
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
- struct sk_buff *skb, *nskb, *oskb;
+ struct sk_buff *skb, *nskb;
u_char header[MAX_L2HEADER_LEN];
u_int i, p1;
skb = skb_dequeue(&l2->i_queue);
if (!skb)
return;
-
- if (test_bit(FLG_MOD128, &l2->flag))
- p1 = (l2->vs - l2->va) % 128;
- else
- p1 = (l2->vs - l2->va) % 8;
- p1 = (p1 + l2->sow) % l2->window;
- if (l2->windowar[p1]) {
- printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
- mISDNDevName4ch(&l2->ch), p1);
- dev_kfree_skb(l2->windowar[p1]);
- }
- l2->windowar[p1] = skb;
i = sethdraddr(l2, header, CMD);
if (test_bit(FLG_MOD128, &l2->flag)) {
header[i++] = l2->vs << 1;
header[i++] = l2->vr << 1;
+ } else
+ header[i++] = (l2->vr << 5) | (l2->vs << 1);
+ nskb = skb_realloc_headroom(skb, i);
+ if (!nskb) {
+ printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
+ mISDNDevName4ch(&l2->ch), i);
+ skb_queue_head(&l2->i_queue, skb);
+ return;
+ }
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ p1 = (l2->vs - l2->va) % 128;
l2->vs = (l2->vs + 1) % 128;
} else {
- header[i++] = (l2->vr << 5) | (l2->vs << 1);
+ p1 = (l2->vs - l2->va) % 8;
l2->vs = (l2->vs + 1) % 8;
}
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- p1 = skb_headroom(nskb);
- if (p1 >= i)
- memcpy(skb_push(nskb, i), header, i);
- else {
- printk(KERN_WARNING
- "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
- mISDNDevName4ch(&l2->ch), i, p1);
- oskb = nskb;
- nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
- if (!nskb) {
- dev_kfree_skb(oskb);
- printk(KERN_WARNING "%s: no skb mem in %s\n",
- mISDNDevName4ch(&l2->ch), __func__);
- return;
- }
- memcpy(skb_put(nskb, i), header, i);
- memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
- dev_kfree_skb(oskb);
+ p1 = (p1 + l2->sow) % l2->window;
+ if (l2->windowar[p1]) {
+ printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
+ mISDNDevName4ch(&l2->ch), p1);
+ dev_kfree_skb(l2->windowar[p1]);
}
+ l2->windowar[p1] = skb;
+ memcpy(skb_push(nskb, i), header, i);
l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
!test_bit(Bitmap_sync, &rdev->flags)))
continue;
- if (rdev->saved_raid_disk < 0)
- rdev->recovery_offset = 0;
+ rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) == 0)
+ if (submit_bio_wait(WRITE, wbio) < 0)
/* failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
- if (test_bit(R1BIO_WriteError, &r1_bio->state))
- close_write(r1_bio);
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
- } else
+ } else {
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
raid_end_bio_io(r1_bio);
+ }
}
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
r1_bio = list_first_entry(&tmp, struct r1bio,
retry_list);
list_del(&r1_bio->retry_list);
+ if (mddev->degraded)
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
raid_end_bio_io(r1_bio);
}
}
* far_copies (stored in second byte of layout)
* far_offset (stored in bit 16 of layout )
* use_far_sets (stored in bit 17 of layout )
+ * use_far_sets_bugfixed (stored in bit 18 of layout )
*
* The data to be stored is divided into chunks using chunksize. Each device
* is divided into far_copies sections. In each section, chunks are laid out
seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
else
seq_printf(seq, " %d far-copies", conf->geo.far_copies);
+ if (conf->geo.far_set_size != conf->geo.raid_disks)
+ seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
}
seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
conf->geo.raid_disks - mddev->degraded);
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) == 0)
+ if (submit_bio_wait(WRITE, wbio) < 0)
/* Failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
rdev_dec_pending(rdev, conf->mddev);
}
}
- if (test_bit(R10BIO_WriteError,
- &r10_bio->state))
- close_write(r10_bio);
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
- } else
+ } else {
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
raid_end_bio_io(r10_bio);
+ }
}
}
r10_bio = list_first_entry(&tmp, struct r10bio,
retry_list);
list_del(&r10_bio->retry_list);
+ if (mddev->degraded)
+ set_bit(R10BIO_Degraded, &r10_bio->state);
+
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
raid_end_bio_io(r10_bio);
}
}
disks = mddev->raid_disks + mddev->delta_disks;
break;
}
- if (layout >> 18)
+ if (layout >> 19)
return -1;
if (chunk < (PAGE_SIZE >> 9) ||
!is_power_of_2(chunk))
geo->near_copies = nc;
geo->far_copies = fc;
geo->far_offset = fo;
- geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
+ switch (layout >> 17) {
+ case 0: /* original layout. simple but not always optimal */
+ geo->far_set_size = disks;
+ break;
+ case 1: /* "improved" layout which was buggy. Hopefully no-one is
+ * actually using this, but leave code here just in case.*/
+ geo->far_set_size = disks/fc;
+ WARN(geo->far_set_size < fc,
+ "This RAID10 layout does not provide data safety - please backup and create new array\n");
+ break;
+ case 2: /* "improved" layout fixed to match documentation */
+ geo->far_set_size = fc * nc;
+ break;
+ default: /* Not a valid layout */
+ return -1;
+ }
geo->chunk_mask = chunk - 1;
geo->chunk_shift = ffz(~chunk);
return nc*fc;
}
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
+ int hash;
clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
if (sh->qd_idx >= 0) {
* no updated data, so remove it from hash list and the stripe
* will be reinitialized
*/
- spin_lock_irq(&conf->device_lock);
unhash:
+ hash = sh->hash_lock_index;
+ spin_lock_irq(conf->hash_locks + hash);
remove_hash(sh);
+ spin_unlock_irq(conf->hash_locks + hash);
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head, batch_list);
if (sh != head_sh)
goto unhash;
}
- spin_unlock_irq(&conf->device_lock);
sh = head_sh;
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
if (ndev->irq == -ENXIO) {
netdev_err(ndev, "No irq resource\n");
ret = ndev->irq;
- goto out;
+ goto out_iounmap;
}
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
- goto out;
+ goto out_iounmap;
}
- clk_prepare_enable(db->clk);
+ ret = clk_prepare_enable(db->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
+ goto out_iounmap;
+ }
ret = sunxi_sram_claim(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
- goto out;
+ goto out_clk_disable_unprepare;
}
db->phy_node = of_parse_phandle(np, "phy", 0);
out_release_sram:
sunxi_sram_release(&pdev->dev);
+out_clk_disable_unprepare:
+ clk_disable_unprepare(db->clk);
+out_iounmap:
+ iounmap(db->membase);
out:
dev_err(db->dev, "not found (%d).\n", ret);
static int emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_board_info *db = netdev_priv(ndev);
unregister_netdev(ndev);
+ sunxi_sram_release(&pdev->dev);
+ clk_disable_unprepare(db->clk);
+ iounmap(db->membase);
free_netdev(ndev);
dev_dbg(&pdev->dev, "released and freed device\n");
packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
- dma_wmb();
+ smp_wmb();
ring->cur = cur_index + 1;
if (!packet->skb->xmit_more ||
struct netdev_queue *txq;
int processed = 0;
unsigned int tx_packets = 0, tx_bytes = 0;
+ unsigned int cur;
DBGPR("-->xgbe_tx_poll\n");
if (!ring)
return 0;
+ cur = ring->cur;
+
+ /* Be sure we get ring->cur before accessing descriptor data */
+ smp_rmb();
+
txq = netdev_get_tx_queue(netdev, channel->queue_index);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
- (ring->dirty != ring->cur)) {
+ (ring->dirty != cur)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
rdesc = rdata->rdesc;
for (i = 0; i < priv->num_ports; i++) {
struct bcm63xx_enetsw_port *port;
- int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+ int val, j, up, advertise, lpa, speed, duplex, media;
int external_phy = bcm_enet_port_is_rgmii(i);
u8 override;
lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
MII_LPA);
- lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
- MII_STAT1000);
-
/* figure out media and duplex from advertise and LPA values */
media = mii_nway_result(lpa & advertise);
duplex = (media & ADVERTISE_FULL) ? 1 : 0;
- if (lpa2 & LPA_1000FULL)
- duplex = 1;
-
- if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
- speed = 1000;
- else {
- if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
- speed = 100;
- else
- speed = 10;
+
+ if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+ speed = 100;
+ else
+ speed = 10;
+
+ if (val & BMSR_ESTATEN) {
+ advertise = bcmenet_sw_mdio_read(priv, external_phy,
+ port->phy_id, MII_CTRL1000);
+
+ lpa = bcmenet_sw_mdio_read(priv, external_phy,
+ port->phy_id, MII_STAT1000);
+
+ if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+ && lpa & (LPA_1000FULL | LPA_1000HALF)) {
+ speed = 1000;
+ duplex = (lpa & LPA_1000FULL);
+ }
}
dev_info(&priv->pdev->dev,
#
config NET_VENDOR_CAVIUM
- tristate "Cavium ethernet drivers"
+ bool "Cavium ethernet drivers"
depends on PCI
default y
---help---
struct nicpf {
struct pci_dev *pdev;
- u8 rev_id;
u8 node;
unsigned int flags;
u8 num_vf_en; /* No of VF enabled */
u8 duplex[MAX_LMAC];
u32 speed[MAX_LMAC];
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
u16 rss_ind_tbl_size;
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
bool irq_allocated[NIC_PF_MSIX_VECTORS];
};
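+/* Pass-1 ThunderX silicon is identified by a PCI revision ID below 8. */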
+static inline bool pass1_silicon(struct nicpf *nic)
+{
+ return nic->pdev->revision < 8;
+}
+
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
* when PF writes to MBOX(1), in next revisions when
* PF writes to MBOX(0)
*/
- if (nic->rev_id == 0) {
+ if (pass1_silicon(nic)) {
/* see the comment for nic_reg_write()/nic_reg_read()
* functions above
*/
{
int i;
- /* Reset NIC, in case the driver is repeatedly inserted and removed */
- nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
-
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
/* Leave RSS_SIZE as '0' to disable RSS */
- nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
- (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+ if (pass1_silicon(nic)) {
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) |
+ (rssi_base + rssi));
+ } else {
+ /* Set MPI_ALG to '0' to disable MCAM parsing */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (padd << 16));
+ /* MPI index is same as CPI if MPI_ALG is not enabled */
+ nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (rssi_base + rssi));
+ }
if ((rssi + 1) >= cfg->rq_cnt)
continue;
rssi = ((cpi - cpi_base) & 0x38) >> 3;
}
nic->cpi_base[cfg->vf_id] = cpi_base;
+ nic->rssi_base[cfg->vf_id] = rssi_base;
}
/* Responsds to VF with its RSS indirection table size */
{
u8 qset, idx = 0;
u64 cpi_cfg, cpi_base, rssi_base, rssi;
+ u64 idx_addr;
- cpi_base = nic->cpi_base[cfg->vf_id];
- cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
- rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+ rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
rssi = rssi_base;
qset = cfg->vf_id;
idx++;
}
+ cpi_base = nic->cpi_base[cfg->vf_id];
+ if (pass1_silicon(nic))
+ idx_addr = NIC_PF_CPI_0_2047_CFG;
+ else
+ idx_addr = NIC_PF_MPI_0_2047_CFG;
+ cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
cpi_cfg &= ~(0xFULL << 20);
cpi_cfg |= (cfg->hash_bits << 20);
- nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+ nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}
/* 4 level transmit side scheduler configuration
goto err_release_regions;
}
- pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
-
nic->node = nic_get_node_id(pdev);
nic_set_lmac_vf_mapping(nic);
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_MCAM_0_191_ENA (0x100000)
+#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
+#define NIC_PF_MCAM_CTRL (0x120000)
#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ PCI_VENDOR_ID_CAVIUM, 0xA134) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
PCI_VENDOR_ID_CAVIUM, 0xA11E) },
SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
lmac++;
- if (lmac == MAX_LMAC_PER_BGX)
+ if (lmac == MAX_LMAC_PER_BGX) {
+ of_node_put(np_child);
break;
+ }
}
return 0;
}
if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
priv->uses_rxfcb = 1;
- if (priv->hwts_rx_en)
+ if (priv->hwts_rx_en || priv->rx_filer_enable)
priv->uses_rxfcb = 1;
}
u32 rctrl = 0;
if (priv->rx_filer_enable) {
- rctrl |= RCTRL_FILREN;
+ rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
/* Program the RIR0 reg with the required distribution */
if (priv->poll_mode == GFAR_SQ_POLLING)
gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0);
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
if (events & IEVENT_BSY) {
- dev->stats.rx_errors++;
+ dev->stats.rx_over_errors++;
atomic64_inc(&priv->extra_stats.rx_bsy);
- gfar_receive(irq, grp_id);
-
netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
gfar_read(®s->rstat));
}
u32 fcr = 0x0, fpr = FPR_FILER_MASK;
if (ethflow & RXH_L2DA) {
- fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
+ fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
- fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+ fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+ data[i++] = veb->tc_stats.tc_tx_packets[j];
+ data[i++] = veb->tc_stats.tc_tx_bytes[j];
+ data[i++] = veb->tc_stats.tc_rx_packets[j];
+ data[i++] = veb->tc_stats.tc_rx_bytes[j];
+ }
}
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
if (pf->hw.func_caps.vmdq) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
#ifdef I40E_FCOE
desc->l4i_chk = 0;
desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(dev->dev.parent, data,
- length, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
- WARN(1, "dma_map_single failed!\n");
- return -ENOMEM;
+
+ if (length <= 8 && (uintptr_t)data & 0x7) {
+ /* Copy unaligned small data fragment to TSO header data area */
+ memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+ data, length);
+ desc->buf_ptr = txq->tso_hdrs_dma
+ + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ } else {
+ /* Alignment is okay, map buffer and hand off to hardware */
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+ desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+ length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ desc->buf_ptr))) {
+ WARN(1, "dma_map_single failed!\n");
+ return -ENOMEM;
+ }
}
cmd_sts = BUFFER_OWNED_BY_DMA;
}
static inline void
-txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+ u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int ret;
u32 cmd_csum = 0;
u16 l4i_chk = 0;
+ u32 cmd_sts;
tx_index = txq->tx_curr_desc;
desc = &txq->tx_desc_area[tx_index];
desc->byte_cnt = hdr_len;
desc->buf_ptr = txq->tso_hdrs_dma +
txq->tx_curr_desc * TSO_HEADER_SIZE;
- desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+ cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
GEN_CRC;
+ /* Defer updating the first command descriptor until all
+ * following descriptors have been written.
+ */
+ if (first_desc)
+ *first_cmd_sts = cmd_sts;
+ else
+ desc->cmd_sts = cmd_sts;
+
txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
int desc_count = 0;
struct tso_t tso;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct tx_desc *first_tx_desc;
+ u32 first_cmd_sts = 0;
/* Count needed descriptors */
if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
return -EBUSY;
}
+ first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
/* Initialize the TSO handler, and prepare the first payload */
tso_start(skb, &tso);
total_len = skb->len - hdr_len;
while (total_len > 0) {
+ bool first_desc = (desc_count == 0);
char *hdr;
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
/* prepare packet headers: MAC + IP + TCP */
hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- txq_put_hdr_tso(skb, txq, data_left);
+ txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+ first_desc);
while (data_left > 0) {
int size;
__skb_queue_tail(&txq->tx_skb, skb);
skb_tx_timestamp(skb);
+ /* ensure all other descriptors are written before first cmd_sts */
+ wmb();
+ first_tx_desc->cmd_sts = first_cmd_sts;
+
/* clear TX_END status */
mp->work_tx_end &= ~(1 << txq->index);
for_each_available_child_of_node(np, pnp) {
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
- if (ret)
+ if (ret) {
+ of_node_put(pnp);
return ret;
+ }
}
return 0;
}
}
}
- memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
else if (vlan_proto == ETH_P_8021Q)
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+ else
+ tx_desc->ctrl.ins_vlan = 0;
tx_desc->ctrl.fence_size = real_size;
return;
}
- memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownersip bit */
dma_wmb();
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
+ unsigned long flags;
+ unsigned int irq = 0;
/*
* First disable irq(s) and then
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
else
- disable_irq_lockdep(np->pci_dev->irq);
+ irq = np->pci_dev->irq;
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
mask |= NVREG_IRQ_RX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
mask |= NVREG_IRQ_TX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
mask |= NVREG_IRQ_OTHER;
}
}
- /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
+
+ disable_irq_nosync_lockdep_irqsave(irq, &flags);
+ synchronize_irq(irq);
if (np->recover_error) {
np->recover_error = 0;
nv_nic_irq_optimized(0, dev);
else
nv_nic_irq(0, dev);
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq_lockdep(np->pci_dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
nv_nic_irq_rx(0, dev);
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
nv_nic_irq_tx(0, dev);
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
nv_nic_irq_other(0, dev);
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
+ enable_irq_lockdep_irqrestore(irq, &flags);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
struct sh_eth_txdesc *txdesc = NULL;
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
- int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
mdp->cur_rx = 0;
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
- /* The size of the buffer is a multiple of 16 bytes. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ /* The size of the buffer is a multiple of 32 bytes. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
dma_addr = dma_map_single(&ndev->dev, skb->data,
rxdesc->buffer_length,
DMA_FROM_DEVICE);
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
- int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
boguscnt = min(boguscnt, *quota);
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
dma_unmap_single(&ndev->dev, rxdesc->addr,
- ALIGN(mdp->rx_buf_sz, 16),
+ ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
- /* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ /* The size of the buffer is 32 byte boundary. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
+ struct device_node *phy_node;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
struct device *dev;
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
- slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+ if (priv->phy_node)
+ slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ else
+ slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
if (IS_ERR(slave->phy)) {
dev_err(priv->dev, "phy %s not found on slave %d\n",
slave->port_vlan = data->dual_emac_res_vlan;
}
-static int cpsw_probe_dt(struct cpsw_platform_data *data,
+static int cpsw_probe_dt(struct cpsw_priv *priv,
struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
+ struct cpsw_platform_data *data = &priv->data;
int i = 0, ret;
u32 prop;
if (strcmp(slave_node->name, "slave"))
continue;
+ priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
-
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&priv->data, pdev)) {
+ if (cpsw_probe_dt(priv, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
mac_phy_link = true;
slave->open = true;
- if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+ of_node_put(port);
break;
+ }
}
/* of_phy_connect() is needed only for MAC-PHY interface */
continue;
}
gbe_dev->num_slaves++;
- if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+ of_node_put(interface);
break;
+ }
}
of_node_put(interfaces);
rt = ip_route_output_key(geneve->net, fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
- dev->stats.tx_carrier_errors++;
- return rt;
+ return ERR_PTR(-ENETUNREACH);
}
if (rt->dst.dev == dev) { /* is this necessary? */
netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
- dev->stats.collisions++;
ip_rt_put(rt);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ELOOP);
}
return rt;
}
struct ip_tunnel_info *info = NULL;
struct rtable *rt = NULL;
const struct iphdr *iip; /* interior IP header */
+ int err = -EINVAL;
struct flowi4 fl4;
__u8 tos, ttl;
__be16 sport;
bool udp_csum;
__be16 df;
- int err;
if (geneve->collect_md) {
info = skb_tunnel_info(skb);
rt = geneve_get_rt(skb, dev, &fl4, info);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
- dev->stats.tx_carrier_errors++;
+ err = PTR_ERR(rt);
goto tx_error;
}
tx_error:
dev_kfree_skb(skb);
err:
- dev->stats.tx_errors++;
+ if (err == -ELOOP)
+ dev->stats.collisions++;
+ else if (err == -ENETUNREACH)
+ dev->stats.tx_carrier_errors++;
+ else
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
+static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ if (ip_tunnel_info_af(info) != AF_INET)
+ return -EINVAL;
+
+ rt = geneve_get_rt(skb, dev, &fl4, info);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+ info->key.u.ipv4.src = fl4.saddr;
+ info->key.tp_src = udp_flow_src_port(geneve->net, skb,
+ 1, USHRT_MAX, true);
+ info->key.tp_dst = geneve->dst_port;
+ return 0;
+}
+
static const struct net_device_ops geneve_netdev_ops = {
.ndo_init = geneve_init,
.ndo_uninit = geneve_uninit,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_fill_metadata_dst = geneve_fill_metadata_dst,
};
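For orientation, a minimal sketch of how a consumer is expected to invoke
the new callback (the wrapper below is assumed for illustration and needs
<linux/netdevice.h>; only ndo_fill_metadata_dst itself comes from this
series):

static int example_fill_tunnel_dst(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* ask the tunnel device to resolve egress metadata (saddr, ports) */
	if (!ops->ndo_fill_metadata_dst)
		return -EOPNOTSUPP;
	return ops->ndo_fill_metadata_dst(dev, skb);
}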
static void geneve_get_drvinfo(struct net_device *dev,
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
---help---
Supports the KSZ9021, VSC8201, KS8001 PHYs.
+config DP83848_PHY
+ tristate "Driver for Texas Instruments DP83848 PHY"
+ ---help---
+ Supports the DP83848 PHY.
+
config DP83867_PHY
tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
---help---
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
+obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
--- /dev/null
+/*
+ * Driver for the Texas Instruments DP83848 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83848_PHY_ID 0x20005c90
+
+/* Registers */
+#define DP83848_MICR 0x11
+#define DP83848_MISR 0x12
+
+/* MICR Register Fields */
+#define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */
+#define DP83848_MICR_INTEN BIT(1) /* Interrupt Enable */
+
+/* MISR Register Fields */
+#define DP83848_MISR_RHF_INT_EN BIT(0) /* Receive Error Counter */
+#define DP83848_MISR_FHF_INT_EN BIT(1) /* False Carrier Counter */
+#define DP83848_MISR_ANC_INT_EN BIT(2) /* Auto-negotiation complete */
+#define DP83848_MISR_DUP_INT_EN BIT(3) /* Duplex Status */
+#define DP83848_MISR_SPD_INT_EN BIT(4) /* Speed status */
+#define DP83848_MISR_LINK_INT_EN BIT(5) /* Link status */
+#define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */
+#define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */
+
+static int dp83848_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, DP83848_MISR);
+
+ return err < 0 ? err : 0;
+}
+
+static int dp83848_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_write(phydev, DP83848_MICR,
+ DP83848_MICR_INT_OE |
+ DP83848_MICR_INTEN);
+ if (err < 0)
+ return err;
+
+ return phy_write(phydev, DP83848_MISR,
+ DP83848_MISR_ANC_INT_EN |
+ DP83848_MISR_DUP_INT_EN |
+ DP83848_MISR_SPD_INT_EN |
+ DP83848_MISR_LINK_INT_EN);
+ }
+
+ return phy_write(phydev, DP83848_MICR, 0x0);
+}
+
+static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ { DP83848_PHY_ID, 0xfffffff0 },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
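+/* Note (illustrative): the 0xfffffff0 mask ignores the low revision
+ * nibble, so any PHY ID in the range 0x20005c90..0x20005c9f matches
+ * this entry.
+ */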
+
+static struct phy_driver dp83848_driver[] = {
+ {
+ .phy_id = DP83848_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "TI DP83848",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+
+ .soft_reset = genphy_soft_reset,
+ .config_init = genphy_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+
+ /* IRQ related */
+ .ack_interrupt = dp83848_ack_interrupt,
+ .config_intr = dp83848_config_intr,
+
+ .driver = { .owner = THIS_MODULE, },
+ },
+};
+module_phy_driver(dp83848_driver);
+
+MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com");
+MODULE_LICENSE("GPL");
if (!iprop || len != sizeof(uint32_t)) {
dev_err(&pdev->dev, "mdio-mux child node %s is "
"missing a 'reg' property\n", np2->full_name);
+ of_node_put(np2);
return -ENODEV;
}
if (be32_to_cpup(iprop) & ~s->mask) {
dev_err(&pdev->dev, "mdio-mux child node %s has "
"a 'reg' value with unmasked bits\n",
np2->full_name);
+ of_node_put(np2);
return -ENODEV;
}
}
dev_err(dev,
"Error: Failed to allocate memory for child\n");
ret_val = -ENOMEM;
+ of_node_put(child_bus_node);
break;
}
cb->bus_number = v;
return 0;
}
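The of_node_put() additions above all follow one rule; a minimal sketch of
the pattern (the function name and "reg" check are assumed for
illustration, needs <linux/of.h>):

static int example_scan_children(struct device_node *parent)
{
	struct device_node *child;

	/* the iterator takes a reference on 'child' on each pass and drops
	 * it when advancing, so every early exit must drop it by hand
	 */
	for_each_available_child_of_node(parent, child) {
		if (!of_find_property(child, "reg", NULL)) {
			of_node_put(child);
			return -ENODEV;
		}
	}
	return 0;
}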
+static int ksz9031_read_status(struct phy_device *phydev)
+{
+ int err;
+ int regval;
+
+ err = genphy_read_status(phydev);
+ if (err)
+ return err;
+
+ /* Make sure the PHY is not broken. Read idle error count,
+ * and reset the PHY if it is maxed out.
+ */
+ regval = phy_read(phydev, MII_STAT1000);
+ if ((regval & 0xFF) == 0xFF) {
+ phy_init_hw(phydev);
+ phydev->link = 0;
+ }
+
+ return 0;
+}
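+/* Background (hedged): bits 7:0 of MII_STAT1000 hold the 1000BASE-T
+ * idle error counter, so a saturated reading of 0xFF is taken above as
+ * the signature of a hung PHY that needs the phy_init_hw() reset.
+ */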
+
static int ksz8873mll_config_aneg(struct phy_device *phydev)
{
return 0;
.driver_data = &ksz9021_type,
.config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
+ .read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int __maybe_unused len;
+ struct device *dev __maybe_unused = &phydev->dev;
+ struct device_node *of_node __maybe_unused = dev->of_node;
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ int enable_energy = 1;
if (rc < 0)
return rc;
- /* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
- rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
+ if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
+ enable_energy = 0;
+
+ if (enable_energy) {
+ /* Enable energy detect mode for these SMSC transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+ }
return smsc_phy_ack_interrupt(phydev);
}
po = pppox_sk(sk);
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
+ {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
return 0;
}
+static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
+ struct ip_tunnel_info *info,
+ __be16 sport, __be16 dport)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_tos = RT_TOS(info->key.tos);
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ fl4.daddr = info->key.u.ipv4.dst;
+
+ rt = ip_route_output_key(vxlan->net, &fl4);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+ ip_rt_put(rt);
+
+ info->key.u.ipv4.src = fl4.saddr;
+ info->key.tp_src = sport;
+ info->key.tp_dst = dport;
+ return 0;
+}
+
+static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ __be16 sport, dport;
+
+ sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+ vxlan->cfg.port_max, true);
+ dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
+
+ if (ip_tunnel_info_af(info) == AF_INET)
+ return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+ return -EINVAL;
+}
+
static const struct net_device_ops vxlan_netdev_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
.ndo_fdb_add = vxlan_fdb_add,
.ndo_fdb_del = vxlan_fdb_delete,
.ndo_fdb_dump = vxlan_fdb_dump,
+ .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
board_filename, ret);
continue;
}
+ of_node_put(node);
return true;
}
return false;
}
static int xennet_create_queues(struct netfront_info *info,
- unsigned int num_queues)
+ unsigned int *num_queues)
{
unsigned int i;
int ret;
- info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+ info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
GFP_KERNEL);
if (!info->queues)
return -ENOMEM;
rtnl_lock();
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < *num_queues; i++) {
struct netfront_queue *queue = &info->queues[i];
queue->id = i;
if (ret < 0) {
dev_warn(&info->netdev->dev,
"only created %d queues\n", i);
- num_queues = i;
+ *num_queues = i;
break;
}
napi_enable(&queue->napi);
}
- netif_set_real_num_tx_queues(info->netdev, num_queues);
+ netif_set_real_num_tx_queues(info->netdev, *num_queues);
rtnl_unlock();
- if (num_queues == 0) {
+ if (*num_queues == 0) {
dev_err(&info->netdev->dev, "no queues\n");
return -EINVAL;
}
if (info->queues)
xennet_destroy_queues(info);
- err = xennet_create_queues(info, num_queues);
+ err = xennet_create_queues(info, &num_queues);
if (err < 0)
goto destroy_ring;
if (ret)
return ret;
- if (!node_online(node))
+ if (node >= MAX_NUMNODES || !node_online(node))
return -EINVAL;
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
struct mvs_slot_info *slot, u32 slot_idx)
{
+ if (!slot)
+ return;
if (!slot->task)
return;
if (!sas_protocol_ata(task->task_proto))
drv = scsi_dh_find_driver(sdev);
if (drv)
- devinfo = scsi_dh_lookup(drv);
+ devinfo = __scsi_dh_lookup(drv);
if (devinfo)
err = scsi_dh_handler_attach(sdev, devinfo);
return err;
}
-void scsi_dh_remove_device(struct scsi_device *sdev)
+void scsi_dh_release_device(struct scsi_device *sdev)
{
if (sdev->handler)
scsi_dh_handler_detach(sdev);
+}
+
+void scsi_dh_remove_device(struct scsi_device *sdev)
+{
device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
}
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
int scsi_dh_add_device(struct scsi_device *sdev);
+void scsi_dh_release_device(struct scsi_device *sdev);
void scsi_dh_remove_device(struct scsi_device *sdev);
#else
static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
+static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
#endif
sdev = container_of(work, struct scsi_device, ew.work);
+ scsi_dh_release_device(sdev);
+
parent = sdev->sdev_gendev.parent;
spin_lock_irqsave(sdev->host->host_lock, flags);
is for the regular SPI controller. Slave mode operation is not
supported.
+config SPI_BCM2835AUX
+ tristate "BCM2835 SPI auxiliary controller"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on GPIOLIB
+ help
+ This selects a driver for the Broadcom BCM2835 SPI aux master.
+
+ The BCM2835 contains two types of SPI master controller; the
+ "universal SPI master", and the regular SPI controller.
+ This driver is for the universal/auxiliary SPI controller.
+
config SPI_BFIN5XX
tristate "SPI controller driver for ADI Blackfin5xx"
depends on BLACKFIN && !BF60x
config SPI_BCM63XX
tristate "Broadcom BCM63xx SPI controller"
- depends on BCM63XX
+ depends on BCM63XX || COMPILE_TEST
help
Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
- depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
+obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
sp->bitbang.flags = SPI_CS_HIGH;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- ret = -ENOENT;
- goto err_put_master;
- }
-
- sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (!sp->base) {
- ret = -ENXIO;
+ sp->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(sp->base)) {
+ ret = PTR_ERR(sp->base);
goto err_put_master;
}
*plen = len;
- if (atmel_spi_dma_slave_config(as, &slave_config, 8))
+ if (atmel_spi_dma_slave_config(as, &slave_config,
+ xfer->bits_per_word))
goto err_exit;
/* Send both scatterlists */
* Calculate the lowest divider that satisfies the
* constraint, assuming div32/fdiv/mbz == 0.
*/
- if (xfer->speed_hz)
- scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
- else
- /*
- * This can happend if max_speed is null.
- * In this case, we set the lowest possible speed
- */
- scbr = 0xff;
+ scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
/*
* If the resulting divider doesn't fit into the
return -EINVAL;
}
- if (xfer->bits_per_word) {
- asd = spi->controller_state;
- bits = (asd->csr >> 4) & 0xf;
- if (bits != xfer->bits_per_word - 8) {
- dev_dbg(&spi->dev,
+ asd = spi->controller_state;
+ bits = (asd->csr >> 4) & 0xf;
+ if (bits != xfer->bits_per_word - 8) {
+ dev_dbg(&spi->dev,
"you can't yet change bits_per_word in transfers\n");
- return -ENOPROTOOPT;
- }
+ return -ENOPROTOOPT;
}
/*
unsigned bpw, hz;
u32 cfg, stat;
- bpw = spi->bits_per_word;
- hz = spi->max_speed_hz;
if (t) {
- if (t->bits_per_word)
- bpw = t->bits_per_word;
- if (t->speed_hz)
- hz = t->speed_hz;
+ bpw = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bpw = spi->bits_per_word;
+ hz = spi->max_speed_hz;
}
if (!hz)
goto out_master_put;
}
- bs->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
err = bs->irq ? bs->irq : -ENODEV;
clk_prepare_enable(bs->clk);
+ bcm2835_dma_init(master, &pdev->dev);
+
+ /* initialise the hardware with the default polarities */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
dev_name(&pdev->dev), master);
if (err) {
goto out_clk_disable;
}
- bcm2835_dma_init(master, &pdev->dev);
-
- /* initialise the hardware with the default polarities */
- bcm2835_wr(bs, BCM2835_SPI_CS,
- BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
-
err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
--- /dev/null
+/*
+ * Driver for Broadcom BCM2835 auxiliary SPI Controllers
+ *
+ * the driver does not rely on the native chipselects at all
+ * but only uses the gpio type chipselects
+ *
+ * Based on: spi-bcm2835.c
+ *
+ * Copyright (C) 2015 Martin Sperl
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+/*
+ * spi register defines
+ *
+ * note there is garbage in the "official" documentation,
+ * so some data is taken from the file:
+ * brcm_usrlib/dag/vmcsx/vcinclude/bcm2708_chip/aux_io.h
+ * inside of:
+ * http://www.broadcom.com/docs/support/videocore/Brcm_Android_ICS_Graphics_Stack.tar.gz
+ */
+
+/* SPI register offsets */
+#define BCM2835_AUX_SPI_CNTL0 0x00
+#define BCM2835_AUX_SPI_CNTL1 0x04
+#define BCM2835_AUX_SPI_STAT 0x08
+#define BCM2835_AUX_SPI_PEEK 0x0C
+#define BCM2835_AUX_SPI_IO 0x20
+#define BCM2835_AUX_SPI_TXHOLD 0x30
+
+/* Bitfields in CNTL0 */
+#define BCM2835_AUX_SPI_CNTL0_SPEED 0xFFF00000
+#define BCM2835_AUX_SPI_CNTL0_SPEED_MAX 0xFFF
+#define BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT 20
+#define BCM2835_AUX_SPI_CNTL0_CS 0x000E0000
+#define BCM2835_AUX_SPI_CNTL0_POSTINPUT 0x00010000
+#define BCM2835_AUX_SPI_CNTL0_VAR_CS 0x00008000
+#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000
+#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000
+#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800
+#define BCM2835_AUX_SPI_CNTL0_CPHA_IN 0x00000400
+#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200
+#define BCM2835_AUX_SPI_CNTL0_CPHA_OUT 0x00000100
+#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080
+#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040
+#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F
+
+/* Bitfields in CNTL1 */
+#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
+#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080
+#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040
+#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
+#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
+
+/* Bitfields in STAT */
+#define BCM2835_AUX_SPI_STAT_TX_LVL 0xFF000000
+#define BCM2835_AUX_SPI_STAT_RX_LVL 0x00FF0000
+#define BCM2835_AUX_SPI_STAT_TX_FULL 0x00000400
+#define BCM2835_AUX_SPI_STAT_TX_EMPTY 0x00000200
+#define BCM2835_AUX_SPI_STAT_RX_FULL 0x00000100
+#define BCM2835_AUX_SPI_STAT_RX_EMPTY 0x00000080
+#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
+#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
+
+/* timeout values */
+#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
+#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
+
+#define BCM2835_AUX_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+ | SPI_NO_CS)
+
+struct bcm2835aux_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ u32 cntl[2];
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int tx_len;
+ int rx_len;
+ int pending;
+};
+
+static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
+{
+ return readl(bs->regs + reg);
+}
+
+static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
+ u32 val)
+{
+ writel(val, bs->regs + reg);
+}
+
+static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
+{
+ u32 data;
+ int count = min(bs->rx_len, 3);
+
+ data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
+ if (bs->rx_buf) {
+ switch (count) {
+ case 3:
+ *bs->rx_buf++ = (data >> 16) & 0xff;
+ /* fallthrough */
+ case 2:
+ *bs->rx_buf++ = (data >> 8) & 0xff;
+ /* fallthrough */
+ case 1:
+ *bs->rx_buf++ = (data >> 0) & 0xff;
+ /* fallthrough - no default */
+ }
+ }
+ bs->rx_len -= count;
+ bs->pending -= count;
+}
+
+static inline void bcm2835aux_wr_fifo(struct bcm2835aux_spi *bs)
+{
+ u32 data;
+ u8 byte;
+ int count;
+ int i;
+
+ /* gather up to 3 bytes to write to the FIFO */
+ count = min(bs->tx_len, 3);
+ data = 0;
+ for (i = 0; i < count; i++) {
+ byte = bs->tx_buf ? *bs->tx_buf++ : 0;
+ data |= byte << (8 * (2 - i));
+ }
+
+ /* and set the variable bit-length */
+ data |= (count * 8) << 24;
+
+ /* and decrement length */
+ bs->tx_len -= count;
+ bs->pending += count;
+
+ /* write to the correct TX-register */
+ if (bs->tx_len)
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_TXHOLD, data);
+ else
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_IO, data);
+}
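+/* Worked example (illustration only): queueing {0xaa, 0xbb, 0xcc} packs
+ * the bytes MSB-first into 0x00aabbcc, then ORs the shift length of
+ * 24 bits (3 bytes * 8) into bits 31:24, so 0x18aabbcc is written.
+ */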
+
+static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
+{
+ /* disable SPI and its interrupts, and clear the FIFO */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, 0);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0,
+ BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
+}
+
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ irqreturn_t ret = IRQ_NONE;
+
+ /* check if we have data to read */
+ while (bs->rx_len &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+ bcm2835aux_rd_fifo(bs);
+ ret = IRQ_HANDLED;
+ }
+
+ /* check if we have data to write */
+ while (bs->tx_len &&
+ (bs->pending < 12) &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ ret = IRQ_HANDLED;
+ }
+
+ /* and check if we have reached "done" */
+ while (bs->rx_len &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_BUSY))) {
+ bcm2835aux_rd_fifo(bs);
+ ret = IRQ_HANDLED;
+ }
+
+ /* and if rx_len is 0 then wake up completion and disable spi */
+ if (!bs->rx_len) {
+ bcm2835aux_spi_reset_hw(bs);
+ complete(&master->xfer_completion);
+ }
+
+ /* and return */
+ return ret;
+}
+
+static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* enable interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
+ BCM2835_AUX_SPI_CNTL1_TXEMPTY |
+ BCM2835_AUX_SPI_CNTL1_IDLE);
+
+ /* and wait for finish... */
+ return 1;
+}
+
+static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* fill in registers and fifos before enabling interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ /* fill in tx fifo with data before enabling interrupts */
+ while ((bs->tx_len) &&
+ (bs->pending < 12) &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ }
+
+ /* now run the interrupt mode */
+ return __bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+}
+
+static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ unsigned long timeout;
+ u32 stat;
+
+ /* configure spi */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ /* set the timeout */
+ timeout = jiffies + BCM2835_AUX_SPI_POLLING_JIFFIES;
+
+ /* loop until finished the transfer */
+ while (bs->rx_len) {
+ /* read status */
+ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
+
+ /* fill in tx fifo with remaining data */
+ if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ continue;
+ }
+
+ /* read data from fifo for both cases */
+ if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
+ bcm2835aux_rd_fifo(bs);
+ continue;
+ }
+ if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
+ bcm2835aux_rd_fifo(bs);
+ continue;
+ }
+
+ /* there is still data pending to read check the timeout */
+ if (bs->rx_len && time_after(jiffies, timeout)) {
+ dev_dbg_ratelimited(&spi->dev,
+ "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
+ jiffies - timeout,
+ bs->tx_len, bs->rx_len);
+ /* forward to interrupt handler */
+ return __bcm2835aux_spi_transfer_one_irq(master,
+ spi, tfr);
+ }
+ }
+
+ /* Transfer complete - reset SPI HW */
+ bcm2835aux_spi_reset_hw(bs);
+
+ /* and return without waiting for completion */
+ return 0;
+}
+
+static int bcm2835aux_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ unsigned long spi_hz, clk_hz, speed;
+ unsigned long spi_used_hz;
+ unsigned long long xfer_time_us;
+
+ /* calculate the registers to handle
+ *
+ * note that we use the variable-width data mode, which is not
+ * optimal for longer transfers: the top byte of each FIFO word
+ * carries the shift length, so only 3 of 4 bytes move data and
+ * transfers of more than 12 bytes (potentially) cost extra
+ * interrupts
+ */
+ bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
+ BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
+ BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
+ bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
+
+ /* set clock */
+ spi_hz = tfr->speed_hz;
+ clk_hz = clk_get_rate(bs->clk);
+
+ if (spi_hz >= clk_hz / 2) {
+ speed = 0;
+ } else if (spi_hz) {
+ speed = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
+ if (speed > BCM2835_AUX_SPI_CNTL0_SPEED_MAX)
+ speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
+ } else { /* the slowest we can go */
+ speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
+ }
+ bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
+
+ spi_used_hz = clk_hz / (2 * (speed + 1));
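+ /* worked example (assumed rates): clk_hz = 250 MHz and a 10 MHz
+  * request give DIV_ROUND_UP(250 MHz, 20 MHz) - 1 = 12, so the
+  * transfer actually runs at 250 MHz / (2 * 13) ~= 9.6 MHz.
+  */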
+
+ /* handle all the modes */
+ if (spi->mode & SPI_CPOL)
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
+ if (spi->mode & SPI_CPHA)
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPHA_OUT |
+ BCM2835_AUX_SPI_CNTL0_CPHA_IN;
+
+ /* set transmit buffers and length */
+ bs->tx_buf = tfr->tx_buf;
+ bs->rx_buf = tfr->rx_buf;
+ bs->tx_len = tfr->len;
+ bs->rx_len = tfr->len;
+ bs->pending = 0;
+
+ /* calculate the estimated time in us the transfer runs
+ * note that there are 2 idle clocks after each
+ * chunk getting transferred - in our case the chunk size
+ * is 3 bytes, so we approximate this by 9 bits/byte
+ */
+ xfer_time_us = (u64)tfr->len * 9 * 1000000;
+ do_div(xfer_time_us, spi_used_hz);
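+ /* e.g. (illustrative): 12 bytes at ~9.6 MHz -> 12 * 9 / 9.6e6 s
+  * ~= 11 us, below the 30 us polling limit, so no interrupts needed.
+  */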
+
+ /* run in polling mode for short transfers */
+ if (xfer_time_us < BCM2835_AUX_SPI_POLLING_LIMIT_US)
+ return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);
+
+ /* run in interrupt mode for all others */
+ return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+}
+
+static void bcm2835aux_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+}
+
+static int bcm2835aux_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm2835aux_spi *bs;
+ struct resource *res;
+ unsigned long clk_hz;
+ int err;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ master->mode_bits = BCM2835_AUX_SPI_MODE_BITS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->num_chipselect = -1;
+ master->transfer_one = bcm2835aux_spi_transfer_one;
+ master->handle_err = bcm2835aux_spi_handle_err;
+ master->dev.of_node = pdev->dev.of_node;
+
+ bs = spi_master_get_devdata(master);
+
+ /* the main area */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bs->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bs->regs)) {
+ err = PTR_ERR(bs->regs);
+ goto out_master_put;
+ }
+
+ bs->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bs->clk)) {
+ err = PTR_ERR(bs->clk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ goto out_master_put;
+ }
+
+ bs->irq = platform_get_irq(pdev, 0);
+ if (bs->irq <= 0) {
+ dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
+ err = bs->irq ? bs->irq : -ENODEV;
+ goto out_master_put;
+ }
+
+ /* this also enables the HW block */
+ err = clk_prepare_enable(bs->clk);
+ if (err) {
+ dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
+ goto out_master_put;
+ }
+
+ /* just checking if the clock returns a sane value */
+ clk_hz = clk_get_rate(bs->clk);
+ if (!clk_hz) {
+ dev_err(&pdev->dev, "clock returns 0 Hz\n");
+ err = -ENODEV;
+ goto out_clk_disable;
+ }
+
+ /* reset SPI-HW block */
+ bcm2835aux_spi_reset_hw(bs);
+
+ err = devm_request_irq(&pdev->dev, bs->irq,
+ bcm2835aux_spi_interrupt,
+ IRQF_SHARED,
+ dev_name(&pdev->dev), master);
+ if (err) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(bs->clk);
+out_master_put:
+ spi_master_put(master);
+ return err;
+}
+
+static int bcm2835aux_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+
+ /* disable the HW block by releasing the clock */
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835aux_spi_match[] = {
+ { .compatible = "brcm,bcm2835-aux-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835aux_spi_match);
+
+static struct platform_driver bcm2835aux_spi_driver = {
+ .driver = {
+ .name = "spi-bcm2835aux",
+ .of_match_table = bcm2835aux_spi_match,
+ },
+ .probe = bcm2835aux_spi_probe,
+ .remove = bcm2835aux_spi_remove,
+};
+module_platform_driver(bcm2835aux_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835 aux");
+MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
+MODULE_LICENSE("GPL v2");
if (err) {
spi_master_put(master);
bcma_set_drvdata(core, NULL);
- goto out;
+ return err;
}
/* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */
spi_new_device(master, &bcm53xx_info);
-out:
- return err;
-}
-
-static void bcm53xxspi_bcma_remove(struct bcma_device *core)
-{
- struct bcm53xxspi *b53spi = bcma_get_drvdata(core);
-
- spi_unregister_master(b53spi->master);
+ return 0;
}
static struct bcma_driver bcm53xxspi_bcma_driver = {
.name = KBUILD_MODNAME,
.id_table = bcm53xxspi_bcma_tbl,
.probe = bcm53xxspi_bcma_probe,
- .remove = bcm53xxspi_bcma_remove,
};
/**************************************************
#include <linux/err.h>
#include <linux/pm_runtime.h>
-#include <bcm63xx_dev_spi.h>
+/* BCM 6338/6348 SPI core */
+#define SPI_6348_RSET_SIZE 64
+#define SPI_6348_CMD 0x00 /* 16-bit register */
+#define SPI_6348_INT_STATUS 0x02
+#define SPI_6348_INT_MASK_ST 0x03
+#define SPI_6348_INT_MASK 0x04
+#define SPI_6348_ST 0x05
+#define SPI_6348_CLK_CFG 0x06
+#define SPI_6348_FILL_BYTE 0x07
+#define SPI_6348_MSG_TAIL 0x09
+#define SPI_6348_RX_TAIL 0x0b
+#define SPI_6348_MSG_CTL 0x40 /* 8-bit register */
+#define SPI_6348_MSG_CTL_WIDTH 8
+#define SPI_6348_MSG_DATA 0x41
+#define SPI_6348_MSG_DATA_SIZE 0x3f
+#define SPI_6348_RX_DATA 0x80
+#define SPI_6348_RX_DATA_SIZE 0x3f
+
+/* BCM 3368/6358/6362/6368 SPI core */
+#define SPI_6358_RSET_SIZE 1804
+#define SPI_6358_MSG_CTL 0x00 /* 16-bit register */
+#define SPI_6358_MSG_CTL_WIDTH 16
+#define SPI_6358_MSG_DATA 0x02
+#define SPI_6358_MSG_DATA_SIZE 0x21e
+#define SPI_6358_RX_DATA 0x400
+#define SPI_6358_RX_DATA_SIZE 0x220
+#define SPI_6358_CMD 0x700 /* 16-bit register */
+#define SPI_6358_INT_STATUS 0x702
+#define SPI_6358_INT_MASK_ST 0x703
+#define SPI_6358_INT_MASK 0x704
+#define SPI_6358_ST 0x705
+#define SPI_6358_CLK_CFG 0x706
+#define SPI_6358_FILL_BYTE 0x707
+#define SPI_6358_MSG_TAIL 0x709
+#define SPI_6358_RX_TAIL 0x70B
+
+/* Shared SPI definitions */
+
+/* Message configuration */
+#define SPI_FD_RW 0x00
+#define SPI_HD_W 0x01
+#define SPI_HD_R 0x02
+#define SPI_BYTE_CNT_SHIFT 0
+#define SPI_6348_MSG_TYPE_SHIFT 6
+#define SPI_6358_MSG_TYPE_SHIFT 14
+
+/* Command */
+#define SPI_CMD_NOOP 0x00
+#define SPI_CMD_SOFT_RESET 0x01
+#define SPI_CMD_HARD_RESET 0x02
+#define SPI_CMD_START_IMMEDIATE 0x03
+#define SPI_CMD_COMMAND_SHIFT 0
+#define SPI_CMD_COMMAND_MASK 0x000f
+#define SPI_CMD_DEVICE_ID_SHIFT 4
+#define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
+#define SPI_CMD_ONE_BYTE_SHIFT 11
+#define SPI_CMD_ONE_WIRE_SHIFT 12
+#define SPI_DEV_ID_0 0
+#define SPI_DEV_ID_1 1
+#define SPI_DEV_ID_2 2
+#define SPI_DEV_ID_3 3
+
+/* Interrupt mask */
+#define SPI_INTR_CMD_DONE 0x01
+#define SPI_INTR_RX_OVERFLOW 0x02
+#define SPI_INTR_TX_UNDERFLOW 0x04
+#define SPI_INTR_TX_OVERFLOW 0x08
+#define SPI_INTR_RX_UNDERFLOW 0x10
+#define SPI_INTR_CLEAR_ALL 0x1f
+
+/* Status */
+#define SPI_RX_EMPTY 0x02
+#define SPI_CMD_BUSY 0x04
+#define SPI_SERIAL_BUSY 0x08
+
+/* Clock configuration */
+#define SPI_CLK_20MHZ 0x00
+#define SPI_CLK_0_391MHZ 0x01
+#define SPI_CLK_0_781MHZ 0x02 /* default */
+#define SPI_CLK_1_563MHZ 0x03
+#define SPI_CLK_3_125MHZ 0x04
+#define SPI_CLK_6_250MHZ 0x05
+#define SPI_CLK_12_50MHZ 0x06
+#define SPI_CLK_MASK 0x07
+#define SPI_SSOFFTIME_MASK 0x38
+#define SPI_SSOFFTIME_SHIFT 3
+#define SPI_BYTE_SWAP 0x80
+
+enum bcm63xx_regs_spi {
+ SPI_CMD,
+ SPI_INT_STATUS,
+ SPI_INT_MASK_ST,
+ SPI_INT_MASK,
+ SPI_ST,
+ SPI_CLK_CFG,
+ SPI_FILL_BYTE,
+ SPI_MSG_TAIL,
+ SPI_RX_TAIL,
+ SPI_MSG_CTL,
+ SPI_MSG_DATA,
+ SPI_RX_DATA,
+ SPI_MSG_TYPE_SHIFT,
+ SPI_MSG_CTL_WIDTH,
+ SPI_MSG_DATA_SIZE,
+};
#define BCM63XX_SPI_MAX_PREPEND 15
+#define BCM63XX_SPI_MAX_CS 8
+#define BCM63XX_SPI_BUS_NUM 0
+
struct bcm63xx_spi {
struct completion done;
int irq;
/* Platform data */
+ const unsigned long *reg_offsets;
unsigned fifo_size;
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
};
static inline u8 bcm_spi_readb(struct bcm63xx_spi *bs,
- unsigned int offset)
+ unsigned int offset)
{
- return bcm_readb(bs->regs + bcm63xx_spireg(offset));
+ return readb(bs->regs + bs->reg_offsets[offset]);
}
static inline u16 bcm_spi_readw(struct bcm63xx_spi *bs,
unsigned int offset)
{
- return bcm_readw(bs->regs + bcm63xx_spireg(offset));
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ return ioread16be(bs->regs + bs->reg_offsets[offset]);
+#else
+ return readw(bs->regs + bs->reg_offsets[offset]);
+#endif
}
static inline void bcm_spi_writeb(struct bcm63xx_spi *bs,
u8 value, unsigned int offset)
{
- bcm_writeb(value, bs->regs + bcm63xx_spireg(offset));
+ writeb(value, bs->regs + bs->reg_offsets[offset]);
}
static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
u16 value, unsigned int offset)
{
- bcm_writew(value, bs->regs + bcm63xx_spireg(offset));
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ iowrite16be(value, bs->regs + bs->reg_offsets[offset]);
+#else
+ writew(value, bs->regs + bs->reg_offsets[offset]);
+#endif
}
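The #ifdef pair above amounts to a native-endian 16-bit register access; a
compact restatement of the idea (sketch, not part of the patch):

static inline u16 native_ioread16(void __iomem *addr)
{
#ifdef CONFIG_CPU_BIG_ENDIAN
	return ioread16be(addr);	/* big-endian CPU: no byte swap */
#else
	return readw(addr);		/* little-endian CPU: no byte swap */
#endif
}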
static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
u16 msg_ctl;
u16 cmd;
- u8 rx_tail;
unsigned int i, timeout = 0, prepend_len = 0, len = 0;
struct spi_transfer *t = first;
bool do_rx = false;
return IRQ_HANDLED;
}
+static const unsigned long bcm6348_spi_reg_offsets[] = {
+ [SPI_CMD] = SPI_6348_CMD,
+ [SPI_INT_STATUS] = SPI_6348_INT_STATUS,
+ [SPI_INT_MASK_ST] = SPI_6348_INT_MASK_ST,
+ [SPI_INT_MASK] = SPI_6348_INT_MASK,
+ [SPI_ST] = SPI_6348_ST,
+ [SPI_CLK_CFG] = SPI_6348_CLK_CFG,
+ [SPI_FILL_BYTE] = SPI_6348_FILL_BYTE,
+ [SPI_MSG_TAIL] = SPI_6348_MSG_TAIL,
+ [SPI_RX_TAIL] = SPI_6348_RX_TAIL,
+ [SPI_MSG_CTL] = SPI_6348_MSG_CTL,
+ [SPI_MSG_DATA] = SPI_6348_MSG_DATA,
+ [SPI_RX_DATA] = SPI_6348_RX_DATA,
+ [SPI_MSG_TYPE_SHIFT] = SPI_6348_MSG_TYPE_SHIFT,
+ [SPI_MSG_CTL_WIDTH] = SPI_6348_MSG_CTL_WIDTH,
+ [SPI_MSG_DATA_SIZE] = SPI_6348_MSG_DATA_SIZE,
+};
+
+static const unsigned long bcm6358_spi_reg_offsets[] = {
+ [SPI_CMD] = SPI_6358_CMD,
+ [SPI_INT_STATUS] = SPI_6358_INT_STATUS,
+ [SPI_INT_MASK_ST] = SPI_6358_INT_MASK_ST,
+ [SPI_INT_MASK] = SPI_6358_INT_MASK,
+ [SPI_ST] = SPI_6358_ST,
+ [SPI_CLK_CFG] = SPI_6358_CLK_CFG,
+ [SPI_FILL_BYTE] = SPI_6358_FILL_BYTE,
+ [SPI_MSG_TAIL] = SPI_6358_MSG_TAIL,
+ [SPI_RX_TAIL] = SPI_6358_RX_TAIL,
+ [SPI_MSG_CTL] = SPI_6358_MSG_CTL,
+ [SPI_MSG_DATA] = SPI_6358_MSG_DATA,
+ [SPI_RX_DATA] = SPI_6358_RX_DATA,
+ [SPI_MSG_TYPE_SHIFT] = SPI_6358_MSG_TYPE_SHIFT,
+ [SPI_MSG_CTL_WIDTH] = SPI_6358_MSG_CTL_WIDTH,
+ [SPI_MSG_DATA_SIZE] = SPI_6358_MSG_DATA_SIZE,
+};
+
+static const struct platform_device_id bcm63xx_spi_dev_match[] = {
+ {
+ .name = "bcm6348-spi",
+ .driver_data = (unsigned long)bcm6348_spi_reg_offsets,
+ },
+ {
+ .name = "bcm6358-spi",
+ .driver_data = (unsigned long)bcm6358_spi_reg_offsets,
+ },
+ {
+ },
+};
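A board selects one of these register maps purely by the name it registers
the platform device under; a hedged board-side sketch (the base address
and IRQ number are assumptions, not taken from this series):

static struct resource bcm6358_spi_res[] = {
	DEFINE_RES_MEM(0xfffe0800, SPI_6358_RSET_SIZE),	/* assumed base */
	DEFINE_RES_IRQ(29),				/* assumed IRQ */
};

static struct platform_device bcm6358_spi_device = {
	.name		= "bcm6358-spi",	/* matches bcm63xx_spi_dev_match */
	.id		= -1,
	.resource	= bcm6358_spi_res,
	.num_resources	= ARRAY_SIZE(bcm6358_spi_res),
};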
static int bcm63xx_spi_probe(struct platform_device *pdev)
{
struct resource *r;
+ const unsigned long *bcm63xx_spireg;
struct device *dev = &pdev->dev;
- struct bcm63xx_spi_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq;
struct spi_master *master;
struct clk *clk;
struct bcm63xx_spi *bs;
int ret;
+ if (!pdev->id_entry->driver_data)
+ return -EINVAL;
+
+ bcm63xx_spireg = (const unsigned long *)pdev->id_entry->driver_data;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no irq\n");
bs->irq = irq;
bs->clk = clk;
- bs->fifo_size = pdata->fifo_size;
+ bs->reg_offsets = bcm63xx_spireg;
+ bs->fifo_size = bs->reg_offsets[SPI_MSG_DATA_SIZE];
ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
pdev->name, master);
goto out_err;
}
- master->bus_num = pdata->bus_num;
- master->num_chipselect = pdata->num_chipselect;
+ master->bus_num = BCM63XX_SPI_BUS_NUM;
+ master->num_chipselect = BCM63XX_SPI_MAX_CS;
master->transfer_one_message = bcm63xx_spi_transfer_one;
master->mode_bits = MODEBITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->auto_runtime_pm = true;
- bs->msg_type_shift = pdata->msg_type_shift;
- bs->msg_ctl_width = pdata->msg_ctl_width;
- bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
- bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
-
- switch (bs->msg_ctl_width) {
- case 8:
- case 16:
- break;
- default:
- dev_err(dev, "unsupported MSG_CTL width: %d\n",
- bs->msg_ctl_width);
- goto out_err;
- }
+ bs->msg_type_shift = bs->reg_offsets[SPI_MSG_TYPE_SHIFT];
+ bs->msg_ctl_width = bs->reg_offsets[SPI_MSG_CTL_WIDTH];
+ bs->tx_io = (u8 *)(bs->regs + bs->reg_offsets[SPI_MSG_DATA]);
+ bs->rx_io = (const u8 *)(bs->regs + bs->reg_offsets[SPI_RX_DATA]);
/* Initialize hardware */
ret = clk_prepare_enable(bs->clk);
.name = "bcm63xx-spi",
.pm = &bcm63xx_spi_pm_ops,
},
+ .id_table = bcm63xx_spi_dev_match,
.probe = bcm63xx_spi_probe,
.remove = bcm63xx_spi_remove,
};
transfer = drv_data->cur_transfer;
chip = drv_data->cur_chip;
- if (transfer->speed_hz)
- transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
- else
- transfer_speed = chip->baud;
+ transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
SSYNC();
message->state = RUNNING_STATE;
dma_config = 0;
- /* Speed setup (surely valid because already checked) */
- if (transfer->speed_hz)
- bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
- else
- bfin_write(&drv_data->regs->baud, chip->baud);
+ bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
bfin_spi_cs_active(drv_data, chip);
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#define SPI_BITBANG_CS_DELAY 100
+
/*----------------------------------------------------------------------*/
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
- unsigned long flags;
bitbang = spi_master_get_devdata(spi->master);
*/
/* deselect chip (low or high) */
- spin_lock_irqsave(&bitbang->lock, flags);
+ mutex_lock(&bitbang->lock);
if (!bitbang->busy) {
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(cs->nsecs);
}
- spin_unlock_irqrestore(&bitbang->lock, flags);
+ mutex_unlock(&bitbang->lock);
return 0;
}
static int spi_bitbang_prepare_hardware(struct spi_master *spi)
{
struct spi_bitbang *bitbang;
- unsigned long flags;
bitbang = spi_master_get_devdata(spi);
- spin_lock_irqsave(&bitbang->lock, flags);
+ mutex_lock(&bitbang->lock);
bitbang->busy = 1;
- spin_unlock_irqrestore(&bitbang->lock, flags);
+ mutex_unlock(&bitbang->lock);
return 0;
}
static int spi_bitbang_transfer_one(struct spi_master *master,
- struct spi_message *m)
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
{
- struct spi_bitbang *bitbang;
- unsigned nsecs;
- struct spi_transfer *t = NULL;
- unsigned cs_change;
- int status;
- int do_setup = -1;
- struct spi_device *spi = m->spi;
-
- bitbang = spi_master_get_devdata(master);
-
- /* FIXME this is made-up ... the correct value is known to
- * word-at-a-time bitbang code, and presumably chipselect()
- * should enforce these requirements too?
- */
- nsecs = 100;
-
- cs_change = 1;
- status = 0;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
-
- /* override speed or wordsize? */
- if (t->speed_hz || t->bits_per_word)
- do_setup = 1;
-
- /* init (-1) or override (1) transfer params */
- if (do_setup != 0) {
- if (bitbang->setup_transfer) {
- status = bitbang->setup_transfer(spi, t);
- if (status < 0)
- break;
- }
- if (do_setup == -1)
- do_setup = 0;
- }
-
- /* set up default clock polarity, and activate chip;
- * this implicitly updates clock and spi modes as
- * previously recorded for this device via setup().
- * (and also deselects any other chip that might be
- * selected ...)
- */
- if (cs_change) {
- bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
- ndelay(nsecs);
- }
- cs_change = t->cs_change;
- if (!t->tx_buf && !t->rx_buf && t->len) {
- status = -EINVAL;
- break;
- }
-
- /* transfer data. the lower level code handles any
- * new dma mappings it needs. our caller always gave
- * us dma-safe buffers.
- */
- if (t->len) {
- /* REVISIT dma API still needs a designated
- * DMA_ADDR_INVALID; ~0 might be better.
- */
- if (!m->is_dma_mapped)
- t->rx_dma = t->tx_dma = 0;
- status = bitbang->txrx_bufs(spi, t);
- }
- if (status > 0)
- m->actual_length += status;
- if (status != t->len) {
- /* always report some kind of error */
- if (status >= 0)
- status = -EREMOTEIO;
- break;
- }
- status = 0;
+ struct spi_bitbang *bitbang = spi_master_get_devdata(master);
+ int status = 0;
- /* protocol tweaks before next transfer */
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (cs_change &&
- !list_is_last(&t->transfer_list, &m->transfers)) {
- /* sometimes a short mid-message deselect of the chip
- * may be needed to terminate a mode or command
- */
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
- }
+ if (bitbang->setup_transfer) {
+ status = bitbang->setup_transfer(spi, transfer);
+ if (status < 0)
+ goto out;
}
- m->status = status;
+ if (transfer->len)
+ status = bitbang->txrx_bufs(spi, transfer);
- /* normally deactivate chipselect ... unless no error and
- * cs_change has hinted that the next message will probably
- * be for this chip too.
- */
- if (!(status == 0 && cs_change)) {
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
- }
+ if (status == transfer->len)
+ status = 0;
+ else if (status >= 0)
+ status = -EREMOTEIO;
- spi_finalize_current_message(master);
+out:
+ spi_finalize_current_transfer(master);
return status;
}
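For orientation (a summary of the framework contract, not text from the
patch): with ->transfer_one() the SPI core walks the message's transfer
list, applies cs_change and delay semantics, and toggles ->set_cs() around
each transfer, so the driver reduces to roughly:

static int example_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/* per-transfer clock/word-size setup, then kick off the hardware */
	return 1;	/* non-zero tells the core to wait for completion */
}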
static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
{
struct spi_bitbang *bitbang;
- unsigned long flags;
bitbang = spi_master_get_devdata(spi);
- spin_lock_irqsave(&bitbang->lock, flags);
+ mutex_lock(&bitbang->lock);
bitbang->busy = 0;
- spin_unlock_irqrestore(&bitbang->lock, flags);
+ mutex_unlock(&bitbang->lock);
return 0;
}
+static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_bitbang *bitbang = spi_master_get_devdata(spi->master);
+
+ /* The SPI core hands us the desired CS line level (high/low), but
+  * the bitbang driver thinks in CS active/inactive terms; the core
+  * has already taken SPI_CS_HIGH into account.
+  */
+ enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
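+ /* e.g. (illustrative): an active-low device (SPI_CS_HIGH clear) is
+  * asserted with enable == false, and 0 == 0 maps to "active"; an
+  * active-high device is asserted with enable == true, and 1 == 1
+  * likewise maps to "active".
+  */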
+
+ ndelay(SPI_BITBANG_CS_DELAY);
+ bitbang->chipselect(spi, enable ? BITBANG_CS_ACTIVE :
+ BITBANG_CS_INACTIVE);
+ ndelay(SPI_BITBANG_CS_DELAY);
+}
+
/*----------------------------------------------------------------------*/
/**
if (!master || !bitbang->chipselect)
return -EINVAL;
- spin_lock_init(&bitbang->lock);
+ mutex_init(&bitbang->lock);
if (!master->mode_bits)
master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
- master->transfer_one_message = spi_bitbang_transfer_one;
+ master->transfer_one = spi_bitbang_transfer_one;
+ master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
+ pm_runtime_enable(&pdev->dev);
status = devm_spi_register_master(&pdev->dev, master);
if (status) {
dev_dbg(&pdev->dev, "spi_register_master failed\n");
goto fail2;
}
- pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
fail2:
+ pm_runtime_disable(&pdev->dev);
mcfqspi_cs_teardown(mcfqspi);
fail1:
clk_disable(mcfqspi->clk);
struct davinci_spi_config *spicfg = spi->controller_data;
u8 chip_sel = spi->chip_select;
u16 spidat1 = CS_DEFAULT;
- bool gpio_chipsel = false;
- int gpio;
dspi = spi_master_get_devdata(spi->master);
pdata = &dspi->pdata;
- if (spi->cs_gpio >= 0) {
- /* SPI core parse and update master->cs_gpio */
- gpio_chipsel = true;
- gpio = spi->cs_gpio;
- }
-
/* program delay transfers if tx_delay is non zero */
if (spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
* Board specific chip select logic decides the polarity and cs
* line for the controller
*/
- if (gpio_chipsel) {
+ if (spi->cs_gpio >= 0) {
if (value == BITBANG_CS_ACTIVE)
- gpio_set_value(gpio, spi->mode & SPI_CS_HIGH);
+ gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH);
else
- gpio_set_value(gpio, !(spi->mode & SPI_CS_HIGH));
+ gpio_set_value(spi->cs_gpio,
+ !(spi->mode & SPI_CS_HIGH));
} else {
if (value == BITBANG_CS_ACTIVE) {
spidat1 |= SPIDAT1_CSHOLD_MASK;
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
+#include <linux/property.h>
#include "spi-dw.h"
dws->max_freq = clk_get_rate(dwsmmio->clk);
- of_property_read_u32(pdev->dev.of_node, "reg-io-width",
- &dws->reg_io_width);
+ device_property_read_u32(&pdev->dev, "reg-io-width", &dws->reg_io_width);
num_cs = 4;
- if (pdev->dev.of_node)
- of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ device_property_read_u32(&pdev->dev, "num-cs", &num_cs);
dws->num_cs = num_cs;
#define DRIVER_NAME "dw_spi_pci"
-struct dw_spi_pci {
- struct pci_dev *pdev;
- struct dw_spi dws;
-};
-
struct spi_pci_desc {
int (*setup)(struct dw_spi *);
u16 num_cs;
static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct dw_spi_pci *dwpci;
struct dw_spi *dws;
struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data;
int pci_bar = 0;
if (ret)
return ret;
- dwpci = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_pci),
- GFP_KERNEL);
- if (!dwpci)
+ dws = devm_kzalloc(&pdev->dev, sizeof(*dws), GFP_KERNEL);
+ if (!dws)
return -ENOMEM;
- dwpci->pdev = pdev;
- dws = &dwpci->dws;
-
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
return ret;
dws->regs = pcim_iomap_table(pdev)[pci_bar];
-
dws->irq = pdev->irq;
/*
return ret;
/* PCI hook and SPI hook use the same drv data */
- pci_set_drvdata(pdev, dwpci);
+ pci_set_drvdata(pdev, dws);
dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
static void spi_pci_remove(struct pci_dev *pdev)
{
- struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ struct dw_spi *dws = pci_get_drvdata(pdev);
- dw_spi_remove_host(&dwpci->dws);
+ dw_spi_remove_host(dws);
}
#ifdef CONFIG_PM_SLEEP
static int spi_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ struct dw_spi *dws = pci_get_drvdata(pdev);
- return dw_spi_suspend_host(&dwpci->dws);
+ return dw_spi_suspend_host(dws);
}
static int spi_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ struct dw_spi *dws = pci_get_drvdata(pdev);
- return dw_spi_resume_host(&dwpci->dws);
+ return dw_spi_resume_host(dws);
}
#endif
/* Slave spi_dev related */
struct chip_data {
- u16 cr0;
u8 cs; /* chip select pin */
- u8 n_bytes; /* current is a 1/2/4 byte op */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
- u32 dma_width;
- u32 rx_threshold;
- u32 tx_threshold;
u8 enable_dma;
- u8 bits_per_word;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
struct chip_data *chip = spi_get_ctldata(spi);
u8 imask = 0;
u16 txlevel = 0;
- u16 clk_div = 0;
- u32 speed = 0;
- u32 cr0 = 0;
+ u16 clk_div;
+ u32 cr0;
int ret;
dws->dma_mapped = 0;
- dws->n_bytes = chip->n_bytes;
- dws->dma_width = chip->dma_width;
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
spi_enable_chip(dws, 0);
- cr0 = chip->cr0;
-
/* Handle per transfer options for bpw and speed */
- if (transfer->speed_hz) {
- speed = chip->speed_hz;
-
- if ((transfer->speed_hz != speed) || !chip->clk_div) {
- speed = transfer->speed_hz;
-
- /* clk_div doesn't support odd number */
- clk_div = (dws->max_freq / speed + 1) & 0xfffe;
+ if (transfer->speed_hz != chip->speed_hz) {
+ /* clk_div doesn't support odd number */
+ clk_div = (dws->max_freq / transfer->speed_hz + 1) & 0xfffe;
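+ /* e.g. (illustrative): max_freq = 100 MHz with a 10 MHz request
+  * gives (100/10 + 1) & 0xfffe = 10, the nearest even divider,
+  * for an exact 10 MHz SCLK.
+  */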
- chip->speed_hz = speed;
- chip->clk_div = clk_div;
+ chip->speed_hz = transfer->speed_hz;
+ chip->clk_div = clk_div;
- spi_set_clk(dws, chip->clk_div);
- }
+ spi_set_clk(dws, chip->clk_div);
}
- if (transfer->bits_per_word) {
- if (transfer->bits_per_word == 8) {
- dws->n_bytes = 1;
- dws->dma_width = 1;
- } else if (transfer->bits_per_word == 16) {
- dws->n_bytes = 2;
- dws->dma_width = 2;
- }
- cr0 = (transfer->bits_per_word - 1)
- | (chip->type << SPI_FRF_OFFSET)
- | (spi->mode << SPI_MODE_OFFSET)
- | (chip->tmode << SPI_TMOD_OFFSET);
+ if (transfer->bits_per_word == 8) {
+ dws->n_bytes = 1;
+ dws->dma_width = 1;
+ } else if (transfer->bits_per_word == 16) {
+ dws->n_bytes = 2;
+ dws->dma_width = 2;
+ } else {
+ return -EINVAL;
}
+ /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+ cr0 = (transfer->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
/*
* Adjust transfer mode if necessary. Requires platform dependent
chip->poll_mode = chip_info->poll_mode;
chip->type = chip_info->type;
-
- chip->rx_threshold = 0;
- chip->tx_threshold = 0;
- }
-
- if (spi->bits_per_word == 8) {
- chip->n_bytes = 1;
- chip->dma_width = 1;
- } else if (spi->bits_per_word == 16) {
- chip->n_bytes = 2;
- chip->dma_width = 2;
- }
- chip->bits_per_word = spi->bits_per_word;
-
- if (!spi->max_speed_hz) {
- dev_err(&spi->dev, "No max speed HZ parameter\n");
- return -EINVAL;
}
chip->tmode = 0; /* Tx & Rx */
- /* Default SPI mode is SCPOL = 0, SCPH = 0 */
- chip->cr0 = (chip->bits_per_word - 1)
- | (chip->type << SPI_FRF_OFFSET)
- | (spi->mode << SPI_MODE_OFFSET)
- | (chip->tmode << SPI_TMOD_OFFSET);
-
- if (spi->mode & SPI_LOOP)
- chip->cr0 |= 1 << SPI_SRL_OFFSET;
if (gpio_is_valid(spi->cs_gpio)) {
ret = gpio_direction_output(spi->cs_gpio,
dws->master = master;
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
- dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+ dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
- ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
- dws->name, master);
+ ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master);
if (ret < 0) {
- dev_err(&master->dev, "can not get IRQ\n");
+ dev_err(dev, "can not get IRQ\n");
goto err_free_master;
}
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
spi_enable_chip(dws, 0);
+ free_irq(dws->irq, master);
err_free_master:
spi_master_put(master);
return ret;
void dw_spi_remove_host(struct dw_spi *dws)
{
- if (!dws)
- return;
dw_spi_debugfs_remove(dws);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
- spi_enable_chip(dws, 0);
- /* Disable clk */
- spi_set_clk(dws, 0);
+
+ spi_shutdown_chip(dws);
+
+ free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
int dw_spi_suspend_host(struct dw_spi *dws)
{
- int ret = 0;
+ int ret;
ret = spi_master_suspend(dws->master);
if (ret)
return ret;
- spi_enable_chip(dws, 0);
- spi_set_clk(dws, 0);
- return ret;
+
+ spi_shutdown_chip(dws);
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
spi_enable_chip(dws, 1);
}
+static inline void spi_shutdown_chip(struct dw_spi *dws)
+{
+ spi_enable_chip(dws, 0);
+ spi_set_clk(dws, 0);
+}
+
/*
* Each SPI slave device to work with dw_api controller should
* has such a structure claiming its working mode (poll or PIO/DMA),
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
dspi->cur_chip->ctar_val);
- if (transfer->speed_hz)
- regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
- dspi->cur_chip->ctar_val);
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
if (config->mode & SPI_CPHA)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
if (config->mode & SPI_CPOL) {
cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+ } else {
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
}
if (config->mode & SPI_CS_HIGH)
cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
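The explicit clearing matters because MX51_ECSPI_CONFIG keeps its previous
contents across transfers; a hedged before/after illustration:

/* e.g. a device that first ran with SPI_CPOL left SCLKPOL(cs) set;
 * when it later asks for CPOL = 0, only the added else branches clear
 * the stale bit - OR-ing alone would keep the clock inverted.
 */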
cs_change = 1;
status = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- status = mpc512x_psc_spi_transfer_setup(spi, t);
- if (status < 0)
- break;
- }
+ status = mpc512x_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
if (cs_change)
mpc512x_psc_spi_activate_cs(spi);
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
unsigned int i;
- const __be32 *val;
- int len;
+ u32 val;
if (!np)
return 0;
return -ENODEV;
}
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
- val = of_get_property(pdev->dev.of_node,
- "clock-frequency", &len);
- if (val && len >= sizeof(__be32))
- hw->freq = be32_to_cpup(val);
- val = of_get_property(pdev->dev.of_node, "baud-width", &len);
- if (val && len >= sizeof(__be32))
- hw->baudwidth = be32_to_cpup(val);
+ if (!of_property_read_u32(np, "clock-frequency", &val))
+ hw->freq = val;
+ if (!of_property_read_u32(np, "baud-width", &val))
+ hw->baudwidth = val;
return 0;
}
#else /* !CONFIG_OF */
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
- speed_hz = xfer->speed_hz ? : spi->max_speed_hz;
+ speed_hz = xfer->speed_hz;
clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
return status;
}
+static int omap2_mcspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+
+ /* Only a single channel can have the FORCE bit enabled
+ * in its chconf0 register.
+ * Scan all channels and disable them except the current one.
+	 * A FORCE can remain from a previous transfer with cs_change enabled.
+ */
+ list_for_each_entry(cs, &ctx->cs, node) {
+ if (msg->spi->controller_state == cs)
+ continue;
+
+ if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
+ cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ writel_relaxed(cs->chconf0,
+ cs->base + OMAP2_MCSPI_CHCONF0);
+ readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
+ }
+ }
+
+ return 0;
+}
+
static int omap2_mcspi_transfer_one(struct spi_master *master,
struct spi_device *spi, struct spi_transfer *t)
{
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = omap2_mcspi_setup;
master->auto_runtime_pm = true;
+ master->prepare_message = omap2_mcspi_prepare_message;
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
if (in_8(&hw->regs->cdm) != cdm)
out_8(&hw->regs->cdm, cdm);
- spin_lock(&hw->bitbang.lock);
+ mutex_lock(&hw->bitbang.lock);
if (!hw->bitbang.busy) {
hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
/* Need to ndelay here? */
}
- spin_unlock(&hw->bitbang.lock);
+ mutex_unlock(&hw->bitbang.lock);
return 0;
}
if (ret)
return ret;
- spin_lock(&hw->bitbang.lock);
+ mutex_lock(&hw->bitbang.lock);
if (!hw->bitbang.busy) {
hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
/* need to ndelay for 0.5 clocktick ? */
}
- spin_unlock(&hw->bitbang.lock);
+ mutex_unlock(&hw->bitbang.lock);
return 0;
}
mutex_unlock(&qspi->list_lock);
+ ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
m->status = status;
spi_finalize_current_message(master);
- ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
-
return status;
}
while (remaining_words) {
int n_words, tx_words, rx_words;
+ u32 sr;
n_words = min(remaining_words, xspi->buffer_size);
if (use_irq) {
xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
wait_for_completion(&xspi->done);
- } else
- while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
- XSPI_SR_TX_EMPTY_MASK))
- ;
-
- /* A transmit has just completed. Process received data and
- * check for more data to transmit. Always inhibit the
- * transmitter while the Isr refills the transmit register/FIFO,
- * or make sure it is stopped if we're done.
- */
- if (use_irq)
+ /* A transmit has just completed. Process received data
+ * and check for more data to transmit. Always inhibit
+ * the transmitter while the Isr refills the transmit
+ * register/FIFO, or make sure it is stopped if we're
+ * done.
+ */
xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
+ xspi->regs + XSPI_CR_OFFSET);
+ sr = XSPI_SR_TX_EMPTY_MASK;
+ } else
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
- while (rx_words--)
- xilinx_spi_rx(xspi);
+ while (rx_words) {
+ if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
+ xilinx_spi_rx(xspi);
+ rx_words--;
+ continue;
+ }
+
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ if (!(sr & XSPI_SR_RX_EMPTY_MASK)) {
+ xilinx_spi_rx(xspi);
+ rx_words--;
+ }
+ }
remaining_words -= n_words;
}
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");
+#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
+ SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
+ "transfer_bytes_histo_" number, \
+ transfer_bytes_histo[index], "%lu")
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
+
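The bucket index is min(fls(len), 17) - 1, clamped at 0, so lengths 0-1
land in bucket 0, 2-3 in bucket 1, and so on up to 65536+ in bucket 16.
A standalone illustration of the indexing (userspace C, not kernel code;
fls32() stands in for the kernel's fls()):

#include <stdio.h>

static int fls32(unsigned int x)	/* find last set bit, 1-based */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int lens[] = { 0, 1, 2, 7, 255, 4096, 70000 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		int l2len = fls32(lens[i]);

		if (l2len > 17)		/* SPI_STATISTICS_HISTO_SIZE */
			l2len = 17;
		if (--l2len < 0)
			l2len = 0;
		printf("len %-6u -> histo bucket %d\n", lens[i], l2len);
	}
	return 0;
}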
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
&dev_attr_spi_device_bytes.attr,
&dev_attr_spi_device_bytes_rx.attr,
&dev_attr_spi_device_bytes_tx.attr,
+ &dev_attr_spi_device_transfer_bytes_histo0.attr,
+ &dev_attr_spi_device_transfer_bytes_histo1.attr,
+ &dev_attr_spi_device_transfer_bytes_histo2.attr,
+ &dev_attr_spi_device_transfer_bytes_histo3.attr,
+ &dev_attr_spi_device_transfer_bytes_histo4.attr,
+ &dev_attr_spi_device_transfer_bytes_histo5.attr,
+ &dev_attr_spi_device_transfer_bytes_histo6.attr,
+ &dev_attr_spi_device_transfer_bytes_histo7.attr,
+ &dev_attr_spi_device_transfer_bytes_histo8.attr,
+ &dev_attr_spi_device_transfer_bytes_histo9.attr,
+ &dev_attr_spi_device_transfer_bytes_histo10.attr,
+ &dev_attr_spi_device_transfer_bytes_histo11.attr,
+ &dev_attr_spi_device_transfer_bytes_histo12.attr,
+ &dev_attr_spi_device_transfer_bytes_histo13.attr,
+ &dev_attr_spi_device_transfer_bytes_histo14.attr,
+ &dev_attr_spi_device_transfer_bytes_histo15.attr,
+ &dev_attr_spi_device_transfer_bytes_histo16.attr,
NULL,
};
&dev_attr_spi_master_bytes.attr,
&dev_attr_spi_master_bytes_rx.attr,
&dev_attr_spi_master_bytes_tx.attr,
+ &dev_attr_spi_master_transfer_bytes_histo0.attr,
+ &dev_attr_spi_master_transfer_bytes_histo1.attr,
+ &dev_attr_spi_master_transfer_bytes_histo2.attr,
+ &dev_attr_spi_master_transfer_bytes_histo3.attr,
+ &dev_attr_spi_master_transfer_bytes_histo4.attr,
+ &dev_attr_spi_master_transfer_bytes_histo5.attr,
+ &dev_attr_spi_master_transfer_bytes_histo6.attr,
+ &dev_attr_spi_master_transfer_bytes_histo7.attr,
+ &dev_attr_spi_master_transfer_bytes_histo8.attr,
+ &dev_attr_spi_master_transfer_bytes_histo9.attr,
+ &dev_attr_spi_master_transfer_bytes_histo10.attr,
+ &dev_attr_spi_master_transfer_bytes_histo11.attr,
+ &dev_attr_spi_master_transfer_bytes_histo12.attr,
+ &dev_attr_spi_master_transfer_bytes_histo13.attr,
+ &dev_attr_spi_master_transfer_bytes_histo14.attr,
+ &dev_attr_spi_master_transfer_bytes_histo15.attr,
+ &dev_attr_spi_master_transfer_bytes_histo16.attr,
NULL,
};
struct spi_master *master)
{
unsigned long flags;
+ int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
+
+ if (l2len < 0)
+ l2len = 0;
spin_lock_irqsave(&stats->lock, flags);
stats->transfers++;
+ stats->transfer_bytes_histo[l2len]++;
stats->bytes += xfer->len;
if ((xfer->tx_buf) &&
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
+ if (dev->of_node) {
+ spi->irq = of_irq_get(dev->of_node, 0);
+ if (spi->irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (spi->irq < 0)
+ spi->irq = 0;
+ }
+
ret = dev_pm_domain_attach(dev, true);
if (ret != -EPROBE_DEFER) {
- ret = sdrv->probe(to_spi_device(dev));
+ ret = sdrv->probe(spi);
if (ret)
dev_pm_domain_detach(dev, true);
}
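With the core now mapping the interrupt at probe time (and honouring
-EPROBE_DEFER), an SPI driver can rely on spi->irq being resolved before
its probe callback runs. A hypothetical driver sketch (foo_irq() and
foo_probe() are placeholders, not from this patch):

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	/* acknowledge and handle the device interrupt */
	return IRQ_HANDLED;
}

static int foo_probe(struct spi_device *spi)
{
	if (!spi->irq)
		return -ENXIO;	/* no interrupt routed to this device */

	return devm_request_irq(&spi->dev, spi->irq, foo_irq, 0,
				dev_name(&spi->dev), spi);
}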
* spi_register_driver - register a SPI driver
* @sdrv: the driver to register
* Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_register_driver(struct spi_driver *sdrv)
{
* needs to discard the spi_device without adding it, then it should
* call spi_dev_put() on it.
*
- * Returns a pointer to the new device, or NULL.
+ * Return: a pointer to the new device, or NULL.
*/
struct spi_device *spi_alloc_device(struct spi_master *master)
{
* Companion function to spi_alloc_device. Devices allocated with
* spi_alloc_device can be added onto the spi bus with this function.
*
- * Returns 0 on success; negative errno on failure
+ * Return: 0 on success; negative errno on failure
*/
int spi_add_device(struct spi_device *spi)
{
* this is exported so that for example a USB or parport based adapter
* driver could add devices (which it would learn about out-of-band).
*
- * Returns the new device, or NULL.
+ * Return: the new device, or NULL.
*/
struct spi_device *spi_new_device(struct spi_master *master,
struct spi_board_info *chip)
*
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi->cs_gpio >= 0)
+ if (gpio_is_valid(spi->cs_gpio))
gpio_set_value(spi->cs_gpio, !enable);
else if (spi->master->set_cs)
spi->master->set_cs(spi, !enable);
*
* If there are more messages in the queue, the next message is returned from
* this call.
+ *
+ * Return: the next message in the queue, else NULL if the queue is empty.
*/
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
* spi_queued_transfer - transfer function for queued transfers
* @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued to the driver queue
+ *
+ * Return: zero on success, else a negative error code.
*/
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
}
spi->max_speed_hz = value;
- /* IRQ */
- spi->irq = irq_of_parse_and_map(nc, 0);
-
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
* only ones directly touching chip registers. It's how they allocate
* an spi_master structure, prior to calling spi_register_master().
*
- * This must be called from context that can sleep. It returns the SPI
- * master structure on success, else NULL.
+ * This must be called from context that can sleep.
*
* The caller is responsible for assigning the bus number and initializing
* the master's methods before calling spi_register_master(); and (after errors
* adding the device) calling spi_master_put() to prevent a memory leak.
+ *
+ * Return: the SPI master structure on success, else NULL.
*/
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
* success, else a negative error code (dropping the master's refcount).
* After a successful return, the caller is responsible for calling
* spi_unregister_master().
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_register_master(struct spi_master *master)
{
*
* Register a SPI device as with spi_register_master() which will
 * automatically be unregistered.
+ *
+ * Return: zero on success, else a negative error code.
*/
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
* arch init time. It returns a refcounted pointer to the relevant
* spi_master (which the caller must release), or NULL if there is
* no such master registered.
+ *
+ * Return: the SPI master structure on success, else NULL.
*/
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
* that the underlying controller or its driver does not support. For
* example, not all hardware supports wire transfers using nine bit words,
* LSB-first wire encoding, or active-high chipselects.
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits, ugly_bits;
- int status = 0;
+ int status;
	/* check mode to prevent DUAL and QUAD from being set at the same time
*/
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- if (__spi_validate_bits_per_word(spi->master, spi->bits_per_word))
- return -EINVAL;
+ status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
+ if (status)
+ return status;
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->master->max_speed_hz;
- spi_set_cs(spi, false);
-
if (spi->master->setup)
status = spi->master->setup(spi);
+ spi_set_cs(spi, false);
+
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_async(struct spi_device *spi, struct spi_message *message)
{
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
* Also, the caller is guaranteeing that the memory associated with the
* message will not be freed before this call returns.
*
- * It returns zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
* SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
* be released by a spi_bus_unlock call when the exclusive access is over.
*
- * It returns zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
* exclusive access is over. Data transfer must be done by spi_sync_locked
* and spi_async_locked calls when the SPI bus lock is held.
*
- * It returns zero on success, else a negative error code.
+ * Return: always zero.
*/
int spi_bus_lock(struct spi_master *master)
{
* This call releases an SPI bus lock previously obtained by an spi_bus_lock
* call.
*
- * It returns zero on success, else a negative error code.
+ * Return: always zero.
*/
int spi_bus_unlock(struct spi_master *master)
{
* portable code should never use this for more than 32 bytes.
* Performance-sensitive or bulk transfer code should instead use
* spi_{async,sync}() calls with dma-safe buffers.
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
}
musb->isr = omap2430_musb_interrupt;
+ /*
+ * Enable runtime PM for musb parent (this driver). We can't
+ * do it earlier as struct musb is not yet allocated and we
+ * need to touch the musb registers for runtime PM.
+ */
+ pm_runtime_enable(glue->dev);
+ status = pm_runtime_get_sync(glue->dev);
+ if (status < 0)
+ goto err1;
+
status = pm_runtime_get_sync(dev);
if (status < 0) {
dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
+ pm_runtime_put_sync(glue->dev);
goto err1;
}
phy_power_on(musb->phy);
pm_runtime_put_noidle(musb->controller);
+ pm_runtime_put_noidle(glue->dev);
return 0;
err1:
goto err2;
}
- pm_runtime_enable(&pdev->dev);
+ /*
+ * Note that we cannot enable PM runtime yet for this
+ * driver as we need struct musb initialized first.
+ * See omap2430_musb_init above.
+ */
ret = platform_device_add(musb);
if (ret) {
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- if (musb) {
- omap2430_low_level_init(musb);
- musb_writel(musb->mregs, OTG_INTERFSEL,
- musb->context.otg_interfsel);
- }
+ if (!musb)
+ return -EPROBE_DEFER;
+
+ omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
return 0;
}
return vq->acked_features & (1ULL << bit);
}
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
return vq->is_le;
}
+#else
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+ return virtio_legacy_is_little_endian() || vq->is_le;
+}
+#endif
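The memory accessors below (their bodies are elided in this hunk) feed
the predicate into the generic virtio byte-order helpers; vhost16_to_cpu(),
for instance, reduces to a sketch like:

	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);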
/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
if (len == 0)
return 0;
- old_file = ovl_path_open(old, O_RDONLY);
+ old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
if (IS_ERR(old_file))
return PTR_ERR(old_file);
- new_file = ovl_path_open(new, O_WRONLY);
+ new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
if (IS_ERR(new_file)) {
error = PTR_ERR(new_file);
goto out_fput;
out_cleanup:
ovl_cleanup(wdir, newdentry);
- goto out;
+ goto out2;
}
/*
ovl_path_upper(dentry, &realpath);
}
+ if (realpath.dentry->d_flags & DCACHE_OP_SELECT_INODE)
+ return realpath.dentry->d_op->d_select_inode(realpath.dentry, file_flags);
+
return d_backing_inode(realpath.dentry);
}
mntput(ufs->upper_mnt);
for (i = 0; i < ufs->numlower; i++)
mntput(ufs->lower_mnt[i]);
+ kfree(ufs->lower_mnt);
kfree(ufs->config.lowerdir);
kfree(ufs->config.upperdir);
oe->lowerstack[i].dentry = stack[i].dentry;
oe->lowerstack[i].mnt = ufs->lower_mnt[i];
}
+ kfree(stack);
root_dentry->d_fsdata = oe;
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags,
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1);
+ blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
}
rcu_read_unlock();
* This function is used to pass protocol port error state information
* to the switch driver. The switch driver can react to the proto_down
* by doing a phys down on the associated switch port.
+ * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
+ *	This function is used to get egress tunnel information for a given skb.
+ *	This is useful for retrieving outer tunnel header parameters while
+ *	sampling the packet.
*
*/
struct net_device_ops {
int (*ndo_get_iflink)(const struct net_device *dev);
int (*ndo_change_proto_down)(struct net_device *dev,
bool proto_down);
+ int (*ndo_fill_metadata_dst)(struct net_device *dev,
+ struct sk_buff *skb);
};
/**
void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
#include <linux/platform_device.h>
-#define INT_DMA_LCD 25
+#define INT_DMA_LCD (NR_IRQS_LEGACY + 25)
#define OMAP1_DMA_TOUT_IRQ (1 << 0)
#define OMAP_DMA_DROP_IRQ (1 << 1)
* @bytes_tx: number of bytes sent to device
* @bytes_rx: number of bytes received from device
*
+ * @transfer_bytes_histo:
+ *	transfer bytes histogram
*/
struct spi_statistics {
spinlock_t lock; /* lock for the whole structure */
unsigned long long bytes_rx;
unsigned long long bytes_tx;
+#define SPI_STATISTICS_HISTO_SIZE 17
+ unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
* @len: data buffer size
* Context: can sleep
*
- * This writes the buffer and returns zero or a negative error code.
+ * This function writes the buffer @buf.
* Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_write(struct spi_device *spi, const void *buf, size_t len)
* @len: data buffer size
* Context: can sleep
*
- * This reads the buffer and returns zero or a negative error code.
+ * This function reads the buffer @buf.
* Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_read(struct spi_device *spi, void *buf, size_t len)
*
* For more specific semantics see spi_sync().
*
- * It returns zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) eight bit number returned by the
- * device, or else a negative error code. Callable only from
- * contexts that can sleep.
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) eight bit number returned by the
+ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
{
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) sixteen bit number returned by the
- * device, or else a negative error code. Callable only from
- * contexts that can sleep.
- *
* The number is returned in wire-order, which is at least sometimes
* big-endian.
+ *
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the
+ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
{
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) sixteen bit number returned by the device in cpu
- * endianness, or else a negative error code. Callable only from contexts that
- * can sleep.
- *
* This function is similar to spi_w8r16, with the exception that it will
* convert the read 16 bit data word from big-endian to native endianness.
*
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
#include <linux/workqueue.h>
struct spi_bitbang {
- spinlock_t lock;
+ struct mutex lock;
u8 busy;
u8 use_dma;
u8 flags; /* extra spi->mode support */
return tun_dst;
}
+static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = skb_metadata_dst(skb);
+	struct metadata_dst *new_md;
+	int md_size;
+
+	if (!md_dst)
+		return ERR_PTR(-EINVAL);
+
+	md_size = md_dst->u.tun_info.options_len;
+
+ new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
+ if (!new_md)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+ sizeof(struct ip_tunnel_info) + md_size);
+ skb_dst_drop(skb);
+ dst_hold(&new_md->dst);
+ skb_dst_set(skb, &new_md->dst);
+ return new_md;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
+{
+ struct metadata_dst *dst;
+
+ dst = tun_dst_unclone(skb);
+ if (IS_ERR(dst))
+ return NULL;
+
+ return &dst->u.tun_info;
+}
+
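tun_dst_unclone() gives the caller a private, writable copy of the tunnel
metadata, since the original metadata_dst may be shared with clones of the
skb. The typical caller pattern looks like this (sketch; 'saddr' is
illustrative):

	struct ip_tunnel_info *info = skb_tunnel_info_unclone(skb);

	if (!info)
		return -ENOMEM;
	/* Now safe to modify: no other clone references this copy */
	info->key.u.ipv4.src = saddr;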
static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
* enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
* @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
* table. This allows future packets for the same connection to be identified
- * as 'established' or 'related'.
+ * as 'established' or 'related'. The flow key for the current packet will
+ * retain the pre-commit connection state.
* @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
* @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
* mask, the corresponding bit in the value is copied to the connection
}
#endif
+static void *try_ram_remap(resource_size_t offset, size_t size)
+{
+ struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
+
+ /* In the simple case just return the existing linear address */
+ if (!PageHighMem(page))
+ return __va(offset);
+ return NULL; /* fallback to ioremap_cache */
+}
+
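try_ram_remap() lets memremap() hand back the existing linear-map address
when the target page is ordinary lowmem, avoiding a redundant
ioremap_cache() alias of RAM. A caller sketch (the 'res' resource is
hypothetical):

	/* Map a reserved "System RAM" range as ordinary cacheable memory */
	void *base = memremap(res->start, resource_size(res), MEMREMAP_WB);

	if (!base)
		return -ENOMEM;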
/**
* memremap() - remap an iomem_resource as cacheable memory
* @offset: iomem resource start address
* the requested range is potentially in "System RAM"
*/
if (is_ram == REGION_INTERSECTS)
- addr = __va(offset);
- else
+ addr = try_ram_remap(offset, size);
+ if (!addr)
addr = ioremap_cache(offset, size);
}
if (core_kernel_text(a))
return;
- /* module_text_address is safe here: we're supposed to have reference
- * to module from symbol_get, so it can't go away. */
+ /*
+ * Even though we hold a reference on the module; we still need to
+ * disable preemption in order to safely traverse the data structure.
+ */
+ preempt_disable();
modaddr = __module_text_address(a);
BUG_ON(!modaddr);
module_put(modaddr);
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
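For reference, the pairing this change protects looks like the following
(sketch; 'some_symbol' is a placeholder for a symbol exported by a
loadable module):

	extern int some_symbol(void);
	/* Takes a reference on the owning module and returns the address */
	int (*fn)(void) = symbol_get(some_symbol);

	if (fn) {
		fn();
		/* Drops the module reference looked up from the address */
		symbol_put_addr(fn);
	}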
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
}
EXPORT_SYMBOL(dev_get_iflink);
+/**
+ * dev_fill_metadata_dst - Retrieve tunnel egress information.
+ * @dev: targeted interface
+ * @skb: The packet.
+ *
+ * For better visibility of tunnel traffic OVS needs to retrieve
+ * egress tunnel information for a packet. Following API allows
+ * user to get this info.
+ */
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info;
+
+ if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
+ return -EINVAL;
+
+ info = skb_tunnel_info_unclone(skb);
+ if (!info)
+ return -ENOMEM;
+ if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
+ return -EINVAL;
+
+ return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
+}
+EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
+
/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
do {
/* record parent and next child index */
pn = n;
- cindex = key ? get_index(key, pn) : 0;
+ cindex = (key > pn->key) ? get_index(key, pn) : 0;
if (cindex >> pn->bits)
break;
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
- SKB_GSO_IPIP)))
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT)))
goto out;
if (!skb->encapsulation)
csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
+static struct rtable *gre_get_rt(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi4 *fl,
+ const struct ip_tunnel_key *key)
+{
+ struct net *net = dev_net(dev);
+
+ memset(fl, 0, sizeof(*fl));
+ fl->daddr = key->u.ipv4.dst;
+ fl->saddr = key->u.ipv4.src;
+ fl->flowi4_tos = RT_TOS(key->tos);
+ fl->flowi4_mark = skb->mark;
+ fl->flowi4_proto = IPPROTO_GRE;
+
+ return ip_route_output_key(net, fl);
+}
+
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel_info *tun_info;
- struct net *net = dev_net(dev);
const struct ip_tunnel_key *key;
struct flowi4 fl;
struct rtable *rt;
goto err_free_skb;
key = &tun_info->key;
- memset(&fl, 0, sizeof(fl));
- fl.daddr = key->u.ipv4.dst;
- fl.saddr = key->u.ipv4.src;
- fl.flowi4_tos = RT_TOS(key->tos);
- fl.flowi4_mark = skb->mark;
- fl.flowi4_proto = IPPROTO_GRE;
-
- rt = ip_route_output_key(net, &fl);
+ rt = gre_get_rt(skb, dev, &fl, key);
if (IS_ERR(rt))
goto err_free_skb;
dev->stats.tx_dropped++;
}
+static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ if (ip_tunnel_info_af(info) != AF_INET)
+ return -EINVAL;
+
+ rt = gre_get_rt(skb, dev, &fl4, &info->key);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+ info->key.u.ipv4.src = fl4.saddr;
+ return 0;
+}
+
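Once ->ndo_fill_metadata_dst() succeeds, the caller can read back the
routing-derived source address. A sketch, re-fetching the tunnel info
since dev_fill_metadata_dst() may have uncloned the metadata dst:

	if (!dev_fill_metadata_dst(dev, skb)) {
		struct ip_tunnel_info *info = skb_tunnel_info(skb);

		pr_debug("egress saddr %pI4\n", &info->key.u.ipv4.src);
	}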
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct net_device *dev)
{
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
+ .ndo_fill_metadata_dst = gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
config NF_DUP_IPV4
tristate "Netfilter IPv4 packet duplication to alternate destination"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
help
This option enables the nf_dup_ipv4 core, which duplicates an IPv4
packet to be rerouted to another destination.
if (FIB_RES_DEV(res) == dev)
dev_match = true;
#endif
- if (dev_match || flags & XT_RPFILTER_LOOSE)
- return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
- return dev_match;
+ return dev_match || flags & XT_RPFILTER_LOOSE;
}
static bool rpfilter_is_local(const struct sk_buff *skb)
/* alpha = (1 - g) * alpha + g * F */
- alpha -= alpha >> dctcp_shift_g;
+ alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
if (bytes_ecn) {
/* If dctcp_shift_g == 1, a 32bit value would overflow
* after 8 Mbytes.
*/
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
skb_mstamp_get(&skb->skb_mstamp);
- NET_INC_STATS_BH(sock_net(sk), mib);
+ NET_INC_STATS(sock_net(sk), mib);
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu) {
+ skb->protocol = htons(ETH_P_IP);
+
if (skb->sk)
xfrm_local_error(skb, mtu);
else
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
+ struct rt6_info *rt;
struct fib_lookup_arg arg = {
.lookup_ptr = lookup,
.flags = FIB_LOOKUP_NOREF,
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
- if (arg.result)
- return arg.result;
+ rt = arg.result;
- dst_hold(&net->ipv6.ip6_null_entry->dst);
- return &net->ipv6.ip6_null_entry->dst;
+ if (!rt) {
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return &net->ipv6.ip6_null_entry->dst;
+ }
+
+ if (rt->rt6i_flags & RTF_REJECT &&
+ rt->dst.error == -EAGAIN) {
+ ip6_rt_put(rt);
+ rt = net->ipv6.ip6_null_entry;
+ dst_hold(&rt->dst);
+ }
+
+ return &rt->dst;
}
static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
- return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+ struct rt6_info *rt;
+
+ rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+ if (rt->rt6i_flags & RTF_REJECT &&
+ rt->dst.error == -EAGAIN) {
+ ip6_rt_put(rt);
+ rt = net->ipv6.ip6_null_entry;
+ dst_hold(&rt->dst);
+ }
+
+ return &rt->dst;
}
static void __net_init fib6_tables_init(struct net *net)
if (np->frag_size)
mtu = np->frag_size;
}
+ if (mtu < hlen + sizeof(struct frag_hdr) + 8)
+ goto fail_toobig;
mtu -= hlen + sizeof(struct frag_hdr);
frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
- (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+ (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
+ (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
dst_release(dst);
dst = NULL;
}
config NF_DUP_IPV6
tristate "Netfilter IPv6 packet duplication to alternate destination"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
help
This option enables the nf_dup_ipv6 core, which duplicates an IPv6
packet to be rerouted to another destination.
s = s2;
}
}
+EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
static int nf_ct_net_init(struct net *net)
{
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
saved_fn = fn;
+ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+ oif = 0;
+
redo_rt6_select:
rt = rt6_select(fn, oif, strict);
if (rt->rt6i_nsiblings)
struct flowi6 *fl6)
{
int flags = 0;
+ bool any_src;
fl6->flowi6_iif = LOOPBACK_IFINDEX;
+ any_src = ipv6_addr_any(&fl6->saddr);
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
- fl6->flowi6_oif)
+ (fl6->flowi6_oif && any_src))
flags |= RT6_LOOKUP_F_IFACE;
- if (!ipv6_addr_any(&fl6->saddr))
+ if (!any_src)
flags |= RT6_LOOKUP_F_HAS_SADDR;
else if (sk)
flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
if (!skb->ignore_df && skb->len > mtu) {
skb->dev = dst->dev;
+ skb->protocol = htons(ETH_P_IPV6);
if (xfrm6_local_dontfrag(skb))
xfrm6_local_rxpmtu(skb, mtu);
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
int mtu;
+ bool toobig;
#ifdef CONFIG_NETFILTER
if (!x) {
}
#endif
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ goto skip_frag;
+
if (skb->protocol == htons(ETH_P_IPV6))
mtu = ip6_skb_dst_mtu(skb);
else
mtu = dst_mtu(skb_dst(skb));
- if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+ toobig = skb->len > mtu && !skb_is_gso(skb);
+
+ if (toobig && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
return -EMSGSIZE;
- } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
+ } else if (!skb->ignore_df && toobig && skb->sk) {
xfrm_local_error(skb, mtu);
return -EMSGSIZE;
}
- if (x->props.mode == XFRM_MODE_TUNNEL &&
- ((skb->len > mtu && !skb_is_gso(skb)) ||
- dst_allfrag(skb_dst(skb)))) {
+ if (toobig || dst_allfrag(skb_dst(skb)))
return ip6_fragment(sk, skb,
x->outer_mode->afinfo->output_finish);
- }
+
+skip_frag:
return x->outer_mode->afinfo->output_finish(sk, skb);
}
return;
case IPPROTO_ICMPV6:
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+ if (!onlyproto && (nh + offset + 2 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
u8 *icmp;
nh = skb_network_header(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
offset += ipv6_optlen(exthdr);
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+ if (!onlyproto && (nh + offset + 3 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
struct ip6_mh *mh;
nh = skb_network_header(skb);
for (element = hashbin_get_first(iter->hashbin);
element != NULL;
element = hashbin_get_next(iter->hashbin)) {
- if (!off || *off-- == 0) {
+ if (!off || (*off)-- == 0) {
/* NB: hashbin left locked */
return element;
}
err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
- /* Error is cleare after succecful sending to at least one
+ /* Error is cleared after successful sending to at least one
* registered KM */
if ((broadcast_flags & BROADCAST_REGISTERED) && err)
err = err2;
#endif
synchronize_net();
nf_queue_nf_hook_drop(net, &entry->ops);
+ /* other cpu might still process nfqueue verdict that used reg */
+ synchronize_net();
kfree(entry);
}
EXPORT_SYMBOL(nf_unregister_net_hook);
ip_set_timeout_expired(ext_timeout(n, set))))
n = NULL;
- e = kzalloc(set->dsize, GFP_KERNEL);
+ e = kzalloc(set->dsize, GFP_ATOMIC);
if (!e)
return -ENOMEM;
e->id = d->id;
int pos, idx, shift;
err = 0;
- netlink_table_grab();
+ netlink_lock_table();
for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
if (len - pos < sizeof(u32))
break;
}
if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
err = -EFAULT;
- netlink_table_ungrab();
+ netlink_unlock_table();
break;
}
case NETLINK_CAP_ACK:
struct sw_flow_key *key, const struct nlattr *attr,
const struct nlattr *actions, int actions_len)
{
- struct ip_tunnel_info info;
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
if (vport) {
int err;
- upcall.egress_tun_info = &info;
- err = ovs_vport_get_egress_tun_info(vport, skb,
- &upcall);
- if (err)
- upcall.egress_tun_info = NULL;
+ err = dev_fill_metadata_dst(vport->dev, skb);
+ if (!err)
+ upcall.egress_tun_info = skb_tunnel_info(skb);
}
break;
nla_data(a));
/* Hide stolen IP fragments from user space. */
- if (err == -EINPROGRESS)
- return 0;
+ if (err)
+ return err == -EINPROGRESS ? 0 : err;
break;
}
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
state = ovs_ct_get_state(ctinfo);
+ if (!nf_ct_is_confirmed(ct))
+ state |= OVS_CS_F_NEW;
if (ct->master)
state |= OVS_CS_F_RELATED;
zone = nf_ct_zone(ct);
struct nf_conn *ct;
int err;
- if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
- return -ENOTSUPP;
-
/* The connection could be invalid, in which case set_label is no-op.*/
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return helper->help(skb, protoff, ct, ctinfo);
}
+/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
+ * value if 'skb' is freed.
+ */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
u16 zone, struct sk_buff *skb)
{
return err;
ovs_cb.mru = IPCB(skb)->frag_max_size;
- } else if (key->eth.type == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ } else if (key->eth.type == htons(ETH_P_IPV6)) {
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
struct sk_buff *reasm;
if (!reasm)
return -EINPROGRESS;
- if (skb == reasm)
+ if (skb == reasm) {
+ kfree_skb(skb);
return -EINVAL;
+ }
+
+ /* Don't free 'skb' even though it is one of the original
+ * fragments, as we're going to morph it into the head.
+ */
+ skb_get(skb);
+ nf_ct_frag6_consume_orig(reasm);
key->ip.proto = ipv6_hdr(reasm)->nexthdr;
skb_morph(skb, reasm);
+ skb->next = reasm->next;
consume_skb(reasm);
ovs_cb.mru = IP6CB(skb)->frag_max_size;
-#else
- return -EPFNOSUPPORT;
#endif
} else {
+ kfree_skb(skb);
return -EPFNOSUPPORT;
}
return true;
}
-static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
+static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
const struct ovs_conntrack_info *info,
struct sk_buff *skb)
{
}
}
+ ovs_ct_update_key(skb, key, true);
+
return 0;
}
err = __ovs_ct_lookup(net, key, info, skb);
if (err)
return err;
-
- ovs_ct_update_key(skb, key, true);
}
return 0;
if (nf_conntrack_confirm(skb) != NF_ACCEPT)
return -EINVAL;
- ovs_ct_update_key(skb, key, true);
-
return 0;
}
return false;
}
+/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
+ * value if 'skb' is freed.
+ */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
struct sw_flow_key *key,
const struct ovs_conntrack_info *info)
&info->labels.mask);
err:
skb_push(skb, nh_ofs);
+ if (err)
+ kfree_skb(skb);
return err;
}
case OVS_CT_ATTR_MARK: {
struct md_mark *mark = nla_data(a);
+ if (!mark->mask) {
+ OVS_NLERR(log, "ct_mark mask cannot be 0");
+ return -EINVAL;
+ }
info->mark = *mark;
break;
}
case OVS_CT_ATTR_LABELS: {
struct md_labels *labels = nla_data(a);
+ if (!labels_nonzero(&labels->mask)) {
+ OVS_NLERR(log, "ct_labels mask cannot be 0");
+ return -EINVAL;
+ }
info->labels = *labels;
break;
}
if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
return -EMSGSIZE;
- if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
&ct_info->mark))
return -EMSGSIZE;
if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&ct_info->labels.mask) &&
nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
&ct_info->labels))
return -EMSGSIZE;
int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
void ovs_ct_free_action(const struct nlattr *a);
-static inline bool ovs_ct_state_supported(u32 state)
-{
- return !(state & ~(OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED |
- OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR |
- OVS_CS_F_INVALID | OVS_CS_F_TRACKED));
-}
+#define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
+ OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | \
+ OVS_CS_F_INVALID | OVS_CS_F_TRACKED)
#else
#include <linux/errno.h>
return false;
}
-static inline bool ovs_ct_state_supported(u32 state)
-{
- return false;
-}
-
static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla,
const struct sw_flow_key *key,
struct sw_flow_actions **acts, bool log)
struct sw_flow_key *key,
const struct ovs_conntrack_info *info)
{
+ kfree_skb(skb);
return -ENOTSUPP;
}
}
static inline void ovs_ct_free_action(const struct nlattr *a) { }
+
+#define CT_SUPPORTED_MASK 0
#endif /* CONFIG_NF_CONNTRACK */
#endif /* ovs_conntrack.h */
if (upcall_info->egress_tun_info) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
- err = ovs_nla_put_egress_tunnel_key(user_skb,
- upcall_info->egress_tun_info,
- upcall_info->egress_tun_opts);
+ err = ovs_nla_put_tunnel_info(user_skb,
+ upcall_info->egress_tun_info);
BUG_ON(err);
nla_nest_end(user_skb, nla);
}
*/
struct dp_upcall_info {
struct ip_tunnel_info *egress_tun_info;
- const void *egress_tun_opts;
const struct nlattr *userdata;
const struct nlattr *actions;
int actions_len;
if ((output->tun_flags & TUNNEL_OAM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
- if (tun_opts) {
+ if (swkey_tun_opts_len) {
if (output->tun_flags & TUNNEL_GENEVE_OPT &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
swkey_tun_opts_len, tun_opts))
return 0;
}
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
- const struct ip_tunnel_info *egress_tun_info,
- const void *egress_tun_opts)
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
{
- return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
- egress_tun_opts,
- egress_tun_info->options_len);
+ return __ipv4_tun_to_nlattr(skb, &tun_info->key,
+ ip_tunnel_info_opts(tun_info),
+ tun_info->options_len);
}
static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
- if (!is_mask && !ovs_ct_state_supported(ct_state)) {
+ if (ct_state & ~CT_SUPPORTED_MASK) {
OVS_NLERR(log, "ct_state flags %08x unsupported",
ct_state);
return -EINVAL;
} else {
memset(nla_data(nla), val, nla_len(nla));
}
+
+ if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+ *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
}
}
if (!start)
return -EMSGSIZE;
- err = ipv4_tun_to_nlattr(skb, &tun_info->key,
- tun_info->options_len ?
- ip_tunnel_info_opts(tun_info) : NULL,
- tun_info->options_len);
+ err = ovs_nla_put_tunnel_info(skb, tun_info);
if (err)
return err;
nla_nest_end(skb, start);
int ovs_nla_get_match(struct net *, struct sw_flow_match *,
const struct nlattr *key, const struct nlattr *mask,
bool log);
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
- const struct ip_tunnel_info *,
- const void *egress_tun_opts);
+
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info);
bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
return 0;
}
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
-{
- struct geneve_port *geneve_port = geneve_vport(vport);
- struct net *net = ovs_dp_get_net(vport->dp);
- __be16 dport = htons(geneve_port->port_no);
- __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-
- return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
- skb, IPPROTO_UDP, sport, dport);
-}
-
static struct vport *geneve_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
.get_options = geneve_get_options,
.send = ovs_netdev_send,
.owner = THIS_MODULE,
- .get_egress_tun_info = geneve_get_egress_tun_info,
};
static int __init ovs_geneve_tnl_init(void)
return ovs_netdev_link(vport, parms->name);
}
-static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
-{
- return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
- skb, IPPROTO_GRE, 0, 0);
-}
-
static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
.send = ovs_netdev_send,
- .get_egress_tun_info = gre_get_egress_tun_info,
.destroy = ovs_netdev_tunnel_destroy,
.owner = THIS_MODULE,
};
free_netdev(dev);
}
+static struct rtnl_link_stats64 *
+internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->tx_errors = dev->stats.tx_errors;
+ stats->tx_dropped = dev->stats.tx_dropped;
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *percpu_stats;
+ struct pcpu_sw_netstats local_stats;
+ unsigned int start;
+
+ percpu_stats = per_cpu_ptr(dev->tstats, i);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+ local_stats = *percpu_stats;
+ } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+
+ stats->rx_bytes += local_stats.rx_bytes;
+ stats->rx_packets += local_stats.rx_packets;
+ stats->tx_bytes += local_stats.tx_bytes;
+ stats->tx_packets += local_stats.tx_packets;
+ }
+
+ return stats;
+}
+
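The reader loop above pairs with u64_stats writer sections on the data
path; a sketch of the matching update side, as a transmit path using
dev->tstats would do it (writers bump the seqcount so the reader never
sees a torn 64-bit counter on 32-bit SMP):

	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_packets++;
	tstats->tx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);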
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = internal_dev_change_mtu,
+ .ndo_get_stats64 = internal_get_stats,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
err = -ENOMEM;
goto error_free_vport;
}
+ vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!vport->dev->tstats) {
+ err = -ENOMEM;
+ goto error_free_netdev;
+ }
dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
internal_dev = internal_dev_priv(vport->dev);
rtnl_lock();
err = register_netdevice(vport->dev);
if (err)
- goto error_free_netdev;
+ goto error_unlock;
dev_set_promiscuity(vport->dev, 1);
rtnl_unlock();
return vport;
-error_free_netdev:
+error_unlock:
rtnl_unlock();
+ free_percpu(vport->dev->tstats);
+error_free_netdev:
free_netdev(vport->dev);
error_free_vport:
ovs_vport_free(vport);
/* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(vport->dev);
-
+ free_percpu(vport->dev->tstats);
rtnl_unlock();
}
return ovs_netdev_link(vport, parms->name);
}
-static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
-{
- struct vxlan_dev *vxlan = netdev_priv(vport->dev);
- struct net *net = ovs_dp_get_net(vport->dp);
- __be16 dst_port = vxlan_dev_dst_port(vxlan);
- __be16 src_port;
- int port_min;
- int port_max;
-
- inet_get_local_port_range(net, &port_min, &port_max);
- src_port = udp_flow_src_port(net, skb, 0, 0, true);
-
- return ovs_tunnel_get_egress_info(upcall, net,
- skb, IPPROTO_UDP,
- src_port, dst_port);
-}
-
static struct vport_ops ovs_vxlan_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_VXLAN,
.create = vxlan_create,
.destroy = ovs_netdev_tunnel_destroy,
.get_options = vxlan_get_options,
.send = ovs_netdev_send,
- .get_egress_tun_info = vxlan_get_egress_tun_info,
};
static int __init ovs_vxlan_tnl_init(void)
call_rcu(&vport->rcu, free_vport_rcu);
}
EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
-
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
- struct net *net,
- struct sk_buff *skb,
- u8 ipproto,
- __be16 tp_src,
- __be16 tp_dst)
-{
- struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
- const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
- const struct ip_tunnel_key *tun_key;
- u32 skb_mark = skb->mark;
- struct rtable *rt;
- struct flowi4 fl;
-
- if (unlikely(!tun_info))
- return -EINVAL;
- if (ip_tunnel_info_af(tun_info) != AF_INET)
- return -EINVAL;
-
- tun_key = &tun_info->key;
-
- /* Route lookup to get srouce IP address.
- * The process may need to be changed if the corresponding process
- * in vports ops changed.
- */
- rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
-
- ip_rt_put(rt);
-
- /* Generate egress_tun_info based on tun_info,
- * saddr, tp_src and tp_dst
- */
- ip_tunnel_key_init(&egress_tun_info->key,
- fl.saddr, tun_key->u.ipv4.dst,
- tun_key->tos,
- tun_key->ttl,
- tp_src, tp_dst,
- tun_key->tun_id,
- tun_key->tun_flags);
- egress_tun_info->options_len = tun_info->options_len;
- egress_tun_info->mode = tun_info->mode;
- upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall)
-{
- /* get_egress_tun_info() is only implemented on tunnel ports. */
- if (unlikely(!vport->ops->get_egress_tun_info))
- return -EINVAL;
-
- return vport->ops->get_egress_tun_info(vport, skb, upcall);
-}
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/u64_stats_sync.h>
-#include <net/route.h>
#include "datapath.h"
int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
- struct net *net,
- struct sk_buff *,
- u8 ipproto,
- __be16 tp_src,
- __be16 tp_dst);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct dp_upcall_info *upcall);
-
/**
* struct vport_portids - array of netlink portids of a vport.
* must be protected by rcu.
* have any configuration.
* @send: Send a packet on the device.
* zero for dropped packets or negative for error.
- * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
- * a packet.
*/
struct vport_ops {
enum ovs_vport_type type;
int (*get_options)(const struct vport *, struct sk_buff *);
void (*send)(struct vport *, struct sk_buff *);
- int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
- struct dp_upcall_info *upcall);
-
struct module *owner;
struct list_head list;
};
int ovs_vport_ops_register(struct vport_ops *ops);
void ovs_vport_ops_unregister(struct vport_ops *ops);
-static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
- const struct ip_tunnel_key *key,
- u32 mark,
- struct flowi4 *fl,
- u8 protocol)
-{
- struct rtable *rt;
-
- memset(fl, 0, sizeof(*fl));
- fl->daddr = key->u.ipv4.dst;
- fl->saddr = key->u.ipv4.src;
- fl->flowi4_tos = RT_TOS(key->tos);
- fl->flowi4_mark = mark;
- fl->flowi4_proto = protocol;
-
- rt = ip_route_output_key(net, fl);
- return rt;
-}
-
static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
vport->ops->send(vport, skb);
}
to_copy = min(tc->t_tinc_data_rem, left);
- pskb_pull(clone, offset);
- pskb_trim(clone, to_copy);
+ if (!pskb_pull(clone, offset) ||
+ pskb_trim(clone, to_copy)) {
+			pr_warn("rds_tcp_data_recv: pull/trim failed left %zu data_rem %zu skb_len %d\n",
+				left, tc->t_tinc_data_rem, skb->len);
+ kfree_skb(clone);
+ desc->error = -ENOMEM;
+ goto out;
+ }
skb_queue_tail(&tinc->ti_skb_list, clone);
rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
goto out;
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
- goto out;
+ goto out1;
register_sysctl_root(&net_sysctl_root);
out:
return ret;
+out1:
+ unregister_sysctl_table(net_header);
+ net_header = NULL;
+ goto out;
}
struct ctl_table_header *register_net_sysctl(struct net *net,
#include "core.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
-#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
+#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
+#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
const char tipc_bclink_name[] = "broadcast-link";
if (!bcl)
return -ENOPROTOOPT;
- if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+ if (limit < BCLINK_WIN_MIN)
+ limit = BCLINK_WIN_MIN;
+ if (limit > TIPC_MAX_LINK_WIN)
return -EINVAL;
-
tipc_bclink_lock(net);
tipc_link_set_queue_limits(bcl, limit);
tipc_bclink_unlock(net);
{
struct sk_buff *head = *headbuf;
struct sk_buff *frag = *buf;
- struct sk_buff *tail;
+ struct sk_buff *tail = NULL;
struct tipc_msg *msg;
u32 fragid;
int delta;
if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
goto err;
head = *headbuf = frag;
- skb_frag_list_init(head);
- TIPC_SKB_CB(head)->tail = NULL;
*buf = NULL;
+ TIPC_SKB_CB(head)->tail = NULL;
+ if (skb_is_nonlinear(head)) {
+ skb_walk_frags(head, tail) {
+ TIPC_SKB_CB(head)->tail = tail;
+ }
+ } else {
+ skb_frag_list_init(head);
+ }
return 0;
}
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
+#define UDP_MIN_HEADROOM 28
+
static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
[TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
[TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
struct sk_buff *clone;
struct rtable *rt;
+ if (skb_headroom(skb) < UDP_MIN_HEADROOM)
+ pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+
clone = skb_clone(skb, GFP_ATOMIC);
skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
ub = rcu_dereference_rtnl(b->media_ptr);
err = misc_register(&vsock_device);
if (err) {
pr_err("Failed to register misc device\n");
- return -ENOENT;
+ goto err_reset_transport;
}
err = proto_register(&vsock_proto, 1); /* we want our slab */
if (err) {
pr_err("Cannot register vsock protocol\n");
- goto err_misc_deregister;
+ goto err_deregister_misc;
}
err = sock_register(&vsock_family_ops);
err_unregister_proto:
proto_unregister(&vsock_proto);
-err_misc_deregister:
+err_deregister_misc:
misc_deregister(&vsock_device);
+err_reset_transport:
transport = NULL;
err_busy:
mutex_unlock(&vsock_register_mutex);
static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
-static void vmci_transport_peer_attach_cb(u32 sub_id,
- const struct vmci_event_data *ed,
- void *client_data);
static void vmci_transport_peer_detach_cb(u32 sub_id,
const struct vmci_event_data *ed,
void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
+static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
struct vmci_transport_packet pkt;
};
+static LIST_HEAD(vmci_transport_cleanup_list);
+static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
+static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);
+
static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
return err;
}
-static void vmci_transport_peer_attach_cb(u32 sub_id,
- const struct vmci_event_data *e_data,
- void *client_data)
-{
- struct sock *sk = client_data;
- const struct vmci_event_payload_qp *e_payload;
- struct vsock_sock *vsk;
-
- e_payload = vmci_event_data_const_payload(e_data);
-
- vsk = vsock_sk(sk);
-
- /* We don't ask for delayed CBs when we subscribe to this event (we
- * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
- * guarantees in that case about what context we might be running in,
- * so it could be BH or process, blockable or non-blockable. So we
- * need to account for all possible contexts here.
- */
- local_bh_disable();
- bh_lock_sock(sk);
-
- /* XXX This is lame, we should provide a way to lookup sockets by
- * qp_handle.
- */
- if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
- e_payload->handle)) {
- /* XXX This doesn't do anything, but in the future we may want
- * to set a flag here to verify the attach really did occur and
- * we weren't just sent a datagram claiming it was.
- */
- goto out;
- }
-
-out:
- bh_unlock_sock(sk);
- local_bh_enable();
-}
-
static void vmci_transport_handle_detach(struct sock *sk)
{
struct vsock_sock *vsk;
const struct vmci_event_data *e_data,
void *client_data)
{
- struct sock *sk = client_data;
+ struct vmci_transport *trans = client_data;
const struct vmci_event_payload_qp *e_payload;
- struct vsock_sock *vsk;
e_payload = vmci_event_data_const_payload(e_data);
- vsk = vsock_sk(sk);
- if (vmci_handle_is_invalid(e_payload->handle))
- return;
-
- /* Same rules for locking as for peer_attach_cb(). */
- local_bh_disable();
- bh_lock_sock(sk);
/* XXX This is lame, we should provide a way to lookup sockets by
* qp_handle.
*/
- if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
- e_payload->handle))
- vmci_transport_handle_detach(sk);
+ if (vmci_handle_is_invalid(e_payload->handle) ||
+ vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+ return;
- bh_unlock_sock(sk);
- local_bh_enable();
+ /* We don't ask for delayed CBs when we subscribe to this event (we
+ * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
+ * guarantees in that case about what context we might be running in,
+ * so it could be BH or process, blockable or non-blockable. So we
+ * need to account for all possible contexts here.
+ */
+ spin_lock_bh(&trans->lock);
+ if (!trans->sk)
+ goto out;
+
+ /* Apart from here, trans->lock is only grabbed as part of sk destruct,
+ * where trans->sk isn't locked.
+ */
+ bh_lock_sock(trans->sk);
+
+ vmci_transport_handle_detach(trans->sk);
+
+ bh_unlock_sock(trans->sk);
+ out:
+ spin_unlock_bh(&trans->lock);
}
static void vmci_transport_qp_resumed_cb(u32 sub_id,
*/
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
vmci_transport_peer_detach_cb,
- pending, &detach_sub_id);
+ vmci_trans(vpending), &detach_sub_id);
if (err < VMCI_SUCCESS) {
vmci_transport_send_reset(pending, pkt);
err = vmci_transport_error_to_vsock_error(err);
|| vmci_trans(vsk)->qpair
|| vmci_trans(vsk)->produce_size != 0
|| vmci_trans(vsk)->consume_size != 0
- || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
|| vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
skerr = EPROTO;
err = -EINVAL;
struct vsock_sock *vsk;
struct vmci_handle handle;
struct vmci_qp *qpair;
- u32 attach_sub_id;
u32 detach_sub_id;
bool is_local;
u32 flags;
vsk = vsock_sk(sk);
handle = VMCI_INVALID_HANDLE;
- attach_sub_id = VMCI_INVALID_ID;
detach_sub_id = VMCI_INVALID_ID;
/* If we have gotten here then we should be past the point where old
goto destroy;
}
- /* Subscribe to attach and detach events first.
+ /* Subscribe to detach events first.
*
* XXX We attach once for each queue pair created for now so it is easy
* to find the socket (it's provided), but later we should only
* subscribe once and add a way to lookup sockets by queue pair handle.
*/
- err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
- vmci_transport_peer_attach_cb,
- sk, &attach_sub_id);
- if (err < VMCI_SUCCESS) {
- err = vmci_transport_error_to_vsock_error(err);
- goto destroy;
- }
-
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
vmci_transport_peer_detach_cb,
- sk, &detach_sub_id);
+ vmci_trans(vsk), &detach_sub_id);
if (err < VMCI_SUCCESS) {
err = vmci_transport_error_to_vsock_error(err);
goto destroy;
vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
pkt->u.size;
- vmci_trans(vsk)->attach_sub_id = attach_sub_id;
vmci_trans(vsk)->detach_sub_id = detach_sub_id;
vmci_trans(vsk)->notify_ops->process_negotiate(sk);
return 0;
destroy:
- if (attach_sub_id != VMCI_INVALID_ID)
- vmci_event_unsubscribe(attach_sub_id);
-
if (detach_sub_id != VMCI_INVALID_ID)
vmci_event_unsubscribe(detach_sub_id);
vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
vmci_trans(vsk)->qpair = NULL;
vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
- vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
- VMCI_INVALID_ID;
+ vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
vmci_trans(vsk)->notify_ops = NULL;
+ INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
+ vmci_trans(vsk)->sk = &vsk->sk;
+ spin_lock_init(&vmci_trans(vsk)->lock);
if (psk) {
vmci_trans(vsk)->queue_pair_size =
vmci_trans(psk)->queue_pair_size;
return 0;
}
-static void vmci_transport_destruct(struct vsock_sock *vsk)
+static void vmci_transport_free_resources(struct list_head *transport_list)
{
- if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
- vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
- vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
- }
+ while (!list_empty(transport_list)) {
+ struct vmci_transport *transport =
+ list_first_entry(transport_list, struct vmci_transport,
+ elem);
+ list_del(&transport->elem);
- if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
- vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
- vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
- }
+ if (transport->detach_sub_id != VMCI_INVALID_ID) {
+ vmci_event_unsubscribe(transport->detach_sub_id);
+ transport->detach_sub_id = VMCI_INVALID_ID;
+ }
- if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
- vmci_qpair_detach(&vmci_trans(vsk)->qpair);
- vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
- vmci_trans(vsk)->produce_size = 0;
- vmci_trans(vsk)->consume_size = 0;
+ if (!vmci_handle_is_invalid(transport->qp_handle)) {
+ vmci_qpair_detach(&transport->qpair);
+ transport->qp_handle = VMCI_INVALID_HANDLE;
+ transport->produce_size = 0;
+ transport->consume_size = 0;
+ }
+
+ kfree(transport);
}
+}
+
+static void vmci_transport_cleanup(struct work_struct *work)
+{
+ LIST_HEAD(pending);
+
+ spin_lock_bh(&vmci_transport_cleanup_lock);
+ list_replace_init(&vmci_transport_cleanup_list, &pending);
+ spin_unlock_bh(&vmci_transport_cleanup_lock);
+ vmci_transport_free_resources(&pending);
+}
+
+static void vmci_transport_destruct(struct vsock_sock *vsk)
+{
+ /* Ensure that the detach callback doesn't use the sk/vsk
+ * we are about to destruct.
+ */
+ spin_lock_bh(&vmci_trans(vsk)->lock);
+ vmci_trans(vsk)->sk = NULL;
+ spin_unlock_bh(&vmci_trans(vsk)->lock);
if (vmci_trans(vsk)->notify_ops)
vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
- kfree(vsk->trans);
+ spin_lock_bh(&vmci_transport_cleanup_lock);
+ list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
+ spin_unlock_bh(&vmci_transport_cleanup_lock);
+ schedule_work(&vmci_transport_cleanup_work);
+
vsk->trans = NULL;
}
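
Teardown calls such as vmci_event_unsubscribe() and vmci_qpair_detach()
can block, while the socket destructor may now run from a context where
blocking is not allowed, so destruct only parks the transport on a
spinlock-protected list and schedules the cleanup work item, which
drains the list where blocking is safe. A userspace sketch of that
deferred-free shape (hypothetical names; schedule_work() is reduced to
calling the worker directly):

    #include <pthread.h>
    #include <stdlib.h>

    struct item {                   /* stands in for vmci_transport */
            struct item *next;
    };

    static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *cleanup_list;  /* singly linked pending list */

    /* Non-blocking path: just park the item, like list_add() under
     * vmci_transport_cleanup_lock followed by schedule_work().
     */
    static void defer_free(struct item *it)
    {
            pthread_mutex_lock(&cleanup_lock);
            it->next = cleanup_list;
            cleanup_list = it;
            pthread_mutex_unlock(&cleanup_lock);
    }

    /* Worker: detach the whole list under the lock, then free the
     * entries outside it, mirroring list_replace_init() +
     * vmci_transport_free_resources() above.
     */
    static void cleanup_worker(void)
    {
            struct item *pending;

            pthread_mutex_lock(&cleanup_lock);
            pending = cleanup_list;
            cleanup_list = NULL;
            pthread_mutex_unlock(&cleanup_lock);

            while (pending) {
                    struct item *next = pending->next;

                    free(pending);  /* blocking work is safe here */
                    pending = next;
            }
    }

    int main(void)
    {
            defer_free(calloc(1, sizeof(struct item)));
            defer_free(calloc(1, sizeof(struct item)));
            cleanup_worker();
            return 0;
    }
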
static void __exit vmci_transport_exit(void)
{
+ cancel_work_sync(&vmci_transport_cleanup_work);
+ vmci_transport_free_resources(&vmci_transport_cleanup_list);
+
if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
if (vmci_datagram_destroy_handle(
vmci_transport_stream_handle) != VMCI_SUCCESS)
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
u64 queue_pair_size;
u64 queue_pair_min_size;
u64 queue_pair_max_size;
- u32 attach_sub_id;
u32 detach_sub_id;
union vmci_transport_notify notify;
struct vmci_transport_notify_ops *notify_ops;
+ struct list_head elem;
+ struct sock *sk;
+ spinlock_t lock; /* protects sk. */
};
int vmci_transport_register(void);
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+ struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
- if (!lt && !rp && !re)
+ if (!lt && !rp && !re && !et && !rt)
return err;
/* pedantic mode - thou shalt sayeth replaceth */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
+#elif defined(__aarch64__)
+
+#define PT_REGS_PARM1(x) ((x)->regs[0])
+#define PT_REGS_PARM2(x) ((x)->regs[1])
+#define PT_REGS_PARM3(x) ((x)->regs[2])
+#define PT_REGS_PARM4(x) ((x)->regs[3])
+#define PT_REGS_PARM5(x) ((x)->regs[4])
+#define PT_REGS_RET(x) ((x)->regs[30])
+#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->regs[0])
+#define PT_REGS_SP(x) ((x)->sp)
+
#endif
#endif
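
The new aarch64 block maps the PT_REGS_* accessors onto struct pt_regs
per the AAPCS64 calling convention: regs[0]..regs[4] (x0-x4) carry the
first integer arguments, x0 also carries the return value, x29 is the
frame pointer, and x30 (the link register) holds the return address at
function entry. A sketch of how a samples/bpf kprobe program consumes
these macros; the probed symbol and the message format are illustrative
only:

    #include <uapi/linux/bpf.h>
    #include <linux/ptrace.h>
    #include "bpf_helpers.h"

    /* kprobe on sys_write: the first argument is the fd.  On aarch64,
     * PT_REGS_PARM1() now expands to ctx->regs[0].
     */
    SEC("kprobe/sys_write")
    int trace_write(struct pt_regs *ctx)
    {
            char fmt[] = "write(fd=%d)\n";

            bpf_trace_printk(fmt, sizeof(fmt), (int)PT_REGS_PARM1(ctx));
            return 0;
    }

    char _license[] SEC("license") = "GPL";

The same program compiles unchanged on x86_64, where PT_REGS_PARM1()
resolves to ((x)->di) instead.
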
}
clearhandler(SIGSEGV);
+ /* Make sure nothing explodes if we fork. */
+ if (fork() > 0)
+ return 0;
+
return (nerrs == 0 ? 0 : 1);
}