config HISAX_NETJET
bool "NETjet card"
- depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+ depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
depends on VIRT_TO_BUS
help
This enables HiSax support for the NetJet from Traverse
config HISAX_NETJET_U
bool "NETspider U card"
- depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+ depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
depends on VIRT_TO_BUS
help
This enables HiSax support for the Netspider U interface ISDN card
struct can_frame *cf = (struct can_frame *)skb->data;
u8 dlc = cf->can_dlc;
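+ /* give the echoed frame a timestamp if the driver did not set one */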
+ if (!(skb->tstamp.tv64))
+ __net_timestamp(skb);
+
netif_rx(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
if (unlikely(!skb))
return NULL;
+ __net_timestamp(skb);
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (unlikely(!skb))
return NULL;
+ __net_timestamp(skb);
skb->protocol = htons(ETH_P_CANFD);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!skb)
return;
+ __net_timestamp(skb);
skb->dev = sl->dev;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!(skb->tstamp.tv64))
+ __net_timestamp(skb);
+
netif_rx_ni(skb);
}
{
struct sk_buff *new_skb;
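+ /* the copy below only handles linear data; flatten any fragments first */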
+ if (skb_linearize(skb))
+ return NULL;
+
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
if (!new_skb)
cbd_t __iomem *bdp;
int curidx;
u16 sc;
- int nr_frags = skb_shinfo(skb)->nr_frags;
+ int nr_frags;
skb_frag_t *frag;
int len;
-
#ifdef CONFIG_FS_ENET_MPC5121_FEC
- if (((unsigned long)skb->data) & 0x3) {
+ int is_aligned = 1;
+ int i;
+
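+ /* the MPC5121 FEC cannot DMA from unaligned buffers: check head and frags */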
+ if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
+ is_aligned = 0;
+ } else {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag = skb_shinfo(skb)->frags;
+ for (i = 0; i < nr_frags; i++, frag++) {
+ if (!IS_ALIGNED(frag->page_offset, 4)) {
+ is_aligned = 0;
+ break;
+ }
+ }
+ }
+
+ if (!is_aligned) {
skb = tx_skb_align_workaround(dev, skb);
if (!skb) {
/*
}
}
#endif
+
spin_lock(&fep->tx_lock);
/*
*/
bdp = fep->cur_tx;
+ nr_frags = skb_shinfo(skb)->nr_frags;
if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
netif_stop_queue(dev);
spin_unlock(&fep->tx_lock);
val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+ } else {
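+ /* not using in-band status: force the autoneg bits off */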
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
mvneta_set_ucast_table(pp, -1);
}
}
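+ /* mask QoS_VPP on mixed IB/Eth port configurations, where it is unsupported */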
+ if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
+ (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
+ (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
+ mlx4_warn(dev,
+ "Granular QoS per VF not supported with IB/Eth configuration\n");
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
+ }
+
dev->caps.max_counters = dev_cap->max_counters;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
return 0;
err_pci_register_driver:
- unregister_netdevice_notifier(&rocker_netevent_nb);
+ unregister_netevent_notifier(&rocker_netevent_nb);
unregister_netdevice_notifier(&rocker_netdevice_nb);
return err;
}
u32 buffer2_size:13;
u32 reserved4:3;
} etx; /* -- enhanced -- */
+
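+ /* overlays the erx/etx/rx/tx bitfields so init can zero them in one store */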
+ u64 all_flags;
} des01;
unsigned int des2;
unsigned int des3;
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
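+ /* clear every descriptor flag before setting up ownership and sizes */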
+ p->des01.all_flags = 0;
p->des01.erx.own = 1;
p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.etx.own = 0;
+ p->des01.all_flags = 0;
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_tx_set_on_chain(p, end);
else
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
int end)
{
+ p->des01.all_flags = 0;
p->des01.rx.own = 1;
p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.tx.own = 0;
+ p->des01.all_flags = 0;
if (mode == STMMAC_CHAIN_MODE)
ndesc_tx_set_on_chain(p, end);
else
goto err_tx_skbuff;
if (priv->extend_desc) {
- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
+ priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
if (!priv->dma_erx)
goto err_dma;
- priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
+ priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
if (!priv->dma_etx) {
dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
goto err_dma;
}
} else {
- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
+ priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
if (!priv->dma_rx)
goto err_dma;
- priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
+ priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
if (!priv->dma_tx) {
dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
goto err_dma;
}
}
char *node;
unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
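+ /* a credit watch is already registered for this vif */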
+ if (vif->credit_watch.node)
+ return -EADDRINUSE;
+
node = kmalloc(maxlen, GFP_KERNEL);
if (!node)
return -ENOMEM;
}
xen_net_read_rate(dev, &credit_bytes, &credit_usec);
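+ /* drop any watch left from a previous connect before registering anew */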
+ xen_unregister_watchers(be->vif);
xen_register_watchers(dev, be->vif);
read_xenbus_vif_flags(be);
struct list_head addr_waitq;
struct timer_list addr_wq_timer;
struct list_head auto_asconf_splist;
+ /* Lock that protects both addr_waitq and auto_asconf_splist */
spinlock_t addr_wq_lock;
/* Lock that protects the local_addr_list writers */
atomic_t pd_mode;
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
+
+ /* These must be the last fields, as they will be skipped on copies,
+ * like on accept and peeloff operations
+ */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- spin_lock_bh(&br->lock);
br_stp_set_bridge_priority(br, args[1]);
- spin_unlock_bh(&br->lock);
return 0;
case BRCTL_SET_PORT_PRIORITY:
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
+static void br_multicast_add_router(struct net_bridge *br,
+ struct net_bridge_port *port);
unsigned int br_mdb_rehash_seq;
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_enable(&port->ip6_own_query);
#endif
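+ /* rejoin the router list if this port is statically configured as one */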
+ if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
+ br_multicast_add_router(br, port);
out:
spin_unlock(&br->multicast_lock);
return true;
}
-/* called under bridge lock */
+/* Acquires and releases bridge lock */
void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
{
struct net_bridge_port *p;
int wasroot;
+ spin_lock_bh(&br->lock);
wasroot = br_is_root_bridge(br);
list_for_each_entry(p, &br->port_list, list) {
br_port_state_selection(br);
if (br_is_root_bridge(br) && !wasroot)
br_become_root_bridge(br);
+ spin_unlock_bh(&br->lock);
}
/* called under bridge lock */
return err;
}
- if (newskb)
+ if (newskb) {
+ if (!(newskb->tstamp.tv64))
+ __net_timestamp(newskb);
+
netif_rx_ni(newskb);
+ }
/* update statistics */
can_stats.tx_frames++;
rc = 0;
if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
goto out_unlock_bh;
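+ /* the entry is being destroyed; do not restart its state machine */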
+ if (neigh->dead)
+ goto out_dead;
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
write_unlock(&neigh->lock);
local_bh_enable();
return rc;
+
+out_dead:
+ if (neigh->nud_state & NUD_STALE)
+ goto out_unlock_bh;
+ write_unlock_bh(&neigh->lock);
+ kfree_skb(skb);
+ return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
+ if (neigh->dead)
+ goto out;
if (!(new & NUD_VALID)) {
neigh_del_timer(neigh);
*/
void __neigh_set_probe_once(struct neighbour *neigh)
{
+ if (neigh->dead)
+ return;
neigh->updated = jiffies;
if (!(neigh->nud_state & NUD_FAILED))
return;
err = 0;
if (err)
goto out;
+
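+ /* generate the TCP Fast Open server key once before listening starts */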
+ tcp_fastopen_init_key_once(true);
}
err = inet_csk_listen_start(sk, backlog);
if (err)
kfree_skb(skb);
}
+/* For some errors we have valid addr_offset even with zero payload and
+ * zero port. Also, addr_offset should be supported if port is set.
+ */
+static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
+{
+ return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+ serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
+}
+
/* IPv4 supports cmsg on all icmp errors and some timestamps
*
* Timestamp code paths do not initialize the fields expected by cmsg:
serr = SKB_EXT_ERR(skb);
- if (sin && serr->port) {
+ if (sin && ipv4_datagram_support_addr(serr)) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
serr->addr_offset);
case TCP_FASTOPEN:
if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
- TCPF_LISTEN)))
+ TCPF_LISTEN))) {
+ tcp_fastopen_init_key_once(true);
+
err = fastopen_init_queue(sk, val);
- else
+ } else {
err = -EINVAL;
+ }
break;
case TCP_TIMESTAMP:
if (!tp->repair)
struct tcp_fastopen_context *ctx;
bool ok = false;
- tcp_fastopen_init_key_once(true);
-
rcu_read_lock();
ctx = rcu_dereference(tcp_fastopen_ctx);
if (ctx) {
kfree_skb(skb);
}
+/* For some errors we have valid addr_offset even with zero payload and
+ * zero port. Also, addr_offset should be supported if port is set.
+ */
+static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
+{
+ return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
+ serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+ serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
+}
+
/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
*
* At one point, excluding local errors was a quick test to identify icmp/icmp6
serr = SKB_EXT_ERR(skb);
- if (sin && serr->port) {
+ if (sin && ipv6_datagram_support_addr(serr)) {
const unsigned char *nh = skb_network_header(skb);
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
- mutex_lock(&sdata->local->mtx);
+ /* crypto_tx_tailroom_needed_cnt is protected by this */
+ assert_key_lock(sdata->local);
+
+ rcu_read_lock();
- list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
vlan->crypto_tx_tailroom_needed_cnt += delta;
- mutex_unlock(&sdata->local->mtx);
+ rcu_read_unlock();
}
static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
* http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
*/
+ assert_key_lock(sdata->local);
+
update_vlan_tailroom_need_count(sdata, 1);
if (!sdata->crypto_tx_tailroom_needed_cnt++) {
static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
int delta)
{
+ assert_key_lock(sdata->local);
+
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
update_vlan_tailroom_need_count(sdata, -delta);
sk_refcnt_debug_dec(sk);
}
-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
-{
- int x = atomic_read(&f->rr_cur) + 1;
-
- if (x >= num)
- x = 0;
-
- return x;
-}
-
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
u32 rxhash;
struct sk_buff *skb,
unsigned int num)
{
- int cur, old;
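+ /* rr_cur free-runs; reduce it modulo the member count on each use */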
+ unsigned int val = atomic_inc_return(&f->rr_cur);
- cur = atomic_read(&f->rr_cur);
- while ((old = atomic_cmpxchg(&f->rr_cur, cur,
- fanout_rr_next(f, num))) != cur)
- cur = old;
- return cur;
+ return val % num;
}
static unsigned int fanout_demux_cpu(struct packet_fanout *f,
struct packet_type *pt, struct net_device *orig_dev)
{
struct packet_fanout *f = pt->af_packet_priv;
- unsigned int num = f->num_members;
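+ /* num_members may change under us; sample it once for this decision */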
+ unsigned int num = READ_ONCE(f->num_members);
struct packet_sock *po;
unsigned int idx;
if (rose->device == dev) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
- rose->neighbour->use--;
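+ /* the neighbour may already be gone when the device goes down */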
+ if (rose->neighbour)
+ rose->neighbour->use--;
rose->device = NULL;
}
}
/* Supposedly, no process has access to the socket, but
* the net layers still may.
+ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+ * held and that should be grabbed before socket lock.
*/
- local_bh_disable();
+ spin_lock_bh(&net->sctp.addr_wq_lock);
bh_lock_sock(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
sk_common_release(sk);
bh_unlock_sock(sk);
- local_bh_enable();
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
sock_put(sk);
if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
return 0;
+ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
if (val == 0 && sp->do_auto_asconf) {
list_del(&sp->auto_asconf_list);
sp->do_auto_asconf = 0;
&sock_net(sk)->sctp.auto_asconf_splist);
sp->do_auto_asconf = 1;
}
+ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
return 0;
}
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(net, sk->sk_prot, 1);
+
+ /* Nothing can fail after this block, otherwise
+ * sctp_destroy_sock() will be called without addr_wq_lock held
+ */
if (net->sctp.default_auto_asconf) {
+ spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
list_add_tail(&sp->auto_asconf_list,
&net->sctp.auto_asconf_splist);
sp->do_auto_asconf = 1;
- } else
+ spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+ } else {
sp->do_auto_asconf = 0;
+ }
+
local_bh_enable();
return 0;
}
-/* Cleanup any SCTP per socket resources. */
+/* Cleanup any SCTP per socket resources. Must be called with
+ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
+ */
static void sctp_destroy_sock(struct sock *sk)
{
struct sctp_sock *sp;
newinet->mc_list = NULL;
}
+static inline void sctp_copy_descendant(struct sock *sk_to,
+ const struct sock *sk_from)
+{
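+ /* copy the inet/sctp ancestor data up to, but not including, auto_asconf_list */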
+ int ancestor_size = sizeof(struct inet_sock) +
+ sizeof(struct sctp_sock) -
+ offsetof(struct sctp_sock, auto_asconf_list);
+
+ if (sk_from->sk_family == PF_INET6)
+ ancestor_size += sizeof(struct ipv6_pinfo);
+
+ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+}
+
/* Populate the fields of the newsk from the oldsk and migrate the assoc
* and its messages to the newsk.
*/
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
struct sctp_bind_hashbucket *head;
- struct list_head tmplist;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
- if (oldsp->do_auto_asconf) {
- memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
- inet_sk_copy_descendant(newsk, oldsk);
- memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
- } else
- inet_sk_copy_descendant(newsk, oldsk);
+ sctp_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.