Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[cascardo/linux.git] / net / batman-adv / send.c
index 0103976..8d4e1f5 100644 (file)
 
 #include <linux/atomic.h>
 #include <linux/byteorder/generic.h>
+#include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/fs.h>
-#include <linux/if_ether.h>
 #include <linux/if.h>
+#include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
@@ -42,6 +43,7 @@
 #include "fragmentation.h"
 #include "gateway_client.h"
 #include "hard-interface.h"
+#include "log.h"
 #include "network-coding.h"
 #include "originator.h"
 #include "routing.h"
@@ -71,6 +73,7 @@ int batadv_send_skb_packet(struct sk_buff *skb,
 {
        struct batadv_priv *bat_priv;
        struct ethhdr *ethhdr;
+       int ret;
 
        bat_priv = netdev_priv(hard_iface->soft_iface);
 
@@ -108,8 +111,15 @@ int batadv_send_skb_packet(struct sk_buff *skb,
        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
+        *
+        * a negative value cannot be returned because it could be interpreted
+        * as not consumed skb by callers of batadv_send_skb_to_orig.
         */
-       return dev_queue_xmit(skb);
+       ret = dev_queue_xmit(skb);
+       if (ret < 0)
+               ret = NET_XMIT_DROP;
+
+       return ret;
 send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
@@ -155,8 +165,11 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
  * host, NULL can be passed as recv_if and no interface alternating is
  * attempted.
  *
- * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
- * NET_XMIT_POLICED if the skb is buffered for later transmit.
+ * Return: -1 on failure (and the skb is not consumed), -EINPROGRESS if the
+ * skb is buffered for later transmit or the NET_XMIT status returned by the
+ * lower routine if the packet has been passed down.
+ *
+ * If the return value is not -1 the skb has been consumed.
  */
 int batadv_send_skb_to_orig(struct sk_buff *skb,
                            struct batadv_orig_node *orig_node,
@@ -164,7 +177,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
 {
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;
-       int ret = NET_XMIT_DROP;
+       int ret = -1;
 
        /* batadv_find_router() increases neigh_nodes refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
@@ -177,8 +190,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
        if (atomic_read(&bat_priv->fragmentation) &&
            skb->len > neigh_node->if_incoming->net_dev->mtu) {
                /* Fragment and send packet. */
-               if (batadv_frag_send_packet(skb, orig_node, neigh_node))
-                       ret = NET_XMIT_SUCCESS;
+               ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
 
                goto out;
        }
@@ -187,12 +199,10 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
         * (i.e. being forwarded). If the packet originates from this node or if
         * network coding fails, then send the packet as usual.
         */
-       if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
-               ret = NET_XMIT_POLICED;
-       } else {
-               batadv_send_unicast_skb(skb, neigh_node);
-               ret = NET_XMIT_SUCCESS;
-       }
+       if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
+               ret = -EINPROGRESS;
+       else
+               ret = batadv_send_unicast_skb(skb, neigh_node);
 
 out:
        if (neigh_node)
@@ -305,8 +315,7 @@ out:
  *
  * Wrap the given skb into a batman-adv unicast or unicast-4addr header
  * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
- * as packet_type. Then send this frame to the given orig_node and release a
- * reference to this orig_node.
+ * as packet_type. Then send this frame to the given orig_node.
  *
  * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
@@ -318,7 +327,7 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
 {
        struct batadv_unicast_packet *unicast_packet;
        struct ethhdr *ethhdr;
-       int ret = NET_XMIT_DROP;
+       int res, ret = NET_XMIT_DROP;
 
        if (!orig_node)
                goto out;
@@ -355,12 +364,11 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
        if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
                unicast_packet->ttvn = unicast_packet->ttvn - 1;
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+       res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+       if (res != -1)
                ret = NET_XMIT_SUCCESS;
 
 out:
-       if (orig_node)
-               batadv_orig_node_put(orig_node);
        if (ret == NET_XMIT_DROP)
                kfree_skb(skb);
        return ret;
@@ -392,6 +400,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
        struct batadv_orig_node *orig_node;
        u8 *src, *dst;
+       int ret;
 
        src = ethhdr->h_source;
        dst = ethhdr->h_dest;
@@ -403,8 +412,13 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
        }
        orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
 
-       return batadv_send_skb_unicast(bat_priv, skb, packet_type,
-                                      packet_subtype, orig_node, vid);
+       ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
+                                     packet_subtype, orig_node, vid);
+
+       if (orig_node)
+               batadv_orig_node_put(orig_node);
+
+       return ret;
 }
 
 /**
@@ -422,42 +436,99 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
                           unsigned short vid)
 {
        struct batadv_orig_node *orig_node;
+       int ret;
 
        orig_node = batadv_gw_get_selected_orig(bat_priv);
-       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
-                                      BATADV_P_DATA, orig_node, vid);
-}
-
-void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
-{
-       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-
-       if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
-           (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
-               return;
+       ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+                                     BATADV_P_DATA, orig_node, vid);
 
-       /* the interface gets activated here to avoid race conditions between
-        * the moment of activating the interface in
-        * hardif_activate_interface() where the originator mac is set and
-        * outdated packets (especially uninitialized mac addresses) in the
-        * packet queue
-        */
-       if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
-               hard_iface->if_status = BATADV_IF_ACTIVE;
+       if (orig_node)
+               batadv_orig_node_put(orig_node);
 
-       bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
+       return ret;
 }
 
-static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
+/**
+ * batadv_forw_packet_free - free a forwarding packet
+ * @forw_packet: The packet to free
+ *
+ * This frees a forwarding packet and releases any resources it might
+ * have claimed.
+ */
+void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
 {
        kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_put(forw_packet->if_incoming);
        if (forw_packet->if_outgoing)
                batadv_hardif_put(forw_packet->if_outgoing);
+       if (forw_packet->queue_left)
+               atomic_inc(forw_packet->queue_left);
        kfree(forw_packet);
 }
 
+/**
+ * batadv_forw_packet_alloc - allocate a forwarding packet
+ * @if_incoming: The (optional) if_incoming to be grabbed
+ * @if_outgoing: The (optional) if_outgoing to be grabbed
+ * @queue_left: The (optional) queue counter to decrease
+ * @bat_priv: The bat_priv for the mesh of this forw_packet
+ *
+ * Allocates a forwarding packet and tries to get a reference to the
+ * (optional) if_incoming, if_outgoing and queue_left. If queue_left
+ * is NULL then bat_priv is optional, too.
+ *
+ * Return: An allocated forwarding packet on success, NULL otherwise.
+ */
+struct batadv_forw_packet *
+batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
+                        struct batadv_hard_iface *if_outgoing,
+                        atomic_t *queue_left,
+                        struct batadv_priv *bat_priv)
+{
+       struct batadv_forw_packet *forw_packet;
+       const char *qname;
+
+       if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
+               qname = "unknown";
+
+               if (queue_left == &bat_priv->bcast_queue_left)
+                       qname = "bcast";
+
+               if (queue_left == &bat_priv->batman_queue_left)
+                       qname = "batman";
+
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "%s queue is full\n", qname);
+
+               return NULL;
+       }
+
+       forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
+       if (!forw_packet)
+               goto err;
+
+       if (if_incoming)
+               kref_get(&if_incoming->refcount);
+
+       if (if_outgoing)
+               kref_get(&if_outgoing->refcount);
+
+       forw_packet->skb = NULL;
+       forw_packet->queue_left = queue_left;
+       forw_packet->if_incoming = if_incoming;
+       forw_packet->if_outgoing = if_outgoing;
+       forw_packet->num_packets = 0;
+
+       return forw_packet;
+
+err:
+       if (queue_left)
+               atomic_inc(queue_left);
+
+       return NULL;
+}
+
 static void
 _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
@@ -496,24 +567,20 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;
 
-       if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
-               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "bcast packet queue full\n");
-               goto out;
-       }
-
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
-               goto out_and_inc;
-
-       forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
+               goto err;
 
+       forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
+                                              &bat_priv->bcast_queue_left,
+                                              bat_priv);
+       batadv_hardif_put(primary_if);
        if (!forw_packet)
-               goto out_and_inc;
+               goto err;
 
        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
-               goto packet_free;
+               goto err_packet_free;
 
        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
@@ -522,11 +589,6 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
        skb_reset_mac_header(newskb);
 
        forw_packet->skb = newskb;
-       forw_packet->if_incoming = primary_if;
-       forw_packet->if_outgoing = NULL;
-
-       /* how often did we send the bcast packet ? */
-       forw_packet->num_packets = 0;
 
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);
@@ -534,13 +596,9 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;
 
-packet_free:
-       kfree(forw_packet);
-out_and_inc:
-       atomic_inc(&bat_priv->bcast_queue_left);
-out:
-       if (primary_if)
-               batadv_hardif_put(primary_if);
+err_packet_free:
+       batadv_forw_packet_free(forw_packet);
+err:
        return NETDEV_TX_BUSY;
 }
 
@@ -601,46 +659,6 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 
 out:
        batadv_forw_packet_free(forw_packet);
-       atomic_inc(&bat_priv->bcast_queue_left);
-}
-
-void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
-{
-       struct delayed_work *delayed_work;
-       struct batadv_forw_packet *forw_packet;
-       struct batadv_priv *bat_priv;
-
-       delayed_work = to_delayed_work(work);
-       forw_packet = container_of(delayed_work, struct batadv_forw_packet,
-                                  delayed_work);
-       bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
-       spin_lock_bh(&bat_priv->forw_bat_list_lock);
-       hlist_del(&forw_packet->list);
-       spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
-       if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
-               goto out;
-
-       bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
-
-       /* we have to have at least one packet in the queue to determine the
-        * queues wake up time unless we are shutting down.
-        *
-        * only re-schedule if this is the "original" copy, e.g. the OGM of the
-        * primary interface should only be rescheduled once per period, but
-        * this function will be called for the forw_packet instances of the
-        * other secondary interfaces as well.
-        */
-       if (forw_packet->own &&
-           forw_packet->if_incoming == forw_packet->if_outgoing)
-               batadv_schedule_bat_ogm(forw_packet->if_incoming);
-
-out:
-       /* don't count own packet */
-       if (!forw_packet->own)
-               atomic_inc(&bat_priv->batman_queue_left);
-
-       batadv_forw_packet_free(forw_packet);
 }
 
 void
@@ -681,9 +699,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
-                       if (!forw_packet->own)
-                               atomic_inc(&bat_priv->bcast_queue_left);
-
                        batadv_forw_packet_free(forw_packet);
                }
        }
@@ -711,9 +726,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
-                       if (!forw_packet->own)
-                               atomic_inc(&bat_priv->batman_queue_left);
-
                        batadv_forw_packet_free(forw_packet);
                }
        }