batman-adv: Prefix main enum with BATADV_
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f47299f..aad9818 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #include "main.h"
 #include "gateway_common.h"
 #include "originator.h"
 
-static void send_outstanding_bcast_packet(struct work_struct *work);
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
 /* send out an already prepared packet to the given address via the
- * specified batman interface */
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
-                   const uint8_t *dst_addr)
+ * specified batman interface
+ */
+int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+                          const uint8_t *dst_addr)
 {
        struct ethhdr *ethhdr;
 
-       if (hard_iface->if_status != IF_ACTIVE)
+       if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;
 
        if (unlikely(!hard_iface->net_dev))
@@ -51,7 +50,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
        }
 
        /* push to the ethernet header. */
-       if (my_skb_head_push(skb, ETH_HLEN) < 0)
+       if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;
 
        skb_reset_mac_header(skb);
@@ -59,129 +58,56 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
-       ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+       ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);
 
        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
-       skb->protocol = __constant_htons(ETH_P_BATMAN);
+       skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);
 
        skb->dev = hard_iface->net_dev;
 
        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
-        * (which is > 0). This will not be treated as an error. */
-
+        * (which is > 0). This will not be treated as an error.
+        */
        return dev_queue_xmit(skb);
 send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
 }
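
For callers the return convention above matters: dev_queue_xmit() always
consumes the skb, and only a negative return is a real error, while
NET_XMIT_DROP and NET_XMIT_CN (> 0) are congestion signals. A minimal
caller sketch of that convention (example_xmit() is illustrative, not
batman-adv API):

	#include <linux/netdevice.h>
	#include <linux/printk.h>

	/* illustrative caller: only negative returns are errors;
	 * NET_XMIT_DROP / NET_XMIT_CN (> 0) signal congestion drops
	 */
	static int example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		int ret;

		skb->dev = dev;
		ret = dev_queue_xmit(skb);	/* consumes skb, success or not */
		if (ret < 0)
			pr_warn("example: transmit error %d\n", ret);

		return ret;
	}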
 
-static void realloc_packet_buffer(struct hard_iface *hard_iface,
-                                 int new_len)
-{
-       unsigned char *new_buff;
-
-       new_buff = kmalloc(new_len, GFP_ATOMIC);
-
-       /* keep old buffer if kmalloc should fail */
-       if (new_buff) {
-               memcpy(new_buff, hard_iface->packet_buff,
-                      BATMAN_OGM_HLEN);
-
-               kfree(hard_iface->packet_buff);
-               hard_iface->packet_buff = new_buff;
-               hard_iface->packet_len = new_len;
-       }
-}
-
-/* when calling this function (hard_iface == primary_if) has to be true */
-static int prepare_packet_buffer(struct bat_priv *bat_priv,
-                                 struct hard_iface *hard_iface)
-{
-       int new_len;
-
-       new_len = BATMAN_OGM_HLEN +
-                 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
-
-       /* if we have too many changes for one packet don't send any
-        * and wait for the tt table request which will be fragmented */
-       if (new_len > hard_iface->soft_iface->mtu)
-               new_len = BATMAN_OGM_HLEN;
-
-       realloc_packet_buffer(hard_iface, new_len);
-
-       atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
-
-       /* reset the sending counter */
-       atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
-
-       return tt_changes_fill_buffer(bat_priv,
-                                     hard_iface->packet_buff + BATMAN_OGM_HLEN,
-                                     hard_iface->packet_len - BATMAN_OGM_HLEN);
-}
-
-static int reset_packet_buffer(struct bat_priv *bat_priv,
-                               struct hard_iface *hard_iface)
-{
-       realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
-       return 0;
-}
-
-void schedule_bat_ogm(struct hard_iface *hard_iface)
+void batadv_schedule_bat_ogm(struct hard_iface *hard_iface)
 {
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-       struct hard_iface *primary_if;
-       int tt_num_changes = -1;
 
-       if ((hard_iface->if_status == IF_NOT_IN_USE) ||
-           (hard_iface->if_status == IF_TO_BE_REMOVED))
+       if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
+           (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
                return;
 
-       /**
-        * the interface gets activated here to avoid race conditions between
+       /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
         * hardif_activate_interface() where the originator mac is set and
         * outdated packets (especially uninitialized mac addresses) in the
         * packet queue
         */
-       if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
-               hard_iface->if_status = IF_ACTIVE;
-
-       primary_if = primary_if_get_selected(bat_priv);
+       if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+               hard_iface->if_status = BATADV_IF_ACTIVE;
 
-       if (hard_iface == primary_if) {
-               /* if at least one change happened */
-               if (atomic_read(&bat_priv->tt_local_changes) > 0) {
-                       tt_commit_changes(bat_priv);
-                       tt_num_changes = prepare_packet_buffer(bat_priv,
-                                                              hard_iface);
-               }
-
-               /* if the changes have been sent often enough */
-               if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
-                       tt_num_changes = reset_packet_buffer(bat_priv,
-                                                            hard_iface);
-       }
-
-       if (primary_if)
-               hardif_free_ref(primary_if);
-
-       bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
+       bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
 }
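
After this change the TT bookkeeping no longer lives here, and scheduling
is delegated entirely to the routing algorithm via
bat_algo_ops->bat_ogm_schedule(), now called without the tt_num_changes
argument. A simplified sketch of that callback shape (example_* names are
stand-ins, not the full batman-adv ops table):

	/* simplified stand-in for bat_algo_ops; the real table carries
	 * more callbacks (interface enable/disable, neighbour handling, ...)
	 */
	struct example_algo_ops {
		void (*bat_ogm_schedule)(struct hard_iface *hard_iface);
		void (*bat_ogm_emit)(struct forw_packet *forw_packet);
	};

	static void example_ogm_schedule(struct hard_iface *hard_iface)
	{
		/* build the next OGM for this interface and queue it
		 * with a delayed_work timer, as the real algorithms do
		 */
	}

	static const struct example_algo_ops example_ops = {
		.bat_ogm_schedule = example_ogm_schedule,
	};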
 
-static void forw_packet_free(struct forw_packet *forw_packet)
+static void batadv_forw_packet_free(struct forw_packet *forw_packet)
 {
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
-               hardif_free_ref(forw_packet->if_incoming);
+               batadv_hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
 }
 
-static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
-                                     struct forw_packet *forw_packet,
-                                     unsigned long send_time)
+static void _batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
+                                            struct forw_packet *forw_packet,
+                                            unsigned long send_time)
 {
        INIT_HLIST_NODE(&forw_packet->list);
 
@@ -192,8 +118,8 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
 
        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
-                         send_outstanding_bcast_packet);
-       queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
+                         batadv_send_outstanding_bcast_packet);
+       queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
 }
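
The timer above is the stock delayed-work idiom: embed a struct
delayed_work in the packet, bind it to the handler with
INIT_DELAYED_WORK(), and queue it on the module workqueue. A
self-contained sketch of the same idiom (example_* names are
illustrative):

	#include <linux/list.h>
	#include <linux/workqueue.h>

	struct example_packet {
		struct hlist_node list;
		struct delayed_work delayed_work;
	};

	static void example_work_fn(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct example_packet *pkt;

		/* recover the containing packet from the work item */
		pkt = container_of(dwork, struct example_packet,
				   delayed_work);
		/* ... transmit pkt here ... */
	}

	static void example_schedule(struct workqueue_struct *wq,
				     struct example_packet *pkt,
				     unsigned long delay_jiffies)
	{
		INIT_DELAYED_WORK(&pkt->delayed_work, example_work_fn);
		queue_delayed_work(wq, &pkt->delayed_work, delay_jiffies);
	}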
 
@@ -204,21 +130,24 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  * errors.
  *
  * The skb is not consumed, so the caller should make sure that the
- * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
-                            const struct sk_buff *skb, unsigned long delay)
+ * skb is freed.
+ */
+int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
+                                   const struct sk_buff *skb,
+                                   unsigned long delay)
 {
        struct hard_iface *primary_if = NULL;
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;
        struct sk_buff *newskb;
 
-       if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
-               bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
+       if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "bcast packet queue full\n");
                goto out;
        }
 
-       primary_if = primary_if_get_selected(bat_priv);
+       primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;
 
@@ -243,7 +172,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
        /* how often did we send the bcast packet ? */
        forw_packet->num_packets = 0;
 
-       _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
+       _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;
 
 packet_free:
@@ -252,11 +181,11 @@ out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
 out:
        if (primary_if)
-               hardif_free_ref(primary_if);
+               batadv_hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
 }
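
bcast_queue_left works as a counting semaphore bounding the broadcast
queue: the dec-not-zero helper claims a slot only while one is free, and
the error path gives it back with atomic_inc(). A sketch of that
reserve/release pairing, assuming the helper is built on
atomic_add_unless() as batman-adv's is:

	#include <linux/atomic.h>
	#include <linux/types.h>

	/* decrement v unless it already hit zero; non-zero on success
	 * (mirrors batadv_atomic_dec_not_zero() from main.h)
	 */
	#define example_atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

	static atomic_t example_queue_left = ATOMIC_INIT(32);

	static bool example_reserve_slot(void)
	{
		/* claims one slot; fails once the budget is exhausted */
		return example_atomic_dec_not_zero(&example_queue_left);
	}

	static void example_release_slot(void)
	{
		/* error paths and completed sends return their slot */
		atomic_inc(&example_queue_left);
	}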
 
-static void send_outstanding_bcast_packet(struct work_struct *work)
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 {
        struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
@@ -271,19 +200,20 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
-       if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+       if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;
 
        /* rebroadcast packet */
        rcu_read_lock();
-       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+       list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;
 
                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
-                       send_skb_packet(skb1, hard_iface, broadcast_addr);
+                       batadv_send_skb_packet(skb1, hard_iface,
+                                              batadv_broadcast_addr);
        }
        rcu_read_unlock();
 
@@ -291,17 +221,17 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 
        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
-               _add_bcast_packet_to_list(bat_priv, forw_packet,
-                                         msecs_to_jiffies(5));
+               _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
+                                                msecs_to_jiffies(5));
                return;
        }
 
 out:
-       forw_packet_free(forw_packet);
+       batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
 }
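
The rebroadcast walks the interface list under RCU and transmits a clone
per interface, so the saved skb stays intact for the next round; the
packet then re-queues itself until num_packets reaches 3, i.e. three
transmissions per broadcast. A reduced sketch of the clone-per-interface
walk (example_* types are illustrative):

	#include <linux/netdevice.h>
	#include <linux/rculist.h>
	#include <linux/skbuff.h>

	struct example_iface {
		struct list_head list;
		struct net_device *net_dev;
	};

	static void example_rebroadcast(struct list_head *iface_list,
					struct sk_buff *saved_skb)
	{
		struct example_iface *iface;
		struct sk_buff *clone;

		rcu_read_lock();
		list_for_each_entry_rcu(iface, iface_list, list) {
			/* clone so saved_skb survives for later rounds */
			clone = skb_clone(saved_skb, GFP_ATOMIC);
			if (clone) {
				clone->dev = iface->net_dev;
				dev_queue_xmit(clone);
			}
		}
		rcu_read_unlock();
	}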
 
-void send_outstanding_bat_ogm_packet(struct work_struct *work)
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 {
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
@@ -314,49 +244,47 @@ void send_outstanding_bat_ogm_packet(struct work_struct *work)
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
-       if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+       if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;
 
        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
 
-       /**
-        * we have to have at least one packet in the queue
+       /* we have to have at least one packet in the queue
         * to determine the queue's wake up time unless we are
         * shutting down
         */
        if (forw_packet->own)
-               schedule_bat_ogm(forw_packet->if_incoming);
+               batadv_schedule_bat_ogm(forw_packet->if_incoming);
 
 out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);
 
-       forw_packet_free(forw_packet);
+       batadv_forw_packet_free(forw_packet);
 }
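
This handler re-arms itself for the interface's own packets, which is
what keeps OGMs periodic: emitting schedules the next OGM, so the queue
stays non-empty until the shutdown check breaks the cycle. A
stripped-down sketch of that self-rescheduling pattern (illustrative
names; the real code queues per-packet work items):

	#include <linux/atomic.h>
	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	static atomic_t example_shutting_down = ATOMIC_INIT(0);
	static struct delayed_work example_ogm_work; /* INIT_DELAYED_WORK() at init */

	static void example_ogm_work_fn(struct work_struct *work)
	{
		if (atomic_read(&example_shutting_down))
			return;		/* shutdown breaks the cycle */

		/* ... emit the OGM here ... */

		/* re-arm: the handler schedules its own next run */
		schedule_delayed_work(&example_ogm_work,
				      msecs_to_jiffies(1000));
	}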
 
-void purge_outstanding_packets(struct bat_priv *bat_priv,
-                              const struct hard_iface *hard_iface)
+void batadv_purge_outstanding_packets(struct bat_priv *bat_priv,
+                                     const struct hard_iface *hard_iface)
 {
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        bool pending;
 
        if (hard_iface)
-               bat_dbg(DBG_BATMAN, bat_priv,
-                       "purge_outstanding_packets(): %s\n",
-                       hard_iface->net_dev->name);
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "purge_outstanding_packets(): %s\n",
+                          hard_iface->net_dev->name);
        else
-               bat_dbg(DBG_BATMAN, bat_priv,
-                       "purge_outstanding_packets()\n");
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "purge_outstanding_packets()\n");
 
        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {
 
-               /**
-                * if purge_outstanding_packets() was called with an argument
+               /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
@@ -365,8 +293,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
-               /**
-                * send_outstanding_bcast_packet() will lock the list to
+               /* batadv_send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -374,7 +301,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
-                       forw_packet_free(forw_packet);
+                       batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -384,8 +311,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {
 
-               /**
-                * if purge_outstanding_packets() was called with an argument
+               /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
@@ -394,8 +320,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
                spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
-               /**
-                * send_outstanding_bat_packet() will lock the list to
+       /* batadv_send_outstanding_bat_ogm_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -403,7 +328,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
-                       forw_packet_free(forw_packet);
+                       batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
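
Both purge loops use the same teardown idiom: release the list lock
before cancel_delayed_work_sync(), because the work handler takes that
lock to unlink itself, and free the entry only when the cancel reports
the work as still pending (a handler that already ran frees the packet
on its own). A condensed sketch (example_* names are illustrative; the
two-node hlist_for_each_entry_safe() form matches this kernel era):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct example_packet {
		struct hlist_node list;
		struct delayed_work delayed_work;
	};

	static void example_purge(struct hlist_head *head, spinlock_t *lock)
	{
		struct example_packet *pkt;
		struct hlist_node *node, *safe;
		bool pending;

		spin_lock_bh(lock);
		hlist_for_each_entry_safe(pkt, node, safe, head, list) {
			/* the work handler takes this lock, so release
			 * it before waiting for the handler to finish
			 */
			spin_unlock_bh(lock);
			pending = cancel_delayed_work_sync(&pkt->delayed_work);
			spin_lock_bh(lock);

			if (pending) {
				/* never ran: unlink and free it here */
				hlist_del(&pkt->list);
				kfree(pkt);
			}
		}
		spin_unlock_bh(lock);
	}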