batman-adv: Prefix send local static functions with batadv_
net/batman-adv/send.c
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface. Returns the result of dev_queue_xmit()
 * on success and NET_XMIT_DROP if the interface is unusable or the
 * ethernet header could not be pushed onto the skb.
 */
int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
                           const uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error. However, on
         * congestion and traffic shaping it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}
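
/* Typical usage (as in batadv_send_outstanding_bcast_packet() below):
 * clone the saved skb so that it can be retransmitted later and hand the
 * clone to batadv_send_skb_packet():
 *
 *      skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 *      if (skb1)
 *              batadv_send_skb_packet(skb1, hard_iface,
 *                                     batadv_broadcast_addr);
 */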
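/* ask the routing algorithm to schedule the next OGM on the given
 * interface, activating the interface first if it was only marked for
 * activation
 */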
void batadv_schedule_bat_ogm(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

        if ((hard_iface->if_status == IF_NOT_IN_USE) ||
            (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;

        /* the interface is activated here to avoid a race between the
         * moment the interface is activated in
         * batadv_hardif_activate_interface(), where the originator mac is
         * set, and outdated packets (especially with uninitialized mac
         * addresses) still sitting in the packet queue
         */
        if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                hard_iface->if_status = IF_ACTIVE;

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
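/* free the skb, drop the reference to the incoming hard interface and
 * release the forwarding packet itself
 */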
static void batadv_forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}
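/* queue the forwarding packet on the broadcast list and start the
 * delayed work that will send it out once send_time has passed
 */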
static void _batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                             struct forw_packet *forw_packet,
                                             unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);
        queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay)
{
        struct hard_iface *primary_if = NULL;
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                batadv_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct bcast_packet *)newskb->data;
        bcast_packet->header.ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}
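
/* Illustrative caller sketch (an assumption, not code from this file):
 * because the skb is not consumed, a transmit path would queue the
 * broadcast and still release its own reference afterwards:
 *
 *      batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 *      kfree_skb(skb);
 */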
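/* delayed work callback: remove the broadcast packet from the queue,
 * send a clone of it on every hard interface belonging to the soft
 * interface and re-arm the timer until it was sent three times
 */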
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct sk_buff *skb1;
        struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_skb_packet(skb1, hard_iface,
                                               batadv_broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
        }

out:
        batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

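/* delayed work callback: remove the OGM from the queue, let the routing
 * algorithm emit it and, if it is one of our own OGMs, schedule the next
 * one to keep the queue's wake-up time determined
 */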
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct bat_priv *bat_priv;

        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /* we have to have at least one packet in the queue to determine the
         * queue's wake-up time, unless we are shutting down
         */
        if (forw_packet->own)
                batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        batadv_forw_packet_free(forw_packet);
}

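/* cancel all broadcast and OGM packets still scheduled for transmission;
 * if hard_iface is given, only packets queued on that interface are
 * purged
 */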
void batadv_purge_outstanding_packets(struct bat_priv *bat_priv,
                                      const struct hard_iface *hard_iface)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        bool pending;

        if (hard_iface)
                batadv_dbg(DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets(): %s\n",
                           hard_iface->net_dev->name);
        else
                batadv_dbg(DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /* if batadv_purge_outstanding_packets() was called with an
                 * argument we delete only packets belonging to the given
                 * interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /* batadv_send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /* if batadv_purge_outstanding_packets() was called with an
                 * argument we delete only packets belonging to the given
                 * interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /* batadv_send_outstanding_bat_ogm_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}