batman-adv: Prefix main non-static functions with batadv_
net/batman-adv/send.c
/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
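
/* Illustrative usage sketch (not part of the original file): a caller
 * holding a reference to an active hard_iface hands over a fully built
 * frame; "my_skb" is a hypothetical, already prepared buffer, and the
 * skb is consumed on both the success and the error path:
 *
 *	batadv_send_skb_packet(my_skb, hard_iface, batadv_broadcast_addr);
 */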

void batadv_schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/* the interface is activated here already to close the race between
	 * hardif_activate_interface(), where the originator mac is set, and
	 * outdated packets (especially those with uninitialized mac
	 * addresses) lingering in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
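
/* Illustrative sketch (not from the original file): the bat_ogm_schedule
 * hook invoked above is supplied by the routing algorithm through its
 * bat_algo_ops; a hypothetical algorithm "MY_ALGO" would wire it up as:
 *
 *	static struct bat_algo_ops my_algo_ops = {
 *		.name = "MY_ALGO",
 *		.bat_ogm_schedule = my_ogm_schedule,
 *		.bat_ogm_emit = my_ogm_emit,
 *	};
 */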

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
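
/* Note (added for illustration): send_time above is a delay in jiffies,
 * not milliseconds; callers convert first, as in the rebroadcast path
 * below:
 *
 *	_add_bcast_packet_to_list(bat_priv, forw_packet,
 *				  msecs_to_jiffies(5));
 */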

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
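
/* Illustrative caller sketch (not part of the original file): because the
 * skb is copied rather than consumed, the caller keeps ownership of its
 * buffer and still has to free it; "dropped" is a hypothetical error
 * label:
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, 1) !=
 *	    NETDEV_TX_OK)
 *		goto dropped;
 *	kfree_skb(skb);
 */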

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  msecs_to_jiffies(5));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
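
/* Timing sketch (added for illustration): num_packets starts at 0 and the
 * "< 3" check above reschedules twice, so every broadcast leaves each
 * interface three times in total, with the retransmissions spaced
 * msecs_to_jiffies(5) apart. */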

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake-up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void batadv_purge_outstanding_packets(struct bat_priv *bat_priv,
				      const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/* if batadv_purge_outstanding_packets() was called with an
		 * argument we delete only packets belonging to the given
		 * interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/* if batadv_purge_outstanding_packets() was called with an
		 * argument we delete only packets belonging to the given
		 * interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
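
/* Usage sketch (added for illustration): as the NULL check above allows,
 * callers flush the queues either per interface or globally:
 *
 *	batadv_purge_outstanding_packets(bat_priv, hard_iface);
 *	batadv_purge_outstanding_packets(bat_priv, NULL);
 */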