/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

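/* arm the OGM scheduler of the active routing algorithm for this
 * interface, activating the interface first if it is still pending
 * activation */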
void batadv_schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment the interface is activated in
	 * hardif_activate_interface() (where the originator mac is set) and
	 * outdated packets (especially with uninitialized mac addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

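/* release the skb and the interface reference held by a forw_packet
 * before freeing the structure itself */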
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

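/* enqueue a broadcast packet on the forw_bcast_list and arm the delayed
 * work that will transmit it once send_time has elapsed */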
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;
	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

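/* delayed work callback: re-broadcast the queued packet on every hard
 * interface attached to this mesh and re-queue it until it has been
 * sent three times (see the num_packets check below) */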
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  msecs_to_jiffies(5));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

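/* delayed work callback: hand the queued OGM to the routing algorithm
 * for emission and, for our own OGMs, schedule the next one */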
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake-up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

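/* cancel all broadcast and OGM packets still scheduled for transmission;
 * if hard_iface is NULL all queued packets are purged, otherwise only
 * those queued on the given interface */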
void batadv_purge_outstanding_packets(struct bat_priv *bat_priv,
				      const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}