/*
 * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
#include "aggregation.h"

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq)
{
        return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
}
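
/*
 * Worked example (illustrative values only; the actual TQ_MAX_VALUE and
 * TQ_HOP_PENALTY constants are compile-time defaults defined elsewhere):
 * assuming TQ_MAX_VALUE = 255 and TQ_HOP_PENALTY = 10, an incoming tq of
 * 255 is scaled to (255 * 245) / 255 = 245, i.e. each hop costs roughly
 * 4% of the remaining transmit quality.
 */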

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(void)
{
        return jiffies +
                (((atomic_read(&originator_interval) - JITTER +
                   (random32() % (2 * JITTER))) * HZ) / 1000);
}
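
/*
 * Timing sketch (illustrative, assuming a 1000 ms originator interval and
 * JITTER = 20): the next own OGM is scheduled uniformly within
 * [980, 1019] ms from now; the random offset keeps neighboring nodes from
 * synchronizing their broadcasts.
 */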

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
        unsigned long send_time = jiffies; /* Starting now plus... */

        if (atomic_read(&aggregation_enabled))
                send_time += (((MAX_AGGREGATION_MS - (JITTER/2) +
                                (random32() % JITTER)) * HZ) / 1000);
        else
                send_time += (((random32() % (JITTER/2)) * HZ) / 1000);

        return send_time;
}
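
/*
 * Effect of the two branches above (illustrative, assuming
 * MAX_AGGREGATION_MS = 100 and JITTER = 20): with aggregation enabled the
 * forward is held back for 90..109 ms so that further OGMs can be merged
 * into a single frame; without aggregation the packet leaves after at
 * most 9 ms of jitter.
 */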

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
                    struct batman_if *batman_if,
                    uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (batman_if->if_active != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!batman_if->net_dev))
                goto send_skb_err;

        if (!(batman_if->net_dev->flags & IFF_UP)) {
                printk(KERN_WARNING
                       "batman-adv:Interface %s is not up - can't send packet via that interface!\n",
                       batman_if->dev);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = batman_if->net_dev;

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error. */
        return dev_queue_xmit(skb);

send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/* sends a raw packet. */
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
                     struct batman_if *batman_if, uint8_t *dst_addr)
{
        struct sk_buff *skb;
        char *data;

        skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
        if (!skb)
                return;

        data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
        memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);

        /* pull back to the batman "network header" */
        skb_pull(skb, sizeof(struct ethhdr));
        send_skb_packet(skb, batman_if, dst_addr);
}
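
/*
 * Note on the skb layout in send_raw_packet(): the payload is copied at an
 * offset of sizeof(struct ethhdr), and the headroom is then handed back via
 * skb_pull(), so that send_skb_packet() can my_skb_push() the ethernet
 * header in front of the batman packet without reallocating the buffer.
 */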

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
                              struct batman_if *batman_if)
{
        char *fwd_str;
        uint8_t packet_num;
        int16_t buff_pos;
        struct batman_packet *batman_packet;

        if (batman_if->if_active != IF_ACTIVE)
                return;

        packet_num = buff_pos = 0;
        batman_packet = (struct batman_packet *)(forw_packet->packet_buff);

        /* adjust all flags and log packets */
        while (aggregated_packet(buff_pos,
                                 forw_packet->packet_len,
                                 batman_packet->num_hna)) {

                /* we might have aggregated direct link packets with an
                 * ordinary base packet */
                if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
                    (forw_packet->if_incoming == batman_if))
                        batman_packet->flags |= DIRECTLINK;
                else
                        batman_packet->flags &= ~DIRECTLINK;

                fwd_str = (packet_num > 0 ? "Forwarding" :
                           (forw_packet->own ? "Sending own" : "Forwarding"));

                bat_dbg(DBG_BATMAN,
                        "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
                        fwd_str, (packet_num > 0 ? "aggregated " : ""),
                        batman_packet->orig, ntohs(batman_packet->seqno),
                        batman_packet->tq, batman_packet->ttl,
                        (batman_packet->flags & DIRECTLINK ? "on" : "off"),
                        batman_if->dev, batman_if->addr_str);

                buff_pos += sizeof(struct batman_packet) +
                        (batman_packet->num_hna * ETH_ALEN);
                packet_num++;
                batman_packet = (struct batman_packet *)
                        (forw_packet->packet_buff + buff_pos);
        }

        send_raw_packet(forw_packet->packet_buff, forw_packet->packet_len,
                        batman_if, broadcastAddr);
}
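
/*
 * Layout of an aggregated packet_buff as walked by the loop above (sketch):
 * each OGM is immediately followed by its HNA list, and bit n of
 * direct_link_flags refers to the n-th OGM in the buffer:
 *
 *   [batman_packet 0][num_hna0 * ETH_ALEN][batman_packet 1][...]
 */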

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
        struct batman_if *batman_if;
        struct batman_packet *batman_packet =
                (struct batman_packet *)(forw_packet->packet_buff);
        unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

        if (!forw_packet->if_incoming) {
                printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
                return;
        }

        if (forw_packet->if_incoming->if_active != IF_ACTIVE)
                return;

        /* multihomed peer assumed - non-primary OGMs are only broadcast
         * on their interface */
        if ((directlink && (batman_packet->ttl == 1)) ||
            (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

                /* FIXME: what about aggregated packets ? */
                bat_dbg(DBG_BATMAN,
                        "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%s]\n",
                        (forw_packet->own ? "Sending own" : "Forwarding"),
                        batman_packet->orig, ntohs(batman_packet->seqno),
                        batman_packet->ttl, forw_packet->if_incoming->dev,
                        forw_packet->if_incoming->addr_str);

                send_raw_packet(forw_packet->packet_buff,
                                forw_packet->packet_len,
                                forw_packet->if_incoming,
                                broadcastAddr);
                return;
        }

        /* broadcast on every interface */
        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &if_list, list)
                send_packet_to_if(forw_packet, batman_if);
        rcu_read_unlock();
}

static void rebuild_batman_packet(struct batman_if *batman_if)
{
        int new_len;
        unsigned char *new_buff;
        struct batman_packet *batman_packet;

        new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
        new_buff = kmalloc(new_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
                memcpy(new_buff, batman_if->packet_buff,
                       sizeof(struct batman_packet));
                batman_packet = (struct batman_packet *)new_buff;

                batman_packet->num_hna = hna_local_fill_buffer(
                        new_buff + sizeof(struct batman_packet),
                        new_len - sizeof(struct batman_packet));

                kfree(batman_if->packet_buff);
                batman_if->packet_buff = new_buff;
                batman_if->packet_len = new_len;
        }
}

void schedule_own_packet(struct batman_if *batman_if)
{
        unsigned long send_time;
        struct batman_packet *batman_packet;
        int vis_server = atomic_read(&vis_mode);

        /**
         * the interface gets activated here to avoid race conditions
         * between the moment the interface is activated in
         * hardif_activate_interface() (where the originator mac is set)
         * and outdated packets (especially uninitialized mac addresses)
         * still sitting in the packet queue
         */
        if (batman_if->if_active == IF_TO_BE_ACTIVATED)
                batman_if->if_active = IF_ACTIVE;

        /* if local hna has changed and interface is a primary interface */
        if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
                rebuild_batman_packet(batman_if);

        /**
         * NOTE: packet_buff might just have been re-allocated in
         * rebuild_batman_packet()
         */
        batman_packet = (struct batman_packet *)batman_if->packet_buff;

        /* change sequence number to network order */
        batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

        if (vis_server == VIS_TYPE_SERVER_SYNC)
                batman_packet->flags = VIS_SERVER;
        else
                batman_packet->flags = 0;

        /* could be read by receive_bat_packet() */
        atomic_inc(&batman_if->seqno);

        slide_own_bcast_window(batman_if);
        send_time = own_send_time();
        add_bat_packet_to_list(batman_if->packet_buff,
                               batman_if->packet_len, batman_if, 1, send_time);
}

void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             uint8_t directlink, int hna_buff_len,
                             struct batman_if *if_incoming)
{
        unsigned char in_tq, in_ttl, tq_avg = 0;
        unsigned long send_time;

        if (batman_packet->ttl <= 1) {
                bat_dbg(DBG_BATMAN, "ttl exceeded\n");
                return;
        }

        in_tq = batman_packet->tq;
        in_ttl = batman_packet->ttl;

        batman_packet->ttl--;
        memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

        /* rebroadcast the tq of our best ranking neighbor to ensure that
         * our best tq value is propagated */
        if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

                /* rebroadcast ogm of best ranking neighbor as is */
                if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
                        batman_packet->tq = orig_node->router->tq_avg;

                        if (orig_node->router->last_ttl)
                                batman_packet->ttl =
                                        orig_node->router->last_ttl - 1;
                }

                tq_avg = orig_node->router->tq_avg;
        }

        /* apply hop penalty */
        batman_packet->tq = hop_penalty(batman_packet->tq);

        bat_dbg(DBG_BATMAN,
                "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
                in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
                batman_packet->ttl);

        batman_packet->seqno = htons(batman_packet->seqno);

        if (directlink)
                batman_packet->flags |= DIRECTLINK;
        else
                batman_packet->flags &= ~DIRECTLINK;

        send_time = forward_send_time();
        add_bat_packet_to_list((unsigned char *)batman_packet,
                               sizeof(struct batman_packet) + hna_buff_len,
                               if_incoming, 0, send_time);
}
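
/*
 * Byte-order note on the function above: seqno is in host order at this
 * point (presumably converted on reception, e.g. in receive_bat_packet());
 * the htons() restores network order before the OGM is queued for
 * re-transmission.
 */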

static void forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        kfree(forw_packet->packet_buff);
        kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
                                      unsigned long send_time)
{
        unsigned long flags;

        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_irqsave(&forw_bcast_list_lock, flags);
        hlist_add_head(&forw_packet->list, &forw_bcast_list);
        spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          send_outstanding_bcast_packet);
        queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

void add_bcast_packet_to_list(struct sk_buff *skb)
{
        struct forw_packet *forw_packet;

        forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
        if (!forw_packet)
                return;

        skb = skb_copy(skb, GFP_ATOMIC);
        if (!skb) {
                kfree(forw_packet);
                return;
        }
        skb_reset_mac_header(skb);

        forw_packet->skb = skb;
        forw_packet->packet_buff = NULL;
        /* how often did we send the bcast packet ? */
        forw_packet->num_packets = 0;

        _add_bcast_packet_to_list(forw_packet, 1);
}

void send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batman_if *batman_if;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        unsigned long flags;
        struct sk_buff *skb1;

        spin_lock_irqsave(&forw_bcast_list_lock, flags);
        hlist_del(&forw_packet->list);
        spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &if_list, list) {
                /* send a copy of the saved skb */
                skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        send_skb_packet(skb1, batman_if, broadcastAddr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send and we are not shutting
         * down */
        if ((forw_packet->num_packets < 3) &&
            (atomic_read(&module_state) != MODULE_DEACTIVATING))
                _add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
        else
                forw_packet_free(forw_packet);
}
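
/*
 * Rebroadcast policy in the function above: every broadcast is transmitted
 * three times in total (num_packets < 3), with 5 ms worth of jiffies
 * ((5 * HZ) / 1000) between rounds, to cope with lossy wireless links.
 */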

void send_outstanding_bat_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        unsigned long flags;

        spin_lock_irqsave(&forw_bat_list_lock, flags);
        hlist_del(&forw_packet->list);
        spin_unlock_irqrestore(&forw_bat_list_lock, flags);

        send_packet(forw_packet);

        /**
         * we have to have at least one packet in the queue to determine the
         * queue's wake-up time, unless we are shutting down
         */
        if ((forw_packet->own) &&
            (atomic_read(&module_state) != MODULE_DEACTIVATING))
                schedule_own_packet(forw_packet->if_incoming);

        forw_packet_free(forw_packet);
}

void purge_outstanding_packets(void)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        unsigned long flags;

        bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_irqsave(&forw_bcast_list_lock, flags);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &forw_bcast_list, list) {

                spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

                /**
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_irqsave(&forw_bcast_list_lock, flags);
        }
        spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

        /* free batman packet list */
        spin_lock_irqsave(&forw_bat_list_lock, flags);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &forw_bat_list, list) {

                spin_unlock_irqrestore(&forw_bat_list_lock, flags);

                /**
                 * send_outstanding_bat_packet() will lock the list to
                 * delete the item from the list
                 */
                cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_irqsave(&forw_bat_list_lock, flags);
        }
        spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}