/*
 * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
#include "aggregation.h"

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq)
{
	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
}

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(void)
{
	/* originator interval (in ms) randomized by +/- JITTER ms,
	 * converted to jiffies */
	return jiffies +
		(((atomic_read(&originator_interval) - JITTER +
		   (random32() % (2*JITTER))) * HZ) / 1000);
}

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	unsigned long send_time = jiffies; /* Starting now plus... */

	if (atomic_read(&aggregation_enabled))
		send_time += (((MAX_AGGREGATION_MS - (JITTER/2) +
				(random32() % JITTER)) * HZ) / 1000);
	else
		send_time += (((random32() % (JITTER/2)) * HZ) / 1000);

	return send_time;
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
				struct batman_if *batman_if,
				uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_active != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		printk(KERN_WARNING
		       "batman-adv: Interface %s is not up - can't send packet via that interface!\n",
		       batman_if->dev);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *) skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error.  However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* sends a raw packet: wraps the given buffer in a freshly allocated skb,
 * leaving room for the ethernet header that send_skb_packet() pushes */
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
		     struct batman_if *batman_if, uint8_t *dst_addr)
{
	struct sk_buff *skb;
	char *data;

	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
	if (!skb)
		return;
	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
	/* pull back to the batman "network header" */
	skb_pull(skb, sizeof(struct ethhdr));
	send_skb_packet(skb, batman_if, dst_addr);
}

/* send all packets aggregated in the forw_packet buffer out on the
 * given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;

	if (batman_if->if_active != IF_ACTIVE)
		return;

	packet_num = buff_pos = 0;
	batman_packet = (struct batman_packet *)
		(forw_packet->packet_buff);

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		/* only the first packet in the aggregate can be our own */
		fwd_str = (forw_packet->own && (packet_num == 0) ?
			   "Sending own" : "Forwarding");
		bat_dbg(DBG_BATMAN,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
			fwd_str,
			(packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_if->dev, batman_if->addr_str);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->packet_buff + buff_pos);
	}

	send_raw_packet(forw_packet->packet_buff,
			forw_packet->packet_len,
			batman_if, broadcastAddr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->packet_buff);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
		return;
	}

	if (forw_packet->if_incoming->if_active != IF_ACTIVE)
		return;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcast on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN,
			"%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%s]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->ttl, forw_packet->if_incoming->dev,
			forw_packet->if_incoming->addr_str);

		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				forw_packet->if_incoming,
				broadcastAddr);
		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list)
		send_packet_to_if(forw_packet, batman_if);
	rcu_read_unlock();
}

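/* resize the interface's OGM buffer so it can carry the current set of
 * local HNA announcements */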
static void rebuild_batman_packet(struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_hna = hna_local_fill_buffer(
			new_buff + sizeof(struct batman_packet),
			new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}

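/* prepare this interface's own OGM (sequence number, vis flags, HNA
 * changes) and queue it with a jittered send time */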
void schedule_own_packet(struct batman_if *batman_if)
{
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server = atomic_read(&vis_mode);

	/**
	 * the interface gets activated here rather than in
	 * hardif_activate_interface() (where the originator mac is set)
	 * to avoid racing against outdated packets, especially ones with
	 * uninitialized mac addresses, that may still sit in the packet
	 * queue
	 */
	if (batman_if->if_active == IF_TO_BE_ACTIVATED)
		batman_if->if_active = IF_ACTIVE;

	/* if the local hna has changed and this is a primary interface */
	if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
		rebuild_batman_packet(batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags = VIS_SERVER;
	else
		batman_packet->flags = 0;

	/* could be read by receive_bat_packet() */
	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time();
	add_bat_packet_to_list(batman_if->packet_buff,
			       batman_if->packet_len, batman_if, 1, send_time);
}

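/* queue an OGM received from another originator for rebroadcast:
 * decrement the TTL, substitute the tq of our best ranking neighbor
 * where appropriate and apply the hop penalty */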
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast the tq of our best ranking neighbor to ensure the
	 * rebroadcast carries our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl =
					orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq);

	bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htons(batman_packet->seqno);

	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list((unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}

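/* free a forw_packet and the skb/packet buffer it owns */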
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet->packet_buff);
	kfree(forw_packet);
}

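/* enqueue a broadcast packet on the forw_bcast_list and arm the delayed
 * work that will (re)send it after send_time jiffies */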
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	unsigned long flags;
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_add_head(&forw_packet->list, &forw_bcast_list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

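/* queue a broadcast packet for (repeated) transmission; works on a copy
 * of the given skb, so the caller retains ownership of it */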
void add_bcast_packet_to_list(struct sk_buff *skb)
{
	struct forw_packet *forw_packet;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		kfree(forw_packet);
		return;
	}

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->packet_buff = NULL;

	/* counts how many times the bcast packet has been sent */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(forw_packet, 1);
}

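/* delayed work callback: broadcast the queued packet on all interfaces
 * and requeue it until it has been sent three times or the module is
 * shutting down */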
void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;
	struct sk_buff *skb1;

	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		/* send a copy of the saved skb */
		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1,
				batman_if, broadcastAddr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send and we are not shutting
	 * down */
	if ((forw_packet->num_packets < 3) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
	else
		forw_packet_free(forw_packet);
}

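/* delayed work callback: take the queued OGM off the list, send it and,
 * for our own OGMs, schedule the next one */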
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;

	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if ((forw_packet->own) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		schedule_own_packet(forw_packet->if_incoming);

	forw_packet_free(forw_packet);
}

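/* cancel all queued broadcast and OGM packets; the list locks are
 * dropped while waiting for each delayed work to finish because the
 * work callbacks themselves take the same locks */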
void purge_outstanding_packets(void)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	unsigned long flags;

	bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bcast_list, list) {

		spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bcast_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* free batman packet list */
	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bat_list, list) {

		spin_unlock_irqrestore(&forw_bat_list_lock, flags);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bat_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}