drivers/staging/ozwpan/ozproto.c
/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbsvc.h"

#include "ozappif.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_CF_CONN_SUCCESS      1
#define OZ_CF_CONN_FAILURE      2

#define OZ_DO_STOP              1
#define OZ_DO_SLEEP             2

#define OZ_MAX_TIMER_POOL_SIZE  16

struct oz_binding {
        struct packet_type ptype;
        char name[OZ_MAX_BINDING_LEN];
        struct list_head link;
};

/*
 * Static external variables.
 */
static DEFINE_SPINLOCK(g_polling_lock);
static LIST_HEAD(g_pd_list);
static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
static struct sk_buff_head g_rx_queue;
static u8 g_session_id;
static u16 g_apps = 0x1;
static int g_processing_rx;

/*
 * Context: softirq-serialized
 */
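/* Hand out the next non-zero session id, skipping @exclude so a reconnecting
 * PD is never given the same id it presented in its connect request.
 */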
static u8 oz_get_new_session_id(u8 exclude)
{
        if (++g_session_id == 0)
                g_session_id = 1;
        if (g_session_id == exclude) {
                if (++g_session_id == 0)
                        g_session_id = 1;
        }
        return g_session_id;
}

/*
 * Context: softirq-serialized
 */
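/* Build and transmit an OZ_ELT_CONNECT_RSP frame to @pd carrying the
 * connection @status and, on success, the negotiated mode, session id and
 * application bitmap.
 */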
static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
{
        struct sk_buff *skb;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct oz_elt_connect_rsp *body;

        int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
                        sizeof(struct oz_elt_connect_rsp);
        skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == NULL)
                return;
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
        elt = (struct oz_elt *)(oz_hdr+1);
        body = (struct oz_elt_connect_rsp *)(elt+1);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        /* Fill in device header */
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                        dev->dev_addr, skb->len) < 0) {
                kfree_skb(skb);
                return;
        }
        oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
        oz_hdr->last_pkt_num = 0;
        put_unaligned(0, &oz_hdr->pkt_num);
        elt->type = OZ_ELT_CONNECT_RSP;
        elt->length = sizeof(struct oz_elt_connect_rsp);
        memset(body, 0, sizeof(struct oz_elt_connect_rsp));
        body->status = status;
        if (status == 0) {
                body->mode = pd->mode;
                body->session_id = pd->session_id;
                put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
        }
        oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d\n", status);
        dev_queue_xmit(skb);
}

/*
 * Context: softirq-serialized
 */
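/* Decode the keepalive byte from a connect/update request: the low bits are a
 * count and the type bits select the unit.  The result is stored in
 * pd->keep_alive in milliseconds; 0 means no keepalive timeout.
 */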
static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
{
        unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;

        switch (kalive & OZ_KALIVE_TYPE_MASK) {
        case OZ_KALIVE_SPECIAL:
                pd->keep_alive = keep_alive * 1000*60*60*24*20;
                break;
        case OZ_KALIVE_SECS:
                pd->keep_alive = keep_alive*1000;
                break;
        case OZ_KALIVE_MINS:
                pd->keep_alive = keep_alive*1000*60;
                break;
        case OZ_KALIVE_HOURS:
                pd->keep_alive = keep_alive*1000*60*60;
                break;
        default:
                pd->keep_alive = 0;
        }
        oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
}

/*
 * Context: softirq-serialized
 */
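/* Set the presleep timeout (the element encodes units of 100 ms, stored here
 * in ms); a value of 0 selects the default OZ_PRESLEEP_TOUT.  When
 * @start_timer is set the timeout timer is re-armed immediately, dropping
 * g_polling_lock around oz_timer_add() because that function takes the lock
 * itself.
 */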
static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
{
        if (presleep)
                pd->presleep = presleep*100;
        else
                pd->presleep = OZ_PRESLEEP_TOUT;
        if (start_timer) {
                spin_unlock(&g_polling_lock);
                oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
                spin_lock(&g_polling_lock);
        }
        oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
}

/*
 * Context: softirq-serialized
 */
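/* Process an OZ_ELT_CONNECT_REQ element.  Reuses @cur_pd if the frame came
 * from an already known PD, otherwise allocates a new PD (or adopts an
 * existing entry with the same MAC address), applies the parameters from the
 * request body, resolves the session id, starts/stops/resumes the affected
 * services and sends a connect response.  Returns the PD on success or NULL
 * if the connection was rejected.
 */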
static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
                        const u8 *pd_addr, struct net_device *net_dev)
{
        struct oz_pd *pd;
        struct oz_elt_connect_req *body =
                        (struct oz_elt_connect_req *)(elt+1);
        u8 rsp_status = OZ_STATUS_SUCCESS;
        u8 stop_needed = 0;
        u16 new_apps = g_apps;
        struct net_device *old_net_dev = NULL;
        struct oz_pd *free_pd = NULL;

        if (cur_pd) {
                pd = cur_pd;
                spin_lock_bh(&g_polling_lock);
        } else {
                struct oz_pd *pd2 = NULL;
                struct list_head *e;
                pd = oz_pd_alloc(pd_addr);
                if (pd == NULL)
                        return NULL;
                getnstimeofday(&pd->last_rx_timestamp);
                spin_lock_bh(&g_polling_lock);
                list_for_each(e, &g_pd_list) {
                        pd2 = container_of(e, struct oz_pd, link);
                        if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
                                free_pd = pd;
                                pd = pd2;
                                break;
                        }
                }
                if (pd != pd2)
                        list_add_tail(&pd->link, &g_pd_list);
        }
        if (pd == NULL) {
                spin_unlock_bh(&g_polling_lock);
                return NULL;
        }
        if (pd->net_dev != net_dev) {
                old_net_dev = pd->net_dev;
                dev_hold(net_dev);
                pd->net_dev = net_dev;
        }
        oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
        pd->max_tx_size = OZ_MAX_TX_SIZE;
        pd->mode = body->mode;
        pd->pd_info = body->pd_info;
        if (pd->mode & OZ_F_ISOC_NO_ELTS) {
                pd->ms_per_isoc = body->ms_per_isoc;
                if (!pd->ms_per_isoc)
                        pd->ms_per_isoc = 4;

                switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
                case OZ_ONE_MS_LATENCY:
                        pd->isoc_latency = (body->ms_isoc_latency &
                                        ~OZ_LATENCY_MASK) / pd->ms_per_isoc;
                        break;
                case OZ_TEN_MS_LATENCY:
                        pd->isoc_latency = ((body->ms_isoc_latency &
                                ~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
                        break;
                default:
                        pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
                }
        }
        if (body->max_len_div16)
                pd->max_tx_size = ((u16)body->max_len_div16)<<4;
        oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
               pd->max_tx_size, pd->ms_per_isoc);
        pd->max_stream_buffering = 3*1024;
        pd->pulse_period = OZ_QUANTUM;
        pd_set_presleep(pd, body->presleep, 0);
        pd_set_keepalive(pd, body->keep_alive);

        new_apps &= le16_to_cpu(get_unaligned(&body->apps));
        if ((new_apps & 0x1) && (body->session_id)) {
                if (pd->session_id) {
                        if (pd->session_id != body->session_id) {
                                rsp_status = OZ_STATUS_SESSION_MISMATCH;
                                goto done;
                        }
                } else {
                        new_apps &= ~0x1;  /* Resume not permitted */
                        pd->session_id =
                                oz_get_new_session_id(body->session_id);
                }
        } else {
                if (pd->session_id && !body->session_id) {
                        rsp_status = OZ_STATUS_SESSION_TEARDOWN;
                        stop_needed = 1;
                } else {
                        new_apps &= ~0x1;  /* Resume not permitted */
                        pd->session_id =
                                oz_get_new_session_id(body->session_id);
                }
        }
done:
        if (rsp_status == OZ_STATUS_SUCCESS) {
                u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
                u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
                u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
                spin_unlock_bh(&g_polling_lock);
                oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
                oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
                       new_apps, pd->total_apps, pd->paused_apps);
                if (start_apps) {
                        if (oz_services_start(pd, start_apps, 0))
                                rsp_status = OZ_STATUS_TOO_MANY_PDS;
                }
                if (resume_apps)
                        if (oz_services_start(pd, resume_apps, 1))
                                rsp_status = OZ_STATUS_TOO_MANY_PDS;
                if (stop_apps)
                        oz_services_stop(pd, stop_apps, 0);
                oz_pd_request_heartbeat(pd);
        } else {
                spin_unlock_bh(&g_polling_lock);
        }
        oz_send_conn_rsp(pd, rsp_status);
        if (rsp_status != OZ_STATUS_SUCCESS) {
                if (stop_needed)
                        oz_pd_stop(pd);
                oz_pd_put(pd);
                pd = NULL;
        }
        if (old_net_dev)
                dev_put(old_net_dev);
        if (free_pd)
                oz_pd_destroy(free_pd);
        return pd;
}

/*
 * Context: softirq-serialized
 */
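/* Record a farewell report received from @pd for endpoint @ep_num, replacing
 * any previously stored report for the same endpoint/index pair.
 */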
static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
                        const u8 *report, u8 len)
{
        struct oz_farewell *f;
        struct oz_farewell *f2;
        int found = 0;

        f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC);
        if (!f)
                return;
        f->ep_num = ep_num;
        f->index = index;
        f->len = len;
        memcpy(f->report, report, len);
        oz_dbg(ON, "RX: Adding farewell report\n");
        spin_lock(&g_polling_lock);
        list_for_each_entry(f2, &pd->farewell_list, link) {
                if ((f2->ep_num == ep_num) && (f2->index == index)) {
                        found = 1;
                        list_del(&f2->link);
                        break;
                }
        }
        list_add_tail(&f->link, &pd->farewell_list);
        spin_unlock(&g_polling_lock);
        if (found)
                kfree(f2);
}

/*
 * Context: softirq-serialized
 */
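/* Main receive path: validate the protocol version, look up (and time-stamp)
 * the sending PD, retire acknowledged TX frames and transmit queued frames
 * when a trigger is received, then walk the element list and dispatch each
 * element to the appropriate handler.
 */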
static void oz_rx_frame(struct sk_buff *skb)
{
        u8 *mac_hdr;
        u8 *src_addr;
        struct oz_elt *elt;
        int length;
        struct oz_pd *pd = NULL;
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
        struct timespec current_time;
        int dup = 0;
        u32 pkt_num;

        oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
               oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
        mac_hdr = skb_mac_header(skb);
        src_addr = &mac_hdr[ETH_ALEN];
        length = skb->len;

        /* Check the version field */
        if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
                oz_dbg(ON, "Incorrect protocol version: %d\n",
                       oz_get_prot_ver(oz_hdr->control));
                goto done;
        }

        pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));

        pd = oz_pd_find(src_addr);
        if (pd) {
                if (!(pd->state & OZ_PD_S_CONNECTED))
                        oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
                getnstimeofday(&current_time);
                if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
                        (pd->presleep < MSEC_PER_SEC)) {
                        oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
                        pd->last_rx_timestamp = current_time;
                }
                if (pkt_num != pd->last_rx_pkt_num) {
                        pd->last_rx_pkt_num = pkt_num;
                } else {
                        dup = 1;
                        oz_dbg(ON, "Duplicate frame\n");
                }
        }

        if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
                oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
                pd->last_sent_frame = &pd->tx_queue;
                if (oz_hdr->control & OZ_F_ACK) {
                        /* Retire completed frames */
                        oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
                }
                if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
                                (pd->state == OZ_PD_S_CONNECTED)) {
                        int backlog = pd->nb_queued_frames;
                        pd->trigger_pkt_num = pkt_num;
                        /* Send queued frames */
                        oz_send_queued_frames(pd, backlog);
                }
        }

        length -= sizeof(struct oz_hdr);
        elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));

        while (length >= sizeof(struct oz_elt)) {
                length -= sizeof(struct oz_elt) + elt->length;
                if (length < 0)
                        break;
                switch (elt->type) {
                case OZ_ELT_CONNECT_REQ:
                        oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
                        pd = oz_connect_req(pd, elt, src_addr, skb->dev);
                        break;
                case OZ_ELT_DISCONNECT:
                        oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
                        if (pd)
                                oz_pd_sleep(pd);
                        break;
                case OZ_ELT_UPDATE_PARAM_REQ: {
                                struct oz_elt_update_param *body =
                                        (struct oz_elt_update_param *)(elt + 1);
                                oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
                                if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
                                        spin_lock(&g_polling_lock);
                                        pd_set_keepalive(pd, body->keepalive);
                                        pd_set_presleep(pd, body->presleep, 1);
                                        spin_unlock(&g_polling_lock);
                                }
                        }
                        break;
                case OZ_ELT_FAREWELL_REQ: {
                                struct oz_elt_farewell *body =
                                        (struct oz_elt_farewell *)(elt + 1);
                                oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
                                /* pd is NULL if the sender is unknown;
                                 * only record farewells for known PDs.
                                 */
                                if (pd)
                                        oz_add_farewell(pd, body->ep_num,
                                                body->index, body->report,
                                                elt->length + 1 - sizeof(*body));
                        }
                        break;
                case OZ_ELT_APP_DATA:
                        if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
                                struct oz_app_hdr *app_hdr =
                                        (struct oz_app_hdr *)(elt+1);
                                if (dup)
                                        break;
                                oz_handle_app_elt(pd, app_hdr->app_id, elt);
                        }
                        break;
                default:
                        oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
                }
                elt = oz_next_elt(elt);
        }
done:
        if (pd)
                oz_pd_put(pd);
        consume_skb(skb);
}

/*
 * Context: process
 */
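/* Shut the protocol down: unregister every network binding and stop every
 * known PD.
 */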
void oz_protocol_term(void)
{
        struct oz_binding *b, *t;

        /* Walk the list of bindings and remove each one.
         */
        spin_lock_bh(&g_binding_lock);
        list_for_each_entry_safe(b, t, &g_binding, link) {
                list_del(&b->link);
                spin_unlock_bh(&g_binding_lock);
                dev_remove_pack(&b->ptype);
                if (b->ptype.dev)
                        dev_put(b->ptype.dev);
                kfree(b);
                spin_lock_bh(&g_binding_lock);
        }
        spin_unlock_bh(&g_binding_lock);
        /* Walk the list of PDs and stop each one. This causes the PD to be
         * removed from the list so we can just pull each one from the head
         * of the list.
         */
        spin_lock_bh(&g_polling_lock);
        while (!list_empty(&g_pd_list)) {
                struct oz_pd *pd =
                        list_first_entry(&g_pd_list, struct oz_pd, link);
                oz_pd_get(pd);
                spin_unlock_bh(&g_polling_lock);
                oz_pd_stop(pd);
                oz_pd_put(pd);
                spin_lock_bh(&g_polling_lock);
        }
        spin_unlock_bh(&g_polling_lock);
        oz_dbg(ON, "Protocol stopped\n");
}

/*
 * Context: softirq
 */
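/* Tasklet handler behind the heartbeat hrtimer: if the PD is still connected,
 * drive a heartbeat for all of its active applications, then drop the
 * reference taken when the tasklet was scheduled.
 */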
void oz_pd_heartbeat_handler(unsigned long data)
{
        struct oz_pd *pd = (struct oz_pd *)data;
        u16 apps = 0;

        spin_lock_bh(&g_polling_lock);
        if (pd->state & OZ_PD_S_CONNECTED)
                apps = pd->total_apps;
        spin_unlock_bh(&g_polling_lock);
        if (apps)
                oz_pd_heartbeat(pd, apps);
        oz_pd_put(pd);
}

/*
 * Context: softirq
 */
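/* Tasklet handler behind the timeout hrtimer: put the PD to sleep or stop it
 * depending on which timeout type was armed, then drop the scheduling
 * reference.
 */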
void oz_pd_timeout_handler(unsigned long data)
{
        int type;
        struct oz_pd *pd = (struct oz_pd *)data;

        spin_lock_bh(&g_polling_lock);
        type = pd->timeout_type;
        spin_unlock_bh(&g_polling_lock);
        switch (type) {
        case OZ_TIMER_TOUT:
                oz_pd_sleep(pd);
                break;
        case OZ_TIMER_STOP:
                oz_pd_stop(pd);
                break;
        }
        oz_pd_put(pd);
}

/*
 * Context: Interrupt
 */
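/* hrtimer callback for the periodic heartbeat: re-arm the timer for the next
 * pulse period and defer the real work to the heartbeat tasklet, taking a PD
 * reference that the tasklet releases.
 */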
enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
{
        struct oz_pd *pd;

        pd = container_of(timer, struct oz_pd, heartbeat);
        hrtimer_forward_now(timer, ktime_set(pd->pulse_period /
        MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC));
        oz_pd_get(pd);
        tasklet_schedule(&pd->heartbeat_tasklet);
        return HRTIMER_RESTART;
}

/*
 * Context: Interrupt
 */
enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
{
        struct oz_pd *pd;

        pd = container_of(timer, struct oz_pd, timeout);
        oz_pd_get(pd);
        tasklet_schedule(&pd->timeout_tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Context: softirq or process
 */
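/* Arm one of the PD's timers.  @due_time is a relative delay in milliseconds;
 * TOUT and STOP share the timeout hrtimer (recording which type is pending),
 * while HEARTBEAT uses the heartbeat hrtimer and is only started if it is not
 * already running.
 */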
void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
{
        spin_lock_bh(&g_polling_lock);
        switch (type) {
        case OZ_TIMER_TOUT:
        case OZ_TIMER_STOP:
                if (hrtimer_active(&pd->timeout)) {
                        hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
                        MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
                                                        NSEC_PER_MSEC));
                        hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
                } else {
                        hrtimer_start(&pd->timeout, ktime_set(due_time /
                        MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
                                        NSEC_PER_MSEC), HRTIMER_MODE_REL);
                }
                pd->timeout_type = type;
                break;
        case OZ_TIMER_HEARTBEAT:
                if (!hrtimer_active(&pd->heartbeat))
                        hrtimer_start(&pd->heartbeat, ktime_set(due_time /
                        MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
                                        NSEC_PER_MSEC), HRTIMER_MODE_REL);
                break;
        }
        spin_unlock_bh(&g_polling_lock);
}

/*
 * Context: softirq or process
 */
void oz_pd_request_heartbeat(struct oz_pd *pd)
{
        oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
                                        pd->pulse_period : OZ_QUANTUM);
}

/*
 * Context: softirq or process
 */
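/* Look up a PD by MAC address.  On success a reference is taken on the PD,
 * which the caller must release with oz_pd_put().
 */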
struct oz_pd *oz_pd_find(const u8 *mac_addr)
{
        struct oz_pd *pd;
        struct list_head *e;

        spin_lock_bh(&g_polling_lock);
        list_for_each(e, &g_pd_list) {
                pd = container_of(e, struct oz_pd, link);
                if (ether_addr_equal(pd->mac_addr, mac_addr)) {
                        atomic_inc(&pd->ref_count);
                        spin_unlock_bh(&g_polling_lock);
                        return pd;
                }
        }
        spin_unlock_bh(&g_polling_lock);
        return NULL;
}

/*
 * Context: process
 */
void oz_app_enable(int app_id, int enable)
{
        if (app_id <= OZ_APPID_MAX) {
                spin_lock_bh(&g_polling_lock);
                if (enable)
                        g_apps |= (1<<app_id);
                else
                        g_apps &= ~(1<<app_id);
                spin_unlock_bh(&g_polling_lock);
        }
}

/*
 * Context: softirq
 */
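/* Packet-type receive hook for OZ_ETHERTYPE frames.  Frames are normally
 * processed inline, but if another frame is already being handled on this
 * path the new one is queued on g_rx_queue and drained by the instance that
 * is currently processing, so oz_rx_frame() never runs concurrently with
 * itself (the "softirq-serialized" context noted elsewhere in this file).
 */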
static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
                struct packet_type *pt, struct net_device *orig_dev)
{
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (skb == NULL)
                return 0;
        spin_lock_bh(&g_rx_queue.lock);
        if (g_processing_rx) {
                /* We already hold the lock so use __ variant.
                 */
                __skb_queue_head(&g_rx_queue, skb);
                spin_unlock_bh(&g_rx_queue.lock);
        } else {
                g_processing_rx = 1;
                do {
                        spin_unlock_bh(&g_rx_queue.lock);
                        oz_rx_frame(skb);
                        spin_lock_bh(&g_rx_queue.lock);
                        if (skb_queue_empty(&g_rx_queue)) {
                                g_processing_rx = 0;
                                spin_unlock_bh(&g_rx_queue.lock);
                                break;
                        }
                        /* We already hold the lock so use __ variant.
                         */
                        skb = __skb_dequeue(&g_rx_queue);
                } while (1);
        }
        return 0;
}

/*
 * Context: process
 */
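/* Bind the protocol to a network interface.  An empty or NULL @net_dev name
 * binds to all interfaces; otherwise the named device is looked up and the
 * packet handler is registered against that device only.
 */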
void oz_binding_add(const char *net_dev)
{
        struct oz_binding *binding;

        binding = kmalloc(sizeof(struct oz_binding), GFP_KERNEL);
        if (binding) {
                binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
                binding->ptype.func = oz_pkt_recv;
                if (net_dev && *net_dev) {
                        memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
                        oz_dbg(ON, "Adding binding: %s\n", net_dev);
                        binding->ptype.dev =
                                dev_get_by_name(&init_net, net_dev);
                        if (binding->ptype.dev == NULL) {
                                oz_dbg(ON, "Netdev %s not found\n", net_dev);
                                kfree(binding);
                                binding = NULL;
                        }
                } else {
                        oz_dbg(ON, "Binding to all netcards\n");
                        memset(binding->name, 0, OZ_MAX_BINDING_LEN);
                        binding->ptype.dev = NULL;
                }
                if (binding) {
                        dev_add_pack(&binding->ptype);
                        spin_lock_bh(&g_binding_lock);
                        list_add_tail(&binding->link, &g_binding);
                        spin_unlock_bh(&g_binding_lock);
                }
        }
}

/*
 * Context: process
 */
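/* Stop every PD that was discovered on @net_dev.  The matching PDs are moved
 * to a private list under the polling lock and then stopped outside it.
 */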
static void pd_stop_all_for_device(struct net_device *net_dev)
{
        struct list_head h;
        struct oz_pd *pd;
        struct oz_pd *n;

        INIT_LIST_HEAD(&h);
        spin_lock_bh(&g_polling_lock);
        list_for_each_entry_safe(pd, n, &g_pd_list, link) {
                if (pd->net_dev == net_dev) {
                        list_move(&pd->link, &h);
                        oz_pd_get(pd);
                }
        }
        spin_unlock_bh(&g_polling_lock);
        while (!list_empty(&h)) {
                pd = list_first_entry(&h, struct oz_pd, link);
                oz_pd_stop(pd);
                oz_pd_put(pd);
        }
}

/*
 * Context: process
 */
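/* Remove the binding for the named interface, if one exists: unregister the
 * packet handler, release the device reference and stop any PDs that were
 * using that device.
 */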
void oz_binding_remove(const char *net_dev)
{
        struct oz_binding *binding;
        int found = 0;

        oz_dbg(ON, "Removing binding: %s\n", net_dev);
        spin_lock_bh(&g_binding_lock);
        list_for_each_entry(binding, &g_binding, link) {
                if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
                        oz_dbg(ON, "Binding '%s' found\n", net_dev);
                        found = 1;
                        break;
                }
        }
        spin_unlock_bh(&g_binding_lock);
        if (found) {
                dev_remove_pack(&binding->ptype);
                if (binding->ptype.dev) {
                        dev_put(binding->ptype.dev);
                        pd_stop_all_for_device(binding->ptype.dev);
                }
                list_del(&binding->link);
                kfree(binding);
        }
}

/*
 * Context: process
 */
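/* Copy the next comma-separated device name from @s into @dname (at most
 * @max_size - 1 characters plus a terminating NUL) and return the position in
 * @s where copying stopped.
 */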
static char *oz_get_next_device_name(char *s, char *dname, int max_size)
{
        while (*s == ',')
                s++;
        while (*s && (*s != ',') && max_size > 1) {
                *dname++ = *s++;
                max_size--;
        }
        *dname = 0;
        return s;
}

/*
 * Context: process
 */
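/* Initialise the protocol layer.  @devs is either "*" (bind to every network
 * interface) or a comma-separated list of interface names to bind to.
 */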
int oz_protocol_init(char *devs)
{
        skb_queue_head_init(&g_rx_queue);
        if (devs && (devs[0] == '*')) {
                oz_binding_add(NULL);
        } else {
                char d[32];
                while (*devs) {
                        devs = oz_get_next_device_name(devs, d, sizeof(d));
                        if (d[0])
                                oz_binding_add(d);
                }
        }
        return 0;
}

/*
 * Context: process
 */
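/* Copy up to @max_count PD MAC addresses into @addr and return how many were
 * copied.
 */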
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
{
        struct oz_pd *pd;
        struct list_head *e;
        int count = 0;

        spin_lock_bh(&g_polling_lock);
        list_for_each(e, &g_pd_list) {
                if (count >= max_count)
                        break;
                pd = container_of(e, struct oz_pd, link);
                memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
        }
        spin_unlock_bh(&g_polling_lock);
        return count;
}

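/* Wrappers that expose the file-scope polling lock to other ozwpan source
 * files that need to synchronise with PD-list changes.
 */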
void oz_polling_lock_bh(void)
{
        spin_lock_bh(&g_polling_lock);
}

void oz_polling_unlock_bh(void)
{
        spin_unlock_bh(&g_polling_lock);
}