/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *          Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
                                (((pow)-((x)&((pow)-1)))))
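/*
 * For example, PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0: the macro
 * yields the number of bytes needed to round x up to the next multiple
 * of pow, where pow must be a power of two.
 */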

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");

/*
 * HSI padding options.
 * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput, but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low one.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);

        /* Schedule power down work queue. */
        if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
        struct sk_buff *skb;

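        /*
         * Drain the TX queue. Note that the loop below exits with the
         * lock still held once the queue is empty, so the state change
         * and timer rearm after it run under the same lock as the
         * final dequeue.
         */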
        for (;;) {
                spin_lock_bh(&cfhsi->lock);
                skb = skb_dequeue(&cfhsi->qhead);
                if (!skb)
                        break;

                cfhsi->ndev->stats.tx_errors++;
                cfhsi->ndev->stats.tx_dropped++;
                spin_unlock_bh(&cfhsi->lock);
                kfree_skb(skb);
        }
        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
        if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                mod_timer(&cfhsi->timer,
                        jiffies + cfhsi->inactivity_timeout);
        spin_unlock_bh(&cfhsi->lock);
}

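/*
 * Drain any stale data left in the HSI FIFO at startup: read chunks into
 * a scratch buffer until the reported occupancy reaches zero. Completion
 * of each read is signalled by the RX-done callback via the
 * CFHSI_FLUSH_FIFO bit and the flush_fifo_wait queue.
 */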
static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
        char buffer[32]; /* Any reasonable value */
        size_t fifo_occupancy;
        int ret;

        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);

        do {
                ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
                                &fifo_occupancy);
                if (ret) {
                        dev_warn(&cfhsi->ndev->dev,
                                "%s: can't get FIFO occupancy: %d.\n",
                                __func__, ret);
                        break;
                } else if (!fifo_occupancy)
                        /* No more data, exiting normally. */
                        break;

                fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
                set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
                ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
                                cfhsi->dev);
                if (ret) {
                        clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
                        dev_warn(&cfhsi->ndev->dev,
                                "%s: can't read data: %d.\n",
                                __func__, ret);
                        break;
                }

                ret = 5 * HZ;
                ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
                         !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

                if (ret < 0) {
                        dev_warn(&cfhsi->ndev->dev,
                                "%s: can't wait for flush complete: %d.\n",
                                __func__, ret);
                        break;
                } else if (!ret) {
                        ret = -ETIMEDOUT;
                        dev_warn(&cfhsi->ndev->dev,
                                "%s: timeout waiting for flush complete.\n",
                                __func__);
                        break;
                }
        } while (1);

        return ret;
}

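/*
 * Build an outgoing HSI frame in 'desc'. The transmit buffer is laid out
 * as a descriptor (header, embedded-frame offset, per-frame length table
 * and embedded-frame area) followed by up to CFHSI_MAX_PKTS payload CAIF
 * frames, each preceded by head-padding bytes. A CAIF frame small enough
 * to fit is copied into the embedded area instead. Returns the total
 * number of bytes to transmit, or 0 if the TX queue was empty.
 */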
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int nfrms = 0;
        int pld_len = 0;
        struct sk_buff *skb;
        u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

        skb = skb_dequeue(&cfhsi->qhead);
        if (!skb)
                return 0;

        /* Clear offset. */
        desc->offset = 0;

        /* Check if we can embed a CAIF frame. */
        if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
                struct caif_payload_info *info;
                int hpad = 0;
                int tpad = 0;

                /* Calculate needed head alignment and tail alignment. */
                info = (struct caif_payload_info *)&skb->cb;

                hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
                tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

                /* Check if frame still fits with added alignment. */
                if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
                        u8 *pemb = desc->emb_frm;
                        desc->offset = CFHSI_DESC_SHORT_SZ;
                        *pemb = (u8)(hpad - 1);
                        pemb += hpad;

                        /* Update network statistics. */
                        cfhsi->ndev->stats.tx_packets++;
                        cfhsi->ndev->stats.tx_bytes += skb->len;

                        /* Copy in embedded CAIF frame. */
                        skb_copy_bits(skb, 0, pemb, skb->len);
                        consume_skb(skb);
                        skb = NULL;
                }
        }

        /* Create payload CAIF frames. */
        pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
        while (nfrms < CFHSI_MAX_PKTS) {
                struct caif_payload_info *info;
                int hpad = 0;
                int tpad = 0;

                if (!skb)
                        skb = skb_dequeue(&cfhsi->qhead);

                if (!skb)
                        break;

                /* Calculate needed head alignment and tail alignment. */
                info = (struct caif_payload_info *)&skb->cb;

                hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
                tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

                /* Fill in CAIF frame length in descriptor. */
                desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

                /* Fill head padding information. */
                *pfrm = (u8)(hpad - 1);
                pfrm += hpad;

                /* Update network statistics. */
                cfhsi->ndev->stats.tx_packets++;
                cfhsi->ndev->stats.tx_bytes += skb->len;

                /* Copy in CAIF frame. */
                skb_copy_bits(skb, 0, pfrm, skb->len);

                /* Update payload length. */
                pld_len += desc->cffrm_len[nfrms];

                /* Update frame pointer. */
                pfrm += skb->len + tpad;
                consume_skb(skb);
                skb = NULL;

                /* Update number of frames. */
                nfrms++;
        }

        /* Unused length fields should be zero-filled (according to SPEC). */
        while (nfrms < CFHSI_MAX_PKTS) {
                desc->cffrm_len[nfrms] = 0x0000;
                nfrms++;
        }

        /* Check if we can piggy-back another descriptor. */
        skb = skb_peek(&cfhsi->qhead);
        if (skb)
                desc->header |= CFHSI_PIGGY_DESC;
        else
                desc->header &= ~CFHSI_PIGGY_DESC;

        return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
        struct cfhsi_desc *desc = NULL;
        int len = 0;
        int res;

        dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        desc = (struct cfhsi_desc *)cfhsi->tx_buf;

        do {
                /*
                 * Send flow on if flow off has been previously signalled
                 * and the number of packets is below the low water mark.
                 */
                spin_lock_bh(&cfhsi->lock);
                if (cfhsi->flow_off_sent &&
                                cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
                                cfhsi->cfdev.flowctrl) {

                        cfhsi->flow_off_sent = 0;
                        cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
                }
                spin_unlock_bh(&cfhsi->lock);

                /* Create HSI frame. */
                do {
                        len = cfhsi_tx_frm(desc, cfhsi);
                        if (!len) {
                                spin_lock_bh(&cfhsi->lock);
                                if (unlikely(skb_peek(&cfhsi->qhead))) {
                                        spin_unlock_bh(&cfhsi->lock);
                                        continue;
                                }
                                cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
                                /* Start inactivity timer. */
                                mod_timer(&cfhsi->timer,
                                        jiffies + cfhsi->inactivity_timeout);
                                spin_unlock_bh(&cfhsi->lock);
                                goto done;
                        }
                } while (!len);

                /* Set up new transfer. */
                res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
                if (WARN_ON(res < 0)) {
                        dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
                                __func__, res);
                }
        } while (res < 0);

done:
        return;
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
        struct cfhsi *cfhsi;

        cfhsi = container_of(drv, struct cfhsi, drv);
        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;
        cfhsi_tx_done(cfhsi);
}

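/*
 * Parse a received descriptor: deliver the embedded CAIF frame, if any,
 * to the network stack and return the size of the payload transfer that
 * the descriptor announces (including a trailing piggy-backed descriptor
 * when present), or a negative error on a malformed descriptor.
 */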
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int xfer_sz = 0;
        int nfrms = 0;
        u16 *plen = NULL;
        u8 *pfrm = NULL;

        if ((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
                dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
                        __func__);
                return -EPROTO;
        }

        /* Check for embedded CAIF frame. */
        if (desc->offset) {
                struct sk_buff *skb;
                u8 *dst = NULL;
                int len = 0;
                pfrm = ((u8 *)desc) + desc->offset;

                /* Remove offset padding. */
                pfrm += *pfrm + 1;

                /* Read length of CAIF frame (little endian). */
                len = *pfrm;
                len |= ((*(pfrm+1)) << 8) & 0xFF00;
                len += 2;       /* Add FCS fields. */

                /* Sanity check length of CAIF frame. */
                if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
                        dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
                                __func__);
                        return -EPROTO;
                }

                /* Allocate SKB (OK even in IRQ context). */
                skb = alloc_skb(len + 1, GFP_ATOMIC);
                if (!skb) {
                        dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
                                __func__);
                        return -ENOMEM;
                }
                caif_assert(skb != NULL);

                dst = skb_put(skb, len);
                memcpy(dst, pfrm, len);

                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);
                skb->dev = cfhsi->ndev;

                /*
                 * We are called from an arch-specific platform device.
                 * Unfortunately we don't know what context we're
                 * running in.
                 */
                if (in_interrupt())
                        netif_rx(skb);
                else
                        netif_rx_ni(skb);

                /* Update network statistics. */
                cfhsi->ndev->stats.rx_packets++;
                cfhsi->ndev->stats.rx_bytes += len;
        }

        /* Calculate transfer length. */
        plen = desc->cffrm_len;
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                xfer_sz += *plen;
                plen++;
                nfrms++;
        }

        /* Check for piggy-backed descriptor. */
        if (desc->header & CFHSI_PIGGY_DESC)
                xfer_sz += CFHSI_DESC_SZ;

        if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
                dev_err(&cfhsi->ndev->dev,
                                "%s: Invalid payload len: %d, ignored.\n",
                        __func__, xfer_sz);
                return -EPROTO;
        }
        return xfer_sz;
}

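/*
 * Like cfhsi_rx_desc(), but only computes the expected payload length
 * from the descriptor's frame-length table without delivering anything;
 * used to size the next RX transfer before the payload has arrived.
 */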
static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
        int xfer_sz = 0;
        int nfrms = 0;
        u16 *plen;

        if ((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

                pr_err("Invalid descriptor. %x %x\n", desc->header,
                                desc->offset);
                return -EPROTO;
        }

        /* Calculate transfer length. */
        plen = desc->cffrm_len;
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                xfer_sz += *plen;
                plen++;
                nfrms++;
        }

        if (xfer_sz % 4) {
                pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
                return -EPROTO;
        }
        return xfer_sz;
}

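/*
 * Walk the payload area and deliver each CAIF frame to the network stack.
 * On allocation failure the current frame index is saved in
 * rx_state.nfrms, so a retry can skip the frames already delivered.
 * Returns the number of payload bytes consumed or a negative error.
 */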
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int rx_sz = 0;
        int nfrms = 0;
        u16 *plen = NULL;
        u8 *pfrm = NULL;

        /* Sanity check header and offset. */
        if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
                dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
                        __func__);
                return -EPROTO;
        }

        /* Set frame pointer to start of payload. */
        pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
        plen = desc->cffrm_len;

        /* Skip already processed frames. */
        while (nfrms < cfhsi->rx_state.nfrms) {
                pfrm += *plen;
                rx_sz += *plen;
                plen++;
                nfrms++;
        }

        /* Parse payload. */
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                struct sk_buff *skb;
                u8 *dst = NULL;
                u8 *pcffrm = NULL;
                int len = 0;

                /* CAIF frame starts after head padding. */
                pcffrm = pfrm + *pfrm + 1;

                /* Read length of CAIF frame (little endian). */
                len = *pcffrm;
                len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
                len += 2;       /* Add FCS fields. */

                /* Sanity check length of CAIF frames. */
                if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
                        dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
                                __func__);
                        return -EPROTO;
                }

                /* Allocate SKB (OK even in IRQ context). */
                skb = alloc_skb(len + 1, GFP_ATOMIC);
                if (!skb) {
                        dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
                                __func__);
                        cfhsi->rx_state.nfrms = nfrms;
                        return -ENOMEM;
                }
                caif_assert(skb != NULL);

                dst = skb_put(skb, len);
                memcpy(dst, pcffrm, len);

                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);
                skb->dev = cfhsi->ndev;

                /*
                 * We're called from a platform device,
                 * and don't know the context we're running in.
                 */
                if (in_interrupt())
                        netif_rx(skb);
                else
                        netif_rx_ni(skb);

                /* Update network statistics. */
                cfhsi->ndev->stats.rx_packets++;
                cfhsi->ndev->stats.rx_bytes += len;

                pfrm += *plen;
                rx_sz += *plen;
                plen++;
                nfrms++;
        }

        return rx_sz;
}

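/*
 * RX completion handler. Reception alternates between two states:
 * CFHSI_RX_STATE_DESC (a descriptor has just been read) and
 * CFHSI_RX_STATE_PAYLOAD (payload, possibly with a piggy-backed
 * descriptor at its tail, has just been read). The next transfer is
 * set up before the current buffer is parsed; the flip buffer lets
 * that transfer proceed while the previous data is still in use.
 */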
static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
        int res;
        int desc_pld_len = 0, rx_len, rx_state;
        struct cfhsi_desc *desc = NULL;
        u8 *rx_ptr, *rx_buf;
        struct cfhsi_desc *piggy_desc = NULL;

        desc = (struct cfhsi_desc *)cfhsi->rx_buf;

        dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Update inactivity timer if pending. */
        spin_lock_bh(&cfhsi->lock);
        mod_timer_pending(&cfhsi->timer,
                        jiffies + cfhsi->inactivity_timeout);
        spin_unlock_bh(&cfhsi->lock);

        if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
                desc_pld_len = cfhsi_rx_desc_len(desc);

                if (desc_pld_len < 0)
                        goto out_of_sync;

                rx_buf = cfhsi->rx_buf;
                rx_len = desc_pld_len;
                if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
                        rx_len += CFHSI_DESC_SZ;
                if (desc_pld_len == 0)
                        rx_buf = cfhsi->rx_flip_buf;
        } else {
                rx_buf = cfhsi->rx_flip_buf;

                rx_len = CFHSI_DESC_SZ;
                if (cfhsi->rx_state.pld_len > 0 &&
                                (desc->header & CFHSI_PIGGY_DESC)) {

                        piggy_desc = (struct cfhsi_desc *)
                                (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
                                                cfhsi->rx_state.pld_len);

                        cfhsi->rx_state.piggy_desc = true;

                        /* Extract payload len from piggy-backed descriptor. */
                        desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
                        if (desc_pld_len < 0)
                                goto out_of_sync;

                        if (desc_pld_len > 0)
                                rx_len = desc_pld_len;

                        if (desc_pld_len > 0 &&
                                        (piggy_desc->header & CFHSI_PIGGY_DESC))
                                rx_len += CFHSI_DESC_SZ;

                        /*
                         * Copy needed information from the piggy-backed
                         * descriptor to the descriptor at the start.
                         */
                        memcpy(rx_buf, (u8 *)piggy_desc,
                                        CFHSI_DESC_SHORT_SZ);
                        /* Mark no embedded frame here. */
                        piggy_desc->offset = 0;
                        if (desc_pld_len == -EPROTO)
                                goto out_of_sync;
                }
        }

        if (desc_pld_len) {
                rx_state = CFHSI_RX_STATE_PAYLOAD;
                rx_ptr = rx_buf + CFHSI_DESC_SZ;
        } else {
                rx_state = CFHSI_RX_STATE_DESC;
                rx_ptr = rx_buf;
                rx_len = CFHSI_DESC_SZ;
        }

        /* Initiate next read. */
        if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
                /* Set up new transfer. */
                dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
                                __func__);

                res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
                                cfhsi->dev);
                if (WARN_ON(res < 0)) {
                        dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
                                __func__, res);
                        cfhsi->ndev->stats.rx_errors++;
                        cfhsi->ndev->stats.rx_dropped++;
                }
        }

        if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
                /* Extract payload from descriptor. */
                if (cfhsi_rx_desc(desc, cfhsi) < 0)
                        goto out_of_sync;
        } else {
                /* Extract payload. */
                if (cfhsi_rx_pld(desc, cfhsi) < 0)
                        goto out_of_sync;
                if (piggy_desc) {
                        /* Extract any payload in piggyback descriptor. */
                        if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
                                goto out_of_sync;
                }
        }

        /* Update state info. */
        memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
        cfhsi->rx_state.state = rx_state;
        cfhsi->rx_ptr = rx_ptr;
        cfhsi->rx_len = rx_len;
        cfhsi->rx_state.pld_len = desc_pld_len;
        cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

        if (rx_buf != cfhsi->rx_buf)
                swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
        return;

out_of_sync:
        dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
        print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
                        cfhsi->rx_buf, CFHSI_DESC_SZ);
        schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);

        cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
        struct cfhsi *cfhsi;

        cfhsi = container_of(drv, struct cfhsi, drv);
        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
                wake_up_interruptible(&cfhsi->flush_fifo_wait);
        else
                cfhsi_rx_done(cfhsi);
}

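/*
 * Work item that powers the link up: assert the wake line, wait for the
 * peer's acknowledgement, then restart RX and, if packets are queued,
 * kick off a TX transfer. A timeout is double-checked against the FIFO
 * and the peer wake line in case the acknowledge interrupt was missed.
 */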
static void cfhsi_wake_up(struct work_struct *work)
{
        struct cfhsi *cfhsi = NULL;
        int res;
        int len;
        long ret;

        cfhsi = container_of(work, struct cfhsi, wake_up_work);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
                /* This happens when wakeup is requested by
                 * both ends at the same time. */
                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
                return;
        }

        /* Activate wake line. */
        cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

        dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
                __func__);

        /* Wait for acknowledge. */
        ret = CFHSI_WAKE_TOUT;
        ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
                                        test_and_clear_bit(CFHSI_WAKE_UP_ACK,
                                                        &cfhsi->bits), ret);
        if (unlikely(ret < 0)) {
                /* Interrupted by signal. */
                dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
                        __func__, ret);

                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
                return;
        } else if (!ret) {
                bool ca_wake = false;
                size_t fifo_occupancy = 0;

                /* Wakeup timeout. */
                dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
                        __func__);

                /* Check FIFO to see if the modem has sent something. */
                WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
                                        &fifo_occupancy));

                dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
                                __func__, (unsigned) fifo_occupancy);

                /* Check if we missed the interrupt. */
                WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
                                                        &ca_wake));

                if (ca_wake) {
                        dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed!\n",
                                __func__);

                        /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
                        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

                        /* Continue execution. */
                        goto wake_ack;
                }

                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
                return;
        }
wake_ack:
        dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
                __func__);

        /* Mark us awake and clear the wake-up request. */
        set_bit(CFHSI_AWAKE, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

        /* Resume read operation. */
        dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
        res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

        if (WARN_ON(res < 0))
                dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);

        /* Clear power up acknowledgement. */
        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

        spin_lock_bh(&cfhsi->lock);

        /* Resume transmit if queue is not empty. */
        if (!skb_peek(&cfhsi->qhead)) {
                dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
                        __func__);
                /* Start inactivity timer. */
                mod_timer(&cfhsi->timer,
                                jiffies + cfhsi->inactivity_timeout);
                spin_unlock_bh(&cfhsi->lock);
                return;
        }

        dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
                __func__);

        spin_unlock_bh(&cfhsi->lock);

        /* Create HSI frame. */
        len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

        if (likely(len > 0)) {
                /* Set up new transfer. */
                res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
                if (WARN_ON(res < 0)) {
                        dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
                                __func__, res);
                        cfhsi_abort_tx(cfhsi);
                }
        } else {
                dev_err(&cfhsi->ndev->dev,
                                "%s: Failed to create HSI frame: %d.\n",
                                __func__, len);
        }
}

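/*
 * Work item that powers the link down after the inactivity timeout:
 * deactivate the wake line, wait for the peer's acknowledgement, then
 * poll until the FIFO has drained before cancelling the pending RX.
 */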
static void cfhsi_wake_down(struct work_struct *work)
{
        long ret;
        struct cfhsi *cfhsi = NULL;
        size_t fifo_occupancy = 0;
        int retry = CFHSI_WAKE_TOUT;

        cfhsi = container_of(work, struct cfhsi, wake_down_work);
        dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Deactivate wake line. */
        cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

        /* Wait for acknowledge. */
        ret = CFHSI_WAKE_TOUT;
        ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
                                        test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
                                                        &cfhsi->bits), ret);
        if (ret < 0) {
                /* Interrupted by signal. */
                dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
                        __func__, ret);
                return;
        } else if (!ret) {
                bool ca_wake = true;

                /* Timeout. */
                dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);

                /* Check if we missed the interrupt. */
                WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
                                                        &ca_wake));
                if (!ca_wake)
                        dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed!\n",
                                __func__);
        }

        /* Check FIFO occupancy. */
        while (retry) {
                WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
                                                        &fifo_occupancy));

                if (!fifo_occupancy)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1);
                retry--;
        }

        if (!retry)
                dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);

        /* Clear AWAKE condition. */
        clear_bit(CFHSI_AWAKE, &cfhsi->bits);

        /* Cancel pending RX requests. */
        cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

        rtnl_lock();
        dev_close(cfhsi->ndev);
        rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(drv, struct cfhsi, drv);
        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);

        set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
        wake_up_interruptible(&cfhsi->wake_up_wait);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Schedule wake up work queue if the peer initiates. */
        if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
                queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(drv, struct cfhsi, drv);
        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);

        /* Initiating low power is only permitted by the host (us). */
        set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
        wake_up_interruptible(&cfhsi->wake_down_wait);
}

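/*
 * Transmit entry point. The skb is queued and flow control is asserted
 * towards the CAIF stack once the queue exceeds the high water mark. If
 * the link is already awake (the inactivity timer was running) the frame
 * is sent immediately; otherwise the wake-up work item is scheduled and
 * transmission starts from cfhsi_wake_up().
 */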
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct cfhsi *cfhsi = NULL;
        int start_xfer = 0;
        int timer_active;

        if (!dev)
                return -EINVAL;

        cfhsi = netdev_priv(dev);

        spin_lock_bh(&cfhsi->lock);

        skb_queue_tail(&cfhsi->qhead, skb);

        /* Sanity check; xmit should not be called after unregister_netdev. */
        if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
                spin_unlock_bh(&cfhsi->lock);
                cfhsi_abort_tx(cfhsi);
                return -EINVAL;
        }

        /* Send flow off if number of packets is above high water mark. */
        if (!cfhsi->flow_off_sent &&
                cfhsi->qhead.qlen > cfhsi->q_high_mark &&
                cfhsi->cfdev.flowctrl) {
                cfhsi->flow_off_sent = 1;
                cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
        }

        if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
                cfhsi->tx_state = CFHSI_TX_STATE_XFER;
                start_xfer = 1;
        }

        if (!start_xfer) {
                spin_unlock_bh(&cfhsi->lock);
                return 0;
        }

        /* Delete inactivity timer if started. */
        timer_active = del_timer_sync(&cfhsi->timer);

        spin_unlock_bh(&cfhsi->lock);

        if (timer_active) {
                struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
                int len;
                int res;

                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
                WARN_ON(!len);

                /* Set up new transfer. */
                res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
                if (WARN_ON(res < 0)) {
                        dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
                                __func__, res);
                        cfhsi_abort_tx(cfhsi);
                }
        } else {
                /* Schedule wake up work queue if we initiate. */
                if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
                        queue_work(cfhsi->wq, &cfhsi->wake_up_work);
        }

        return 0;
}

static int cfhsi_open(struct net_device *dev)
{
        netif_wake_queue(dev);

        return 0;
}

static int cfhsi_close(struct net_device *dev)
{
        netif_stop_queue(dev);

        return 0;
}

static const struct net_device_ops cfhsi_ops = {
        .ndo_open = cfhsi_open,
        .ndo_stop = cfhsi_close,
        .ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
        struct cfhsi *cfhsi = netdev_priv(dev);
        dev->features = 0;
        dev->netdev_ops = &cfhsi_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
        dev->tx_queue_len = 0;
        dev->destructor = free_netdev;
        skb_queue_head_init(&cfhsi->qhead);
        cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
        cfhsi->cfdev.use_frag = false;
        cfhsi->cfdev.use_stx = false;
        cfhsi->cfdev.use_fcs = false;
        cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
        struct cfhsi *cfhsi = NULL;
        struct net_device *ndev;
        struct cfhsi_dev *dev;
        int res;

        ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
        if (!ndev)
                return -ENODEV;

        cfhsi = netdev_priv(ndev);
        cfhsi->ndev = ndev;
        cfhsi->pdev = pdev;

        /* Initialize state variables. */
        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
        cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

        /* Set flow info. */
        cfhsi->flow_off_sent = 0;
        cfhsi->q_low_mark = LOW_WATER_MARK;
        cfhsi->q_high_mark = HIGH_WATER_MARK;

        /* Assign the HSI device. */
        dev = (struct cfhsi_dev *)pdev->dev.platform_data;
        cfhsi->dev = dev;

        /* Assign the driver to this HSI device. */
        dev->drv = &cfhsi->drv;

        /*
         * Allocate a TX buffer with the size of an HSI packet descriptor
         * and the necessary room for CAIF payload frames.
         */
        cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
        if (!cfhsi->tx_buf) {
                res = -ENODEV;
                goto err_alloc_tx;
        }

        /*
         * Allocate an RX buffer with the size of two HSI packet descriptors
         * and the necessary room for CAIF payload frames.
         */
        cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
        if (!cfhsi->rx_buf) {
                res = -ENODEV;
                goto err_alloc_rx;
        }

        cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
        if (!cfhsi->rx_flip_buf) {
                res = -ENODEV;
                goto err_alloc_rx_flip;
        }

        /* Pre-calculate inactivity timeout. */
        if (inactivity_timeout != -1) {
                cfhsi->inactivity_timeout =
                                inactivity_timeout * HZ / 1000;
                if (!cfhsi->inactivity_timeout)
                        cfhsi->inactivity_timeout = 1;
                else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
                        cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
        } else {
                cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
        }
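        /*
         * For example, with HZ == 100 the default 1000 ms becomes 100
         * jiffies; values that round down to zero are clamped to one
         * jiffy, and -1 effectively disables the timeout by using
         * NEXT_TIMER_MAX_DELTA.
         */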

        /* Initialize receive variables. */
        cfhsi->rx_ptr = cfhsi->rx_buf;
        cfhsi->rx_len = CFHSI_DESC_SZ;

        /* Initialize spin locks. */
        spin_lock_init(&cfhsi->lock);

        /* Set up the driver. */
        cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
        cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
        cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
        cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

        /* Initialize the work queues. */
        INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
        INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
        INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

        /* Clear all bit fields. */
        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
        clear_bit(CFHSI_AWAKE, &cfhsi->bits);

        /* Create work thread. */
        cfhsi->wq = create_singlethread_workqueue(pdev->name);
        if (!cfhsi->wq) {
                dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
                        __func__);
                res = -ENODEV;
                goto err_create_wq;
        }

        /* Initialize wait queues. */
        init_waitqueue_head(&cfhsi->wake_up_wait);
        init_waitqueue_head(&cfhsi->wake_down_wait);
        init_waitqueue_head(&cfhsi->flush_fifo_wait);

        /* Set up the inactivity timer. */
        init_timer(&cfhsi->timer);
        cfhsi->timer.data = (unsigned long)cfhsi;
        cfhsi->timer.function = cfhsi_inactivity_tout;
        /* Set up the slowpath RX timer. */
        init_timer(&cfhsi->rx_slowpath_timer);
        cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
        cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;

        /* Add CAIF HSI device to list. */
        spin_lock(&cfhsi_list_lock);
        list_add_tail(&cfhsi->list, &cfhsi_list);
        spin_unlock(&cfhsi_list_lock);

        /* Activate HSI interface. */
        res = cfhsi->dev->cfhsi_up(cfhsi->dev);
        if (res) {
                dev_err(&cfhsi->ndev->dev,
                        "%s: can't activate HSI interface: %d.\n",
                        __func__, res);
                goto err_activate;
        }

        /* Flush FIFO. */
        res = cfhsi_flush_fifo(cfhsi);
        if (res) {
                dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
                        __func__, res);
                goto err_net_reg;
        }

        /* Register network device. */
        res = register_netdev(ndev);
        if (res) {
                dev_err(&ndev->dev, "%s: Registration error: %d.\n",
                        __func__, res);
                goto err_net_reg;
        }

        netif_stop_queue(ndev);

        return res;

 err_net_reg:
        cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
        destroy_workqueue(cfhsi->wq);
 err_create_wq:
        kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
        kfree(cfhsi->rx_buf);
 err_alloc_rx:
        kfree(cfhsi->tx_buf);
 err_alloc_tx:
        free_netdev(ndev);

        return res;
}

static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
        u8 *tx_buf, *rx_buf;

        /* Stop TXing. */
        netif_tx_stop_all_queues(cfhsi->ndev);

        /* Going to shut down the driver. */
        set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

        /* Flush workqueue. */
        flush_workqueue(cfhsi->wq);

        /* Delete timers if pending. */
        del_timer_sync(&cfhsi->timer);
        del_timer_sync(&cfhsi->rx_slowpath_timer);

        /* Cancel pending RX request (if any). */
        cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

        /* Destroy workqueue. */
        destroy_workqueue(cfhsi->wq);

        /* Store buffers: will be freed later. */
        tx_buf = cfhsi->tx_buf;
        rx_buf = cfhsi->rx_buf;

        /* Flush transmit queues. */
        cfhsi_abort_tx(cfhsi);

        /* Deactivate interface. */
        cfhsi->dev->cfhsi_down(cfhsi->dev);

        /* Finally unregister the network device. */
        unregister_netdev(cfhsi->ndev);

        /* Free buffers. */
        kfree(tx_buf);
        kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
        struct list_head *list_node;
        struct list_head *n;
        struct cfhsi *cfhsi = NULL;
        struct cfhsi_dev *dev;

        dev = (struct cfhsi_dev *)pdev->dev.platform_data;
        spin_lock(&cfhsi_list_lock);
        list_for_each_safe(list_node, n, &cfhsi_list) {
                cfhsi = list_entry(list_node, struct cfhsi, list);
                /* Find the corresponding device. */
                if (cfhsi->dev == dev) {
                        /* Remove from list. */
                        list_del(list_node);
                        spin_unlock(&cfhsi_list_lock);

                        /* Shutdown driver. */
                        cfhsi_shutdown(cfhsi);

                        return 0;
                }
        }
        spin_unlock(&cfhsi_list_lock);
        return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
        .probe = cfhsi_probe,
        .remove = cfhsi_remove,
        .driver = {
                   .name = "cfhsi",
                   .owner = THIS_MODULE,
                   },
};

static void __exit cfhsi_exit_module(void)
{
        struct list_head *list_node;
        struct list_head *n;
        struct cfhsi *cfhsi = NULL;

        spin_lock(&cfhsi_list_lock);
        list_for_each_safe(list_node, n, &cfhsi_list) {
                cfhsi = list_entry(list_node, struct cfhsi, list);

                /* Remove from list. */
                list_del(list_node);
                spin_unlock(&cfhsi_list_lock);

                /* Shutdown driver. */
                cfhsi_shutdown(cfhsi);

                spin_lock(&cfhsi_list_lock);
        }
        spin_unlock(&cfhsi_list_lock);

        /* Unregister platform driver. */
        platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
        int result;

        /* Initialize spin lock. */
        spin_lock_init(&cfhsi_list_lock);

        /* Register platform driver. */
        result = platform_driver_register(&cfhsi_plat_drv);
        if (result) {
                printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
                        result);
                goto err_dev_register;
        }

        return result;

 err_dev_register:
        return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);