/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
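
/*
 * Worked example (illustrative values, not part of the original source):
 * PAD_POW2(5, 4) evaluates to 3, since 5 & 3 == 1 and 4 - 1 = 3 padding
 * bytes bring a 5-byte span up to the next 4-byte boundary (5 + 3 = 8).
 * PAD_POW2(8, 4) evaluates to 0, as 8 is already aligned.
 */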

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");

/*
 * HSI padding options.
 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold
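
/*
 * Illustration (default thresholds assumed): once the TX queue grows past
 * HIGH_WATER_MARK (100) packets, cfhsi_xmit() signals flow OFF to the CAIF
 * stack; flow ON is only signalled again from cfhsi_tx_done() after the
 * queue has drained to LOW_WATER_MARK (50) packets or fewer. The gap
 * between the two marks provides hysteresis, so flow control does not
 * toggle on every packet.
 */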

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally. */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
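
		/*
		 * Worked example (illustrative numbers, not from the original
		 * source): with hdr_len = 4 and the default 4-byte alignment,
		 * hpad = 1 + PAD_POW2(5, 4) = 4; the first byte stores the pad
		 * length (hpad - 1) and the rest shifts the frame so that the
		 * payload behind the CAIF header lands on a 4-byte boundary.
		 * With skb->len = 54, tpad = PAD_POW2(58, 4) = 2, so the frame
		 * occupies 54 + 4 + 2 = 60 bytes in the buffer.
		 */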

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB. */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
			cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		do {
			len = cfhsi_tx_frm(desc, cfhsi);
			if (!len) {
				spin_lock_bh(&cfhsi->lock);
				if (unlikely(skb_peek(&cfhsi->qhead))) {
					spin_unlock_bh(&cfhsi->lock);
					continue;
				}
				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
				/* Start inactivity timer. */
				mod_timer(&cfhsi->timer,
					jiffies + cfhsi->inactivity_timeout);
				spin_unlock_bh(&cfhsi->lock);
				goto done;
			}
		} while (!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);

done:
	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */
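
		/*
		 * Worked example (illustrative bytes, not from the original
		 * source): if the two length bytes are 0x2A 0x01, then
		 * len = 0x012A = 298, and adding the 2 FCS bytes gives a
		 * 300-byte CAIF frame to copy out.
		 */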

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		dev_err(&cfhsi->ndev->dev,
			"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}
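
/*
 * Layout note (summarizing the parsing above, not part of the original
 * source): a received HSI transfer is
 *
 *   [descriptor | payload frame 0 | ... | payload frame n |
 *    piggy-backed descriptor (optional)]
 *
 * cffrm_len[] holds the padded length of each payload frame, so the sum of
 * its non-zero entries, plus CFHSI_DESC_SZ when CFHSI_PIGGY_DESC is set,
 * gives the length of the follow-up transfer to request.
 */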

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse the payload frames. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst;
		u8 *pcffrm;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0)
				rx_len = desc_pld_len;

			if (desc_pld_len > 0 &&
					(piggy_desc->header & CFHSI_PIGGY_DESC))
				rx_len += CFHSI_DESC_SZ;

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor at the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
			/* Mark no embedded frame here. */
			piggy_desc->offset = 0;
			if (desc_pld_len == -EPROTO)
				goto out_of_sync;
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read. */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);

		res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor. */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload. */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
		}
	}

	/* Update state info. */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);

	return;

out_of_sync:
	dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}
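
/*
 * Note on the two RX buffers (a summary of the logic above, not original
 * text): the next transfer is started before the received frames have been
 * extracted, so a fresh descriptor read is directed at rx_flip_buf while
 * rx_buf is still being parsed. The final swap() keeps cfhsi->rx_buf naming
 * the buffer that the in-flight transfer is filling.
 */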

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* It happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
			test_and_clear_bit(CFHSI_WAKE_UP_ACK,
					&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout. */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to see if modem has sent something. */
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy));
		dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
			__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
				&ca_wake));
		if (ca_wake) {
			dev_err(&cfhsi->ndev->dev, "%s: CA wake missed!\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
wake_ack:
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Signal wake-up: set AWAKE and clear the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

	if (WARN_ON(res < 0))
		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgement. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
			jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
			test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
					&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout. */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
				&ca_wake));
		if (!ca_wake)
			dev_err(&cfhsi->ndev->dev, "%s: CA wake missed!\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy));
		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		spin_unlock_bh(&cfhsi->lock);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev)
		return -ENODEV;

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info. */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of a HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_flip_buf) {
		res = -ENODEV;
		goto err_alloc_rx_flip;
	}

	/* Pre-calculate inactivity timeout. */
	if (inactivity_timeout != -1) {
		cfhsi->inactivity_timeout =
				inactivity_timeout * HZ / 1000;
		if (!cfhsi->inactivity_timeout)
			cfhsi->inactivity_timeout = 1;
		else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	} else {
		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
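
	/*
	 * Illustration (assumed values, not from the original source): with
	 * the default inactivity_timeout of 1000 ms and HZ = 100, the
	 * pre-calculated value is 1000 * 100 / 1000 = 100 jiffies. Passing
	 * -1 effectively disables the timeout by clamping it to
	 * NEXT_TIMER_MAX_DELTA.
	 */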

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO. */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}

static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing. */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shutdown driver. */
			cfhsi_shutdown(cfhsi);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shutdown driver. */
		cfhsi_shutdown(cfhsi);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

	return result;

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);