// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
19 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
20 void __iomem *base, u32 offset, u32 *out)
22 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
25 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
26 void __iomem *base, u32 offset,
32 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
36 *out = (tmp & mask) >> __ffs(mask);
41 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
42 void __iomem *base, u32 offset,
43 u32 mask, u32 val, u32 delayus)
46 u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
49 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
62 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
65 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
68 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
69 u32 offset, u32 mask, u32 val)
74 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
79 tmp |= (val << __ffs(mask));
80 mhi_write_reg(mhi_cntrl, base, offset, tmp);
83 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
86 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
87 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
90 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
91 struct db_cfg *db_cfg,
92 void __iomem *db_addr,
95 if (db_cfg->db_mode) {
96 db_cfg->db_val = db_val;
97 mhi_write_db(mhi_cntrl, db_addr, db_val);
102 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
103 struct db_cfg *db_cfg,
104 void __iomem *db_addr,
107 db_cfg->db_val = db_val;
108 mhi_write_db(mhi_cntrl, db_addr, db_val);
111 void mhi_ring_er_db(struct mhi_event *mhi_event)
113 struct mhi_ring *ring = &mhi_event->ring;
115 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
116 ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
119 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
122 struct mhi_ring *ring = &mhi_cmd->ring;
124 db = ring->iommu_base + (ring->wp - ring->base);
125 *ring->ctxt_wp = cpu_to_le64(db);
126 mhi_write_db(mhi_cntrl, ring->db_addr, db);
129 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
130 struct mhi_chan *mhi_chan)
132 struct mhi_ring *ring = &mhi_chan->tre_ring;
135 db = ring->iommu_base + (ring->wp - ring->base);
/*
 * Writes to the new ring element must be visible to the hardware
 * before letting h/w know there is a new element to fetch.
 */
dma_wmb();
*ring->ctxt_wp = cpu_to_le64(db);
144 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
148 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
151 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
153 return (ret) ? MHI_EE_MAX : exec;
155 EXPORT_SYMBOL_GPL(mhi_get_exec_env);
157 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
160 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
161 MHISTATUS_MHISTATE_MASK, &state);
162 return ret ? MHI_STATE_MAX : state;
164 EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
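/*
 * Illustrative sketch (not part of this driver): a controller driver can use
 * the two helpers above to sanity-check the device before driving a state
 * transition. "mhi_cntrl" is assumed to be a registered struct
 * mhi_controller.
 *
 *	enum mhi_ee_type ee = mhi_get_exec_env(mhi_cntrl);
 *	enum mhi_state state = mhi_get_mhi_state(mhi_cntrl);
 *
 *	if (ee == MHI_EE_MAX || state == MHI_STATE_MAX)
 *		dev_err(mhi_cntrl->cntrl_dev, "Failed to read EE/state\n");
 */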
166 void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
168 if (mhi_cntrl->reset) {
169 mhi_cntrl->reset(mhi_cntrl);
173 /* Generic MHI SoC reset */
174 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
177 EXPORT_SYMBOL_GPL(mhi_soc_reset);
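/*
 * Illustrative sketch (not part of this driver): a controller driver that
 * detects an unresponsive device can request a full SoC reset. If the
 * controller registered a reset() callback it is used; otherwise the generic
 * SOC_RESET register write above is performed. "my_recover" is a
 * hypothetical helper.
 *
 *	static void my_recover(struct mhi_controller *mhi_cntrl)
 *	{
 *		dev_warn(mhi_cntrl->cntrl_dev, "Device unresponsive, resetting\n");
 *		mhi_soc_reset(mhi_cntrl);
 *	}
 */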
179 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
180 struct mhi_buf_info *buf_info)
182 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
183 buf_info->v_addr, buf_info->len,
185 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
191 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
192 struct mhi_buf_info *buf_info)
194 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
195 &buf_info->p_addr, GFP_ATOMIC);
200 if (buf_info->dir == DMA_TO_DEVICE)
201 memcpy(buf, buf_info->v_addr, buf_info->len);
203 buf_info->bb_addr = buf;
208 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
209 struct mhi_buf_info *buf_info)
211 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
215 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
216 struct mhi_buf_info *buf_info)
218 if (buf_info->dir == DMA_FROM_DEVICE)
219 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
221 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
222 buf_info->bb_addr, buf_info->p_addr);
225 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
226 struct mhi_ring *ring)
230 if (ring->wp < ring->rp) {
231 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
233 nr_el = (ring->rp - ring->base) / ring->el_size;
234 nr_el += ((ring->base + ring->len - ring->wp) /
241 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
243 return (addr - ring->iommu_base) + ring->base;
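/*
 * Worked example (illustration only, assuming el_size = 16 and len = 64 for
 * some struct mhi_ring *ring): the ring then holds four elements. With rp
 * and wp both at ring->base the ring is empty and
 * get_nr_avail_ring_elements() returns 3, because one element is always left
 * unused to distinguish a full ring from an empty one. mhi_to_virtual() is
 * the inverse of the doorbell address calculation:
 *
 *	dma_addr_t db = ring->iommu_base + (ring->wp - ring->base);
 *	void *wp = mhi_to_virtual(ring, db);	// wp == ring->wp
 */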
246 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
247 struct mhi_ring *ring)
249 ring->wp += ring->el_size;
250 if (ring->wp >= (ring->base + ring->len))
251 ring->wp = ring->base;
256 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
257 struct mhi_ring *ring)
259 ring->rp += ring->el_size;
260 if (ring->rp >= (ring->base + ring->len))
261 ring->rp = ring->base;
266 static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
268 return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
271 int mhi_destroy_device(struct device *dev, void *data)
273 struct mhi_chan *ul_chan, *dl_chan;
274 struct mhi_device *mhi_dev;
275 struct mhi_controller *mhi_cntrl;
276 enum mhi_ee_type ee = MHI_EE_MAX;
278 if (dev->bus != &mhi_bus_type)
281 mhi_dev = to_mhi_device(dev);
282 mhi_cntrl = mhi_dev->mhi_cntrl;
/* Only destroy virtual devices that are attached to the bus */
285 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
288 ul_chan = mhi_dev->ul_chan;
289 dl_chan = mhi_dev->dl_chan;
/*
 * If an execution environment is specified, remove only those devices that
 * started in it, based on the ee_mask of the channels, as we move on to a
 * different execution environment.
 */
if (data)
	ee = *(enum mhi_ee_type *)data;
/*
 * For the suspend and resume case, this function will get called
 * without mhi_unregister_controller(). Hence, we need to drop the
 * references to mhi_dev created for the ul and dl channels. We can
 * be sure that there will be no instances of mhi_dev left after this.
 */
307 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
310 put_device(&ul_chan->mhi_dev->dev);
314 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
317 put_device(&dl_chan->mhi_dev->dev);
320 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
323 /* Notify the client and remove the device from MHI bus */
330 int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
331 enum dma_data_direction dir)
333 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
334 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
335 mhi_dev->ul_chan : mhi_dev->dl_chan;
336 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
338 return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
340 EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
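/*
 * Illustrative sketch (not part of this driver): a client can use
 * mhi_get_free_desc_count() to size a burst of downlink buffers before
 * queueing them. "queue_rx_buf" is a hypothetical client helper.
 *
 *	int i, nr = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < nr; i++)
 *		if (queue_rx_buf(mhi_dev))
 *			break;
 */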
342 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
344 struct mhi_driver *mhi_drv;
346 if (!mhi_dev->dev.driver)
349 mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
351 if (mhi_drv->status_cb)
352 mhi_drv->status_cb(mhi_dev, cb_reason);
354 EXPORT_SYMBOL_GPL(mhi_notify);
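/*
 * Illustrative sketch (not part of this driver): clients receive the
 * callback issued by mhi_notify() through the status_cb member of their
 * struct mhi_driver. "my_status_cb" is a hypothetical handler that polls the
 * downlink event ring when pending data is signalled.
 *
 *	static void my_status_cb(struct mhi_device *mhi_dev,
 *				 enum mhi_callback cb)
 *	{
 *		if (cb == MHI_CB_PENDING_DATA)
 *			mhi_poll(mhi_dev, 64);
 *	}
 */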
356 /* Bind MHI channels to MHI devices */
357 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
359 struct mhi_chan *mhi_chan;
360 struct mhi_device *mhi_dev;
361 struct device *dev = &mhi_cntrl->mhi_dev->dev;
364 mhi_chan = mhi_cntrl->mhi_chan;
365 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
366 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
367 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
369 mhi_dev = mhi_alloc_device(mhi_cntrl);
373 mhi_dev->dev_type = MHI_DEVICE_XFER;
374 switch (mhi_chan->dir) {
376 mhi_dev->ul_chan = mhi_chan;
377 mhi_dev->ul_chan_id = mhi_chan->chan;
379 case DMA_FROM_DEVICE:
380 /* We use dl_chan as offload channels */
381 mhi_dev->dl_chan = mhi_chan;
382 mhi_dev->dl_chan_id = mhi_chan->chan;
385 dev_err(dev, "Direction not supported\n");
386 put_device(&mhi_dev->dev);
390 get_device(&mhi_dev->dev);
391 mhi_chan->mhi_dev = mhi_dev;
393 /* Check next channel if it matches */
394 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
395 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
398 if (mhi_chan->dir == DMA_TO_DEVICE) {
399 mhi_dev->ul_chan = mhi_chan;
400 mhi_dev->ul_chan_id = mhi_chan->chan;
402 mhi_dev->dl_chan = mhi_chan;
403 mhi_dev->dl_chan_id = mhi_chan->chan;
405 get_device(&mhi_dev->dev);
406 mhi_chan->mhi_dev = mhi_dev;
/* The channel name is the same for both UL and DL */
411 mhi_dev->name = mhi_chan->name;
412 dev_set_name(&mhi_dev->dev, "%s_%s",
413 dev_name(&mhi_cntrl->mhi_dev->dev),
416 /* Init wakeup source if available */
417 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
418 device_init_wakeup(&mhi_dev->dev, true);
420 ret = device_add(&mhi_dev->dev);
422 put_device(&mhi_dev->dev);
426 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
428 struct mhi_event *mhi_event = dev;
429 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
430 struct mhi_event_ctxt *er_ctxt =
431 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
432 struct mhi_ring *ev_ring = &mhi_event->ring;
433 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
436 if (!is_valid_ring_ptr(ev_ring, ptr)) {
437 dev_err(&mhi_cntrl->mhi_dev->dev,
438 "Event ring rp points outside of the event ring\n");
442 dev_rp = mhi_to_virtual(ev_ring, ptr);
444 /* Only proceed if event ring has pending events */
445 if (ev_ring->rp == dev_rp)
448 /* For client managed event ring, notify pending data */
449 if (mhi_event->cl_manage) {
450 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
451 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
454 mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
456 tasklet_schedule(&mhi_event->task);
462 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
464 struct mhi_controller *mhi_cntrl = priv;
465 struct device *dev = &mhi_cntrl->mhi_dev->dev;
466 enum mhi_state state;
467 enum mhi_pm_state pm_state = 0;
470 write_lock_irq(&mhi_cntrl->pm_lock);
471 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
472 write_unlock_irq(&mhi_cntrl->pm_lock);
476 state = mhi_get_mhi_state(mhi_cntrl);
477 ee = mhi_get_exec_env(mhi_cntrl);
478 dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
479 TO_MHI_EXEC_STR(mhi_cntrl->ee),
480 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
481 TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));
483 if (state == MHI_STATE_SYS_ERR) {
484 dev_dbg(dev, "System error detected\n");
485 pm_state = mhi_tryset_pm_state(mhi_cntrl,
486 MHI_PM_SYS_ERR_DETECT);
488 write_unlock_irq(&mhi_cntrl->pm_lock);
490 if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
495 /* proceed if power down is not already in progress */
496 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
497 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
499 wake_up_all(&mhi_cntrl->state_event);
505 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
507 wake_up_all(&mhi_cntrl->state_event);
508 mhi_pm_sys_err_handler(mhi_cntrl);
511 wake_up_all(&mhi_cntrl->state_event);
512 mhi_pm_sys_err_handler(mhi_cntrl);
521 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
523 struct mhi_controller *mhi_cntrl = dev;
/* Wake up any threads waiting for a state-change event */
526 wake_up_all(&mhi_cntrl->state_event);
528 return IRQ_WAKE_THREAD;
531 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
532 struct mhi_ring *ring)
537 ring->wp += ring->el_size;
538 ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
540 if (ring->wp >= (ring->base + ring->len)) {
541 ring->wp = ring->base;
542 ctxt_wp = ring->iommu_base;
545 *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
548 ring->rp += ring->el_size;
549 if (ring->rp >= (ring->base + ring->len))
550 ring->rp = ring->base;
/* Make the ring pointer updates visible to all CPUs */
smp_wmb();
556 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
557 struct mhi_ring_element *event,
558 struct mhi_chan *mhi_chan)
560 struct mhi_ring *buf_ring, *tre_ring;
561 struct device *dev = &mhi_cntrl->mhi_dev->dev;
562 struct mhi_result result;
563 unsigned long flags = 0;
566 ev_code = MHI_TRE_GET_EV_CODE(event);
567 buf_ring = &mhi_chan->buf_ring;
568 tre_ring = &mhi_chan->tre_ring;
570 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
/*
 * If it's a DB event, we need to grab the lock as a writer with
 * preemption disabled, because we have to update the DB register
 * and another thread could be doing the same.
 */
579 if (ev_code >= MHI_EV_CC_OOB)
580 write_lock_irqsave(&mhi_chan->lock, flags);
582 read_lock_bh(&mhi_chan->lock);
584 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
585 goto end_process_tx_event;
588 case MHI_EV_CC_OVERFLOW:
592 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
593 struct mhi_ring_element *local_rp, *ev_tre;
595 struct mhi_buf_info *buf_info;
598 if (!is_valid_ring_ptr(tre_ring, ptr)) {
599 dev_err(&mhi_cntrl->mhi_dev->dev,
600 "Event element points outside of the tre ring\n");
603 /* Get the TRB this event points to */
604 ev_tre = mhi_to_virtual(tre_ring, ptr);
607 if (dev_rp >= (tre_ring->base + tre_ring->len))
608 dev_rp = tre_ring->base;
610 result.dir = mhi_chan->dir;
612 local_rp = tre_ring->rp;
613 while (local_rp != dev_rp) {
614 buf_info = buf_ring->rp;
615 /* If it's the last TRE, get length from the event */
616 if (local_rp == ev_tre)
617 xfer_len = MHI_TRE_GET_EV_LEN(event);
619 xfer_len = buf_info->len;
621 /* Unmap if it's not pre-mapped by client */
622 if (likely(!buf_info->pre_mapped))
623 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
625 result.buf_addr = buf_info->cb_buf;
627 /* truncate to buf len if xfer_len is larger */
629 min_t(u16, xfer_len, buf_info->len);
630 mhi_del_ring_element(mhi_cntrl, buf_ring);
631 mhi_del_ring_element(mhi_cntrl, tre_ring);
632 local_rp = tre_ring->rp;
635 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
637 if (mhi_chan->dir == DMA_TO_DEVICE) {
638 atomic_dec(&mhi_cntrl->pending_pkts);
/* Release the reference taken in mhi_queue() */
640 mhi_cntrl->runtime_put(mhi_cntrl);
/*
 * Recycle the buffer if it is pre-allocated. If there is an
 * error, there is not much we can do apart from dropping the
 * packet.
 */
648 if (mhi_chan->pre_alloc) {
649 if (mhi_queue_buf(mhi_chan->mhi_dev,
652 buf_info->len, MHI_EOT)) {
654 "Error recycling buffer for chan:%d\n",
656 kfree(buf_info->cb_buf);
663 case MHI_EV_CC_DB_MODE:
665 unsigned long pm_lock_flags;
667 mhi_chan->db_cfg.db_mode = 1;
668 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
669 if (tre_ring->wp != tre_ring->rp &&
670 MHI_DB_ACCESS_VALID(mhi_cntrl)) {
671 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
673 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
676 case MHI_EV_CC_BAD_TRE:
678 dev_err(dev, "Unknown event 0x%x\n", ev_code);
680 } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
682 end_process_tx_event:
683 if (ev_code >= MHI_EV_CC_OOB)
684 write_unlock_irqrestore(&mhi_chan->lock, flags);
686 read_unlock_bh(&mhi_chan->lock);
691 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
692 struct mhi_ring_element *event,
693 struct mhi_chan *mhi_chan)
695 struct mhi_ring *buf_ring, *tre_ring;
696 struct mhi_buf_info *buf_info;
697 struct mhi_result result;
699 u32 cookie; /* offset to local descriptor */
702 buf_ring = &mhi_chan->buf_ring;
703 tre_ring = &mhi_chan->tre_ring;
705 ev_code = MHI_TRE_GET_EV_CODE(event);
706 cookie = MHI_TRE_GET_EV_COOKIE(event);
707 xfer_len = MHI_TRE_GET_EV_LEN(event);
/* Received an out-of-bounds cookie */
710 WARN_ON(cookie >= buf_ring->len);
712 buf_info = buf_ring->base + cookie;
714 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
717 /* truncate to buf len if xfer_len is larger */
718 result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
719 result.buf_addr = buf_info->cb_buf;
720 result.dir = mhi_chan->dir;
722 read_lock_bh(&mhi_chan->lock);
724 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
725 goto end_process_rsc_event;
727 WARN_ON(!buf_info->used);
729 /* notify the client */
730 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
/*
 * Note: We're arbitrarily incrementing RP even though the completion
 * packet we processed might not be the same one. We can do this because
 * the device is guaranteed to cache descriptors in the order it receives
 * them, so even if the completion event is for a different descriptor we
 * can re-use all descriptors in between.
 * For example:
 * The transfer ring has descriptors: A, B, C, D
 * The last descriptor the host queued is D (WP) and the first
 * descriptor the host queued is A (RP).
 * The completion event we just serviced is for descriptor C.
 * Then we can safely queue descriptors to replace A, B, and C
 * even though the host did not receive completions for them.
 */
746 mhi_del_ring_element(mhi_cntrl, tre_ring);
747 buf_info->used = false;
749 end_process_rsc_event:
750 read_unlock_bh(&mhi_chan->lock);
755 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
756 struct mhi_ring_element *tre)
758 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
759 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
760 struct mhi_ring *mhi_ring = &cmd_ring->ring;
761 struct mhi_ring_element *cmd_pkt;
762 struct mhi_chan *mhi_chan;
765 if (!is_valid_ring_ptr(mhi_ring, ptr)) {
766 dev_err(&mhi_cntrl->mhi_dev->dev,
767 "Event element points outside of the cmd ring\n");
771 cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
773 chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
775 if (chan < mhi_cntrl->max_chan &&
776 mhi_cntrl->mhi_chan[chan].configured) {
777 mhi_chan = &mhi_cntrl->mhi_chan[chan];
778 write_lock_bh(&mhi_chan->lock);
779 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
780 complete(&mhi_chan->completion);
781 write_unlock_bh(&mhi_chan->lock);
783 dev_err(&mhi_cntrl->mhi_dev->dev,
784 "Completion packet for invalid channel ID: %d\n", chan);
787 mhi_del_ring_element(mhi_cntrl, mhi_ring);
790 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
791 struct mhi_event *mhi_event,
794 struct mhi_ring_element *dev_rp, *local_rp;
795 struct mhi_ring *ev_ring = &mhi_event->ring;
796 struct mhi_event_ctxt *er_ctxt =
797 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
798 struct mhi_chan *mhi_chan;
799 struct device *dev = &mhi_cntrl->mhi_dev->dev;
802 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
/*
 * This is a quick check to avoid unnecessary event processing
 * in case MHI is already in an error state, but it's still possible
 * to transition to an error state while processing events.
 */
809 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
812 if (!is_valid_ring_ptr(ev_ring, ptr)) {
813 dev_err(&mhi_cntrl->mhi_dev->dev,
814 "Event ring rp points outside of the event ring\n");
818 dev_rp = mhi_to_virtual(ev_ring, ptr);
819 local_rp = ev_ring->rp;
821 while (dev_rp != local_rp) {
822 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
825 case MHI_PKT_TYPE_BW_REQ_EVENT:
827 struct mhi_link_info *link_info;
829 link_info = &mhi_cntrl->mhi_link_info;
830 write_lock_irq(&mhi_cntrl->pm_lock);
831 link_info->target_link_speed =
832 MHI_TRE_GET_EV_LINKSPEED(local_rp);
833 link_info->target_link_width =
834 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
835 write_unlock_irq(&mhi_cntrl->pm_lock);
836 dev_dbg(dev, "Received BW_REQ event\n");
837 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
840 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
842 enum mhi_state new_state;
844 new_state = MHI_TRE_GET_EV_STATE(local_rp);
846 dev_dbg(dev, "State change event to state: %s\n",
847 TO_MHI_STATE_STR(new_state));
851 mhi_pm_m0_transition(mhi_cntrl);
854 mhi_pm_m1_transition(mhi_cntrl);
857 mhi_pm_m3_transition(mhi_cntrl);
859 case MHI_STATE_SYS_ERR:
861 enum mhi_pm_state pm_state;
863 dev_dbg(dev, "System error detected\n");
864 write_lock_irq(&mhi_cntrl->pm_lock);
865 pm_state = mhi_tryset_pm_state(mhi_cntrl,
866 MHI_PM_SYS_ERR_DETECT);
867 write_unlock_irq(&mhi_cntrl->pm_lock);
868 if (pm_state == MHI_PM_SYS_ERR_DETECT)
869 mhi_pm_sys_err_handler(mhi_cntrl);
873 dev_err(dev, "Invalid state: %s\n",
874 TO_MHI_STATE_STR(new_state));
879 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
880 mhi_process_cmd_completion(mhi_cntrl, local_rp);
882 case MHI_PKT_TYPE_EE_EVENT:
884 enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
885 enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
887 dev_dbg(dev, "Received EE event: %s\n",
888 TO_MHI_EXEC_STR(event));
891 st = DEV_ST_TRANSITION_SBL;
895 st = DEV_ST_TRANSITION_MISSION_MODE;
898 st = DEV_ST_TRANSITION_FP;
901 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
902 write_lock_irq(&mhi_cntrl->pm_lock);
903 mhi_cntrl->ee = event;
904 write_unlock_irq(&mhi_cntrl->pm_lock);
905 wake_up_all(&mhi_cntrl->state_event);
909 "Unhandled EE event: 0x%x\n", type);
911 if (st != DEV_ST_TRANSITION_MAX)
912 mhi_queue_state_transition(mhi_cntrl, st);
916 case MHI_PKT_TYPE_TX_EVENT:
917 chan = MHI_TRE_GET_EV_CHID(local_rp);
919 WARN_ON(chan >= mhi_cntrl->max_chan);
/*
 * Only process the event ring elements whose channel
 * ID is within the maximum supported range.
 */
925 if (chan < mhi_cntrl->max_chan) {
926 mhi_chan = &mhi_cntrl->mhi_chan[chan];
927 if (!mhi_chan->configured)
929 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
934 dev_err(dev, "Unhandled event type: %d\n", type);
938 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
939 local_rp = ev_ring->rp;
941 ptr = le64_to_cpu(er_ctxt->rp);
942 if (!is_valid_ring_ptr(ev_ring, ptr)) {
943 dev_err(&mhi_cntrl->mhi_dev->dev,
944 "Event ring rp points outside of the event ring\n");
948 dev_rp = mhi_to_virtual(ev_ring, ptr);
952 read_lock_bh(&mhi_cntrl->pm_lock);
953 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
954 mhi_ring_er_db(mhi_event);
955 read_unlock_bh(&mhi_cntrl->pm_lock);
960 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
961 struct mhi_event *mhi_event,
964 struct mhi_ring_element *dev_rp, *local_rp;
965 struct mhi_ring *ev_ring = &mhi_event->ring;
966 struct mhi_event_ctxt *er_ctxt =
967 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
970 struct mhi_chan *mhi_chan;
971 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
973 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
976 if (!is_valid_ring_ptr(ev_ring, ptr)) {
977 dev_err(&mhi_cntrl->mhi_dev->dev,
978 "Event ring rp points outside of the event ring\n");
982 dev_rp = mhi_to_virtual(ev_ring, ptr);
983 local_rp = ev_ring->rp;
985 while (dev_rp != local_rp && event_quota > 0) {
986 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
988 chan = MHI_TRE_GET_EV_CHID(local_rp);
990 WARN_ON(chan >= mhi_cntrl->max_chan);
/*
 * Only process the event ring elements whose channel
 * ID is within the maximum supported range.
 */
996 if (chan < mhi_cntrl->max_chan &&
997 mhi_cntrl->mhi_chan[chan].configured) {
998 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1000 if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
1001 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1003 } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
1004 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1009 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
1010 local_rp = ev_ring->rp;
1012 ptr = le64_to_cpu(er_ctxt->rp);
1013 if (!is_valid_ring_ptr(ev_ring, ptr)) {
1014 dev_err(&mhi_cntrl->mhi_dev->dev,
1015 "Event ring rp points outside of the event ring\n");
1019 dev_rp = mhi_to_virtual(ev_ring, ptr);
1022 read_lock_bh(&mhi_cntrl->pm_lock);
1023 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1024 mhi_ring_er_db(mhi_event);
1025 read_unlock_bh(&mhi_cntrl->pm_lock);
1030 void mhi_ev_task(unsigned long data)
1032 struct mhi_event *mhi_event = (struct mhi_event *)data;
1033 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1035 /* process all pending events */
1036 spin_lock_bh(&mhi_event->lock);
1037 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1038 spin_unlock_bh(&mhi_event->lock);
1041 void mhi_ctrl_ev_task(unsigned long data)
1043 struct mhi_event *mhi_event = (struct mhi_event *)data;
1044 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1045 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1046 enum mhi_state state;
1047 enum mhi_pm_state pm_state = 0;
/*
 * We can check the PM state without a lock here because there is no
 * way the PM state can change from reg access valid to no access
 * while this thread is executing.
 */
1055 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
/*
 * We may have a pending event but are not allowed to process it
 * since we are probably in a suspended state, so trigger a resume.
 */
1061 mhi_trigger_resume(mhi_cntrl);
1066 /* Process ctrl events */
1067 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
/*
 * We received an IRQ but have no events to process. Maybe the device
 * went to SYS_ERR state? Check the state to confirm.
 */
1074 write_lock_irq(&mhi_cntrl->pm_lock);
1075 state = mhi_get_mhi_state(mhi_cntrl);
1076 if (state == MHI_STATE_SYS_ERR) {
1077 dev_dbg(dev, "System error detected\n");
1078 pm_state = mhi_tryset_pm_state(mhi_cntrl,
1079 MHI_PM_SYS_ERR_DETECT);
1081 write_unlock_irq(&mhi_cntrl->pm_lock);
1082 if (pm_state == MHI_PM_SYS_ERR_DETECT)
1083 mhi_pm_sys_err_handler(mhi_cntrl);
1087 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1088 struct mhi_ring *ring)
1090 void *tmp = ring->wp + ring->el_size;
1092 if (tmp >= (ring->base + ring->len))
1095 return (tmp == ring->rp);
1098 static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
1099 enum dma_data_direction dir, enum mhi_flags mflags)
1101 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1102 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1104 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1105 unsigned long flags;
1108 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1111 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1113 ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1114 if (unlikely(ret)) {
1119 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
/*
 * The packet is queued; take a usage ref to exit M3 if necessary.
 * For a host->device buffer, the balancing put is done on buffer
 * completion. For a device->host buffer, the balancing put is done
 * after ringing the DB.
 */
1127 mhi_cntrl->runtime_get(mhi_cntrl);
/* Assert dev_wake (to exit/prevent M1/M2) */
1130 mhi_cntrl->wake_toggle(mhi_cntrl);
1132 if (mhi_chan->dir == DMA_TO_DEVICE)
1133 atomic_inc(&mhi_cntrl->pending_pkts);
1135 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1136 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1138 if (dir == DMA_FROM_DEVICE)
1139 mhi_cntrl->runtime_put(mhi_cntrl);
1142 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1147 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1148 struct sk_buff *skb, size_t len, enum mhi_flags mflags)
1150 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1152 struct mhi_buf_info buf_info = { };
1154 buf_info.v_addr = skb->data;
1155 buf_info.cb_buf = skb;
1158 if (unlikely(mhi_chan->pre_alloc))
1161 return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1163 EXPORT_SYMBOL_GPL(mhi_queue_skb);
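/*
 * Illustrative sketch (not part of this driver): a network-style client
 * queues an uplink skb with mhi_queue_skb() and frees it from its uplink
 * completion handler once the device has consumed it. "my_ul_callback" is a
 * hypothetical handler registered in the client's struct mhi_driver.
 *
 *	if (mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT))
 *		return NETDEV_TX_BUSY;
 *
 *	static void my_ul_callback(struct mhi_device *mhi_dev,
 *				   struct mhi_result *mhi_res)
 *	{
 *		dev_kfree_skb_any(mhi_res->buf_addr);
 *	}
 */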
1165 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1166 struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1168 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1170 struct mhi_buf_info buf_info = { };
1172 buf_info.p_addr = mhi_buf->dma_addr;
1173 buf_info.cb_buf = mhi_buf;
1174 buf_info.pre_mapped = true;
1177 if (unlikely(mhi_chan->pre_alloc))
1180 return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1182 EXPORT_SYMBOL_GPL(mhi_queue_dma);
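/*
 * Illustrative sketch (not part of this driver): mhi_queue_dma() is meant
 * for buffers the client has already DMA-mapped; pre_mapped is set above so
 * mhi_gen_tre() will not call map_single() on them. "my_buf" is a
 * hypothetical struct mhi_buf whose dma_addr and len are already valid.
 *
 *	ret = mhi_queue_dma(mhi_dev, DMA_TO_DEVICE, my_buf, my_buf->len,
 *			    MHI_EOT);
 *	if (ret)
 *		dev_err(&mhi_dev->dev, "Failed to queue DMA buffer\n");
 */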
1184 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1185 struct mhi_buf_info *info, enum mhi_flags flags)
1187 struct mhi_ring *buf_ring, *tre_ring;
1188 struct mhi_ring_element *mhi_tre;
1189 struct mhi_buf_info *buf_info;
1190 int eot, eob, chain, bei;
1193 buf_ring = &mhi_chan->buf_ring;
1194 tre_ring = &mhi_chan->tre_ring;
1196 buf_info = buf_ring->wp;
1197 WARN_ON(buf_info->used);
1198 buf_info->pre_mapped = info->pre_mapped;
1199 if (info->pre_mapped)
1200 buf_info->p_addr = info->p_addr;
1202 buf_info->v_addr = info->v_addr;
1203 buf_info->cb_buf = info->cb_buf;
1204 buf_info->wp = tre_ring->wp;
1205 buf_info->dir = mhi_chan->dir;
1206 buf_info->len = info->len;
1208 if (!info->pre_mapped) {
1209 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1214 eob = !!(flags & MHI_EOB);
1215 eot = !!(flags & MHI_EOT);
1216 chain = !!(flags & MHI_CHAIN);
1217 bei = !!(mhi_chan->intmod);
1219 mhi_tre = tre_ring->wp;
1220 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1221 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1222 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1225 mhi_add_ring_element(mhi_cntrl, tre_ring);
1226 mhi_add_ring_element(mhi_cntrl, buf_ring);
1231 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1232 void *buf, size_t len, enum mhi_flags mflags)
1234 struct mhi_buf_info buf_info = { };
1236 buf_info.v_addr = buf;
1237 buf_info.cb_buf = buf;
1240 return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1242 EXPORT_SYMBOL_GPL(mhi_queue_buf);
1244 bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
1246 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1247 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1248 mhi_dev->ul_chan : mhi_dev->dl_chan;
1249 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1251 return mhi_is_ring_full(mhi_cntrl, tre_ring);
1253 EXPORT_SYMBOL_GPL(mhi_queue_is_full);
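/*
 * Illustrative sketch (not part of this driver): mhi_queue_is_full() lets a
 * client implement flow control, e.g. stopping its netdev TX queue when the
 * uplink TRE ring is full and waking it again from the uplink completion
 * callback. "ndev" is a hypothetical net_device owned by the client.
 *
 *	if (mhi_queue_is_full(mhi_dev, DMA_TO_DEVICE))
 *		netif_stop_queue(ndev);
 */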
1255 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1256 struct mhi_chan *mhi_chan,
1257 enum mhi_cmd_type cmd)
1259 struct mhi_ring_element *cmd_tre = NULL;
1260 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1261 struct mhi_ring *ring = &mhi_cmd->ring;
1262 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1266 chan = mhi_chan->chan;
1268 spin_lock_bh(&mhi_cmd->lock);
1269 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1270 spin_unlock_bh(&mhi_cmd->lock);
/* Prepare the command TRE */
cmd_tre = ring->wp;
switch (cmd) {
1277 case MHI_CMD_RESET_CHAN:
1278 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1279 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1280 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1282 case MHI_CMD_STOP_CHAN:
1283 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
1284 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
1285 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
1287 case MHI_CMD_START_CHAN:
1288 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1289 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1290 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1293 dev_err(dev, "Command not supported\n");
1297 /* queue to hardware */
1298 mhi_add_ring_element(mhi_cntrl, ring);
1299 read_lock_bh(&mhi_cntrl->pm_lock);
1300 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1301 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1302 read_unlock_bh(&mhi_cntrl->pm_lock);
1303 spin_unlock_bh(&mhi_cmd->lock);
1308 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
1309 struct mhi_chan *mhi_chan,
1310 enum mhi_ch_state_type to_state)
1312 struct device *dev = &mhi_chan->mhi_dev->dev;
1313 enum mhi_cmd_type cmd = MHI_CMD_NOP;
1316 dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
1317 TO_CH_STATE_TYPE_STR(to_state));
1320 case MHI_CH_STATE_TYPE_RESET:
1321 write_lock_irq(&mhi_chan->lock);
1322 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1323 mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1324 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1325 write_unlock_irq(&mhi_chan->lock);
1328 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1329 write_unlock_irq(&mhi_chan->lock);
1331 cmd = MHI_CMD_RESET_CHAN;
1333 case MHI_CH_STATE_TYPE_STOP:
1334 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
1337 cmd = MHI_CMD_STOP_CHAN;
1339 case MHI_CH_STATE_TYPE_START:
1340 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1341 mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
1344 cmd = MHI_CMD_START_CHAN;
1347 dev_err(dev, "%d: Channel state update to %s not allowed\n",
1348 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1352 /* bring host and device out of suspended states */
1353 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1356 mhi_cntrl->runtime_get(mhi_cntrl);
1358 reinit_completion(&mhi_chan->completion);
1359 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1361 dev_err(dev, "%d: Failed to send %s channel command\n",
1362 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1363 goto exit_channel_update;
1366 ret = wait_for_completion_timeout(&mhi_chan->completion,
1367 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1368 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1370 "%d: Failed to receive %s channel command completion\n",
1371 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1373 goto exit_channel_update;
1378 if (to_state != MHI_CH_STATE_TYPE_RESET) {
1379 write_lock_irq(&mhi_chan->lock);
1380 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
1381 MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
1382 write_unlock_irq(&mhi_chan->lock);
1385 dev_dbg(dev, "%d: Channel state change to %s successful\n",
1386 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1388 exit_channel_update:
1389 mhi_cntrl->runtime_put(mhi_cntrl);
1390 mhi_device_put(mhi_cntrl->mhi_dev);
1395 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1396 struct mhi_chan *mhi_chan)
1399 struct device *dev = &mhi_chan->mhi_dev->dev;
1401 mutex_lock(&mhi_chan->mutex);
1403 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1404 dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1405 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1406 goto exit_unprepare_channel;
/* No more event processing for this channel */
1410 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1411 MHI_CH_STATE_TYPE_RESET);
1413 dev_err(dev, "%d: Failed to reset channel, still resetting\n",
1416 exit_unprepare_channel:
1417 write_lock_irq(&mhi_chan->lock);
1418 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1419 write_unlock_irq(&mhi_chan->lock);
1421 if (!mhi_chan->offload_ch) {
1422 mhi_reset_chan(mhi_cntrl, mhi_chan);
1423 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1425 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
1427 mutex_unlock(&mhi_chan->mutex);
1430 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1431 struct mhi_chan *mhi_chan, unsigned int flags)
1434 struct device *dev = &mhi_chan->mhi_dev->dev;
1436 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1437 dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1438 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1442 mutex_lock(&mhi_chan->mutex);
/* Check if the client manages the channel context for offload channels */
1445 if (!mhi_chan->offload_ch) {
1446 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1448 goto error_init_chan;
1451 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1452 MHI_CH_STATE_TYPE_START);
1454 goto error_pm_state;
1456 if (mhi_chan->dir == DMA_FROM_DEVICE)
1457 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
1459 /* Pre-allocate buffer for xfer ring */
1460 if (mhi_chan->pre_alloc) {
1461 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1462 &mhi_chan->tre_ring);
1463 size_t len = mhi_cntrl->buffer_len;
1467 struct mhi_buf_info info = { };
1469 buf = kmalloc(len, GFP_KERNEL);
1472 goto error_pre_alloc;
1475 /* Prepare transfer descriptors */
1479 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1482 goto error_pre_alloc;
1486 read_lock_bh(&mhi_cntrl->pm_lock);
1487 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1488 read_lock_irq(&mhi_chan->lock);
1489 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1490 read_unlock_irq(&mhi_chan->lock);
1492 read_unlock_bh(&mhi_cntrl->pm_lock);
1495 mutex_unlock(&mhi_chan->mutex);
1500 if (!mhi_chan->offload_ch)
1501 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1504 mutex_unlock(&mhi_chan->mutex);
1509 mutex_unlock(&mhi_chan->mutex);
1510 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1515 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1516 struct mhi_event *mhi_event,
1517 struct mhi_event_ctxt *er_ctxt,
1521 struct mhi_ring_element *dev_rp, *local_rp;
1522 struct mhi_ring *ev_ring;
1523 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1524 unsigned long flags;
1527 dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1529 ev_ring = &mhi_event->ring;
/* Mark all pending events related to the channel as STALE */
1532 spin_lock_irqsave(&mhi_event->lock, flags);
1534 ptr = le64_to_cpu(er_ctxt->rp);
1535 if (!is_valid_ring_ptr(ev_ring, ptr)) {
1536 dev_err(&mhi_cntrl->mhi_dev->dev,
1537 "Event ring rp points outside of the event ring\n");
1538 dev_rp = ev_ring->rp;
1540 dev_rp = mhi_to_virtual(ev_ring, ptr);
1543 local_rp = ev_ring->rp;
1544 while (dev_rp != local_rp) {
1545 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1546 chan == MHI_TRE_GET_EV_CHID(local_rp))
1547 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1548 MHI_PKT_TYPE_STALE_EVENT);
1550 if (local_rp == (ev_ring->base + ev_ring->len))
1551 local_rp = ev_ring->base;
1554 dev_dbg(dev, "Finished marking events as stale events\n");
1555 spin_unlock_irqrestore(&mhi_event->lock, flags);
1558 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1559 struct mhi_chan *mhi_chan)
1561 struct mhi_ring *buf_ring, *tre_ring;
1562 struct mhi_result result;
1564 /* Reset any pending buffers */
1565 buf_ring = &mhi_chan->buf_ring;
1566 tre_ring = &mhi_chan->tre_ring;
1567 result.transaction_status = -ENOTCONN;
1568 result.bytes_xferd = 0;
1569 while (tre_ring->rp != tre_ring->wp) {
1570 struct mhi_buf_info *buf_info = buf_ring->rp;
1572 if (mhi_chan->dir == DMA_TO_DEVICE) {
1573 atomic_dec(&mhi_cntrl->pending_pkts);
/* Release the reference taken in mhi_queue() */
1575 mhi_cntrl->runtime_put(mhi_cntrl);
1578 if (!buf_info->pre_mapped)
1579 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1581 mhi_del_ring_element(mhi_cntrl, buf_ring);
1582 mhi_del_ring_element(mhi_cntrl, tre_ring);
1584 if (mhi_chan->pre_alloc) {
1585 kfree(buf_info->cb_buf);
1587 result.buf_addr = buf_info->cb_buf;
1588 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1593 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1595 struct mhi_event *mhi_event;
1596 struct mhi_event_ctxt *er_ctxt;
1597 int chan = mhi_chan->chan;
1599 /* Nothing to reset, client doesn't queue buffers */
1600 if (mhi_chan->offload_ch)
1603 read_lock_bh(&mhi_cntrl->pm_lock);
1604 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1605 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1607 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1609 mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1611 read_unlock_bh(&mhi_cntrl->pm_lock);
1614 static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
1617 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1618 struct mhi_chan *mhi_chan;
1620 for (dir = 0; dir < 2; dir++) {
1621 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1625 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
1627 goto error_open_chan;
1633 for (--dir; dir >= 0; dir--) {
1634 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1638 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1644 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1646 return __mhi_prepare_for_transfer(mhi_dev, 0);
1648 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
1650 int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
1652 return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
1654 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
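/*
 * Illustrative sketch (not part of this driver): a client probe() typically
 * starts its channels with one of the two helpers above. The autoqueue
 * variant additionally pre-allocates and queues receive buffers on the
 * downlink channel (MHI_CH_INBOUND_ALLOC_BUFS), so the client only consumes
 * data from its downlink xfer_cb. "my_probe" is a hypothetical client probe.
 *
 *	static int my_probe(struct mhi_device *mhi_dev,
 *			    const struct mhi_device_id *id)
 *	{
 *		return mhi_prepare_for_transfer_autoqueue(mhi_dev);
 *	}
 */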
1656 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1658 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1659 struct mhi_chan *mhi_chan;
1662 for (dir = 0; dir < 2; dir++) {
1663 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1667 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1670 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
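/*
 * Illustrative sketch (not part of this driver): the client remove() path
 * mirrors probe() by resetting both channels; any in-flight buffers are
 * completed with -ENOTCONN via mhi_reset_data_chan(). "my_remove" is a
 * hypothetical client remove callback.
 *
 *	static void my_remove(struct mhi_device *mhi_dev)
 *	{
 *		mhi_unprepare_from_transfer(mhi_dev);
 *	}
 */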
1672 int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1674 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1675 struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1676 struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1679 spin_lock_bh(&mhi_event->lock);
1680 ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1681 spin_unlock_bh(&mhi_event->lock);
1685 EXPORT_SYMBOL_GPL(mhi_poll);
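/*
 * Illustrative sketch (not part of this driver): a client owning a
 * client-managed (cl_manage) downlink event ring drains it with mhi_poll(),
 * typically from a NAPI-style context, limiting the number of events handled
 * per call. The return value is the count reported by process_event() or a
 * negative error if register access is not possible. "budget" is a
 * hypothetical per-poll limit.
 *
 *	ret = mhi_poll(mhi_dev, budget);
 *	if (ret < 0)
 *		dev_err(&mhi_dev->dev, "Polling failed: %d\n", ret);
 */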