bus: mhi: host: Rename "struct mhi_tre" to "struct mhi_ring_element"
drivers/bus/mhi/host/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/list.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/skbuff.h>
16 #include <linux/slab.h>
17 #include "internal.h"
18
19 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
20                               void __iomem *base, u32 offset, u32 *out)
21 {
22         return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
23 }
24
25 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
26                                     void __iomem *base, u32 offset,
27                                     u32 mask, u32 *out)
28 {
29         u32 tmp;
30         int ret;
31
32         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
33         if (ret)
34                 return ret;
35
36         *out = (tmp & mask) >> __ffs(mask);
37
38         return 0;
39 }
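/*
 * Worked example for the field extraction above (a sketch, not from the
 * original source): with mask = 0x0000FF00 and a register value of
 * 0x12345678, __ffs(mask) is 8, so *out = (0x12345678 & 0x0000FF00) >> 8
 * = 0x56. The mask selects the field and __ffs() provides the shift
 * needed to right-align it.
 */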
40
41 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
42                                     void __iomem *base, u32 offset,
43                                     u32 mask, u32 val, u32 delayus)
44 {
45         int ret;
46         u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
47
48         while (retry--) {
49                 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
50                 if (ret)
51                         return ret;
52
53                 if (out == val)
54                         return 0;
55
56                 fsleep(delayus);
57         }
58
59         return -ETIMEDOUT;
60 }
61
62 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
63                    u32 offset, u32 val)
64 {
65         mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
66 }
67
68 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
69                          u32 offset, u32 mask, u32 val)
70 {
71         int ret;
72         u32 tmp;
73
74         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
75         if (ret)
76                 return;
77
78         tmp &= ~mask;
79         tmp |= (val << __ffs(mask));
80         mhi_write_reg(mhi_cntrl, base, offset, tmp);
81 }
82
83 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
84                   dma_addr_t db_val)
85 {
86         mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
87         mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
88 }
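/*
 * The 64-bit doorbell value is written as two 32-bit MMIO writes: the upper
 * half at offset 4 of the doorbell register pair, followed by the lower half
 * at offset 0.
 */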
89
90 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
91                      struct db_cfg *db_cfg,
92                      void __iomem *db_addr,
93                      dma_addr_t db_val)
94 {
95         if (db_cfg->db_mode) {
96                 db_cfg->db_val = db_val;
97                 mhi_write_db(mhi_cntrl, db_addr, db_val);
98                 db_cfg->db_mode = 0;
99         }
100 }
101
102 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
103                              struct db_cfg *db_cfg,
104                              void __iomem *db_addr,
105                              dma_addr_t db_val)
106 {
107         db_cfg->db_val = db_val;
108         mhi_write_db(mhi_cntrl, db_addr, db_val);
109 }
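/*
 * With burst mode enabled (mhi_db_brstmode), the doorbell is only rung while
 * db_mode is set; the flag is cleared after the write, so further updates are
 * skipped until a DB_MODE/OOB completion event re-arms it in
 * parse_xfer_event(). With burst mode disabled (mhi_db_brstmode_disable),
 * every update rings the doorbell.
 */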
110
111 void mhi_ring_er_db(struct mhi_event *mhi_event)
112 {
113         struct mhi_ring *ring = &mhi_event->ring;
114
115         mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
116                                      ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
117 }
118
119 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
120 {
121         dma_addr_t db;
122         struct mhi_ring *ring = &mhi_cmd->ring;
123
124         db = ring->iommu_base + (ring->wp - ring->base);
125         *ring->ctxt_wp = cpu_to_le64(db);
126         mhi_write_db(mhi_cntrl, ring->db_addr, db);
127 }
128
129 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
130                       struct mhi_chan *mhi_chan)
131 {
132         struct mhi_ring *ring = &mhi_chan->tre_ring;
133         dma_addr_t db;
134
135         db = ring->iommu_base + (ring->wp - ring->base);
136
137         /*
138          * Writes to the new ring element must be visible to the hardware
139          * before letting the hardware know there is a new element to fetch.
140          */
141         dma_wmb();
142         *ring->ctxt_wp = cpu_to_le64(db);
143
144         mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
145                                     ring->db_addr, db);
146 }
147
148 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
149 {
150         u32 exec;
151         int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
152
153         return (ret) ? MHI_EE_MAX : exec;
154 }
155 EXPORT_SYMBOL_GPL(mhi_get_exec_env);
156
157 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
158 {
159         u32 state;
160         int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
161                                      MHISTATUS_MHISTATE_MASK, &state);
162         return ret ? MHI_STATE_MAX : state;
163 }
164 EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
165
166 void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
167 {
168         if (mhi_cntrl->reset) {
169                 mhi_cntrl->reset(mhi_cntrl);
170                 return;
171         }
172
173         /* Generic MHI SoC reset */
174         mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
175                       MHI_SOC_RESET_REQ);
176 }
177 EXPORT_SYMBOL_GPL(mhi_soc_reset);
178
179 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
180                          struct mhi_buf_info *buf_info)
181 {
182         buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
183                                           buf_info->v_addr, buf_info->len,
184                                           buf_info->dir);
185         if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
186                 return -ENOMEM;
187
188         return 0;
189 }
190
191 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
192                           struct mhi_buf_info *buf_info)
193 {
194         void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
195                                        &buf_info->p_addr, GFP_ATOMIC);
196
197         if (!buf)
198                 return -ENOMEM;
199
200         if (buf_info->dir == DMA_TO_DEVICE)
201                 memcpy(buf, buf_info->v_addr, buf_info->len);
202
203         buf_info->bb_addr = buf;
204
205         return 0;
206 }
207
208 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
209                             struct mhi_buf_info *buf_info)
210 {
211         dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
212                          buf_info->dir);
213 }
214
215 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
216                              struct mhi_buf_info *buf_info)
217 {
218         if (buf_info->dir == DMA_FROM_DEVICE)
219                 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
220
221         dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
222                           buf_info->bb_addr, buf_info->p_addr);
223 }
224
225 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
226                                       struct mhi_ring *ring)
227 {
228         int nr_el;
229
230         if (ring->wp < ring->rp) {
231                 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
232         } else {
233                 nr_el = (ring->rp - ring->base) / ring->el_size;
234                 nr_el += ((ring->base + ring->len - ring->wp) /
235                           ring->el_size) - 1;
236         }
237
238         return nr_el;
239 }
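/*
 * Worked example (a sketch, assuming a ring of 8 elements): with RP at index
 * 2 and WP at index 5, elements 2..4 are still pending, so the second branch
 * returns 2 + (8 - 5) - 1 = 4 free elements. One slot is always kept unused
 * so that a full ring can be distinguished from an empty one (WP == RP).
 */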
240
241 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
242 {
243         return (addr - ring->iommu_base) + ring->base;
244 }
245
246 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
247                                  struct mhi_ring *ring)
248 {
249         ring->wp += ring->el_size;
250         if (ring->wp >= (ring->base + ring->len))
251                 ring->wp = ring->base;
252         /* Make the WP update visible to all CPUs */
253         smp_wmb();
254 }
255
256 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
257                                  struct mhi_ring *ring)
258 {
259         ring->rp += ring->el_size;
260         if (ring->rp >= (ring->base + ring->len))
261                 ring->rp = ring->base;
262         /* Make the RP update visible to all CPUs */
263         smp_wmb();
264 }
265
266 static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
267 {
268         return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
269 }
270
271 int mhi_destroy_device(struct device *dev, void *data)
272 {
273         struct mhi_chan *ul_chan, *dl_chan;
274         struct mhi_device *mhi_dev;
275         struct mhi_controller *mhi_cntrl;
276         enum mhi_ee_type ee = MHI_EE_MAX;
277
278         if (dev->bus != &mhi_bus_type)
279                 return 0;
280
281         mhi_dev = to_mhi_device(dev);
282         mhi_cntrl = mhi_dev->mhi_cntrl;
283
284         /* Only destroy virtual devices that are attached to the bus */
285         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
286                 return 0;
287
288         ul_chan = mhi_dev->ul_chan;
289         dl_chan = mhi_dev->dl_chan;
290
291         /*
292          * If an execution environment is specified, remove only those devices
293          * that were started in it, based on the ee_mask of the channels, as we
294          * move on to a different execution environment.
295          */
296         if (data)
297                 ee = *(enum mhi_ee_type *)data;
298
299         /*
300          * For the suspend and resume case, this function will get called
301          * without mhi_unregister_controller(). Hence, we need to drop the
302          * references to mhi_dev created for ul and dl channels. We can
303          * be sure that there will be no instances of mhi_dev left after
304          * this.
305          */
306         if (ul_chan) {
307                 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
308                         return 0;
309
310                 put_device(&ul_chan->mhi_dev->dev);
311         }
312
313         if (dl_chan) {
314                 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
315                         return 0;
316
317                 put_device(&dl_chan->mhi_dev->dev);
318         }
319
320         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
321                  mhi_dev->name);
322
323         /* Notify the client and remove the device from MHI bus */
324         device_del(dev);
325         put_device(dev);
326
327         return 0;
328 }
329
330 int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
331                                 enum dma_data_direction dir)
332 {
333         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
334         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
335                 mhi_dev->ul_chan : mhi_dev->dl_chan;
336         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
337
338         return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
339 }
340 EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
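/*
 * Hypothetical usage sketch for a client driver (not part of this file): a
 * network driver could check the free descriptor count before queueing, e.g.
 *
 *	if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) < 1)
 *		return NETDEV_TX_BUSY;
 *
 * to apply backpressure instead of getting -EAGAIN back from mhi_queue_skb().
 */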
341
342 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
343 {
344         struct mhi_driver *mhi_drv;
345
346         if (!mhi_dev->dev.driver)
347                 return;
348
349         mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
350
351         if (mhi_drv->status_cb)
352                 mhi_drv->status_cb(mhi_dev, cb_reason);
353 }
354 EXPORT_SYMBOL_GPL(mhi_notify);
355
356 /* Bind MHI channels to MHI devices */
357 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
358 {
359         struct mhi_chan *mhi_chan;
360         struct mhi_device *mhi_dev;
361         struct device *dev = &mhi_cntrl->mhi_dev->dev;
362         int i, ret;
363
364         mhi_chan = mhi_cntrl->mhi_chan;
365         for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
366                 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
367                     !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
368                         continue;
369                 mhi_dev = mhi_alloc_device(mhi_cntrl);
370                 if (IS_ERR(mhi_dev))
371                         return;
372
373                 mhi_dev->dev_type = MHI_DEVICE_XFER;
374                 switch (mhi_chan->dir) {
375                 case DMA_TO_DEVICE:
376                         mhi_dev->ul_chan = mhi_chan;
377                         mhi_dev->ul_chan_id = mhi_chan->chan;
378                         break;
379                 case DMA_FROM_DEVICE:
380                         /* We use dl_chan as offload channels */
381                         mhi_dev->dl_chan = mhi_chan;
382                         mhi_dev->dl_chan_id = mhi_chan->chan;
383                         break;
384                 default:
385                         dev_err(dev, "Direction not supported\n");
386                         put_device(&mhi_dev->dev);
387                         return;
388                 }
389
390                 get_device(&mhi_dev->dev);
391                 mhi_chan->mhi_dev = mhi_dev;
392
393                 /* Check next channel if it matches */
394                 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
395                         if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
396                                 i++;
397                                 mhi_chan++;
398                                 if (mhi_chan->dir == DMA_TO_DEVICE) {
399                                         mhi_dev->ul_chan = mhi_chan;
400                                         mhi_dev->ul_chan_id = mhi_chan->chan;
401                                 } else {
402                                         mhi_dev->dl_chan = mhi_chan;
403                                         mhi_dev->dl_chan_id = mhi_chan->chan;
404                                 }
405                                 get_device(&mhi_dev->dev);
406                                 mhi_chan->mhi_dev = mhi_dev;
407                         }
408                 }
409
410         /* The channel name is the same for both UL and DL */
411                 mhi_dev->name = mhi_chan->name;
412                 dev_set_name(&mhi_dev->dev, "%s_%s",
413                              dev_name(&mhi_cntrl->mhi_dev->dev),
414                              mhi_dev->name);
415
416                 /* Init wakeup source if available */
417                 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
418                         device_init_wakeup(&mhi_dev->dev, true);
419
420                 ret = device_add(&mhi_dev->dev);
421                 if (ret)
422                         put_device(&mhi_dev->dev);
423         }
424 }
425
426 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
427 {
428         struct mhi_event *mhi_event = dev;
429         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
430         struct mhi_event_ctxt *er_ctxt =
431                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
432         struct mhi_ring *ev_ring = &mhi_event->ring;
433         dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
434         void *dev_rp;
435
436         if (!is_valid_ring_ptr(ev_ring, ptr)) {
437                 dev_err(&mhi_cntrl->mhi_dev->dev,
438                         "Event ring rp points outside of the event ring\n");
439                 return IRQ_HANDLED;
440         }
441
442         dev_rp = mhi_to_virtual(ev_ring, ptr);
443
444         /* Only proceed if event ring has pending events */
445         if (ev_ring->rp == dev_rp)
446                 return IRQ_HANDLED;
447
448         /* For client managed event ring, notify pending data */
449         if (mhi_event->cl_manage) {
450                 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
451                 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
452
453                 if (mhi_dev)
454                         mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
455         } else {
456                 tasklet_schedule(&mhi_event->task);
457         }
458
459         return IRQ_HANDLED;
460 }
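/*
 * The hard IRQ handler above only validates the event ring pointer and then
 * defers the real work: client-managed event rings get a MHI_CB_PENDING_DATA
 * notification, everything else is processed in the tasklet
 * (mhi_ev_task/mhi_ctrl_ev_task) scheduled here.
 */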
461
462 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
463 {
464         struct mhi_controller *mhi_cntrl = priv;
465         struct device *dev = &mhi_cntrl->mhi_dev->dev;
466         enum mhi_state state;
467         enum mhi_pm_state pm_state = 0;
468         enum mhi_ee_type ee;
469
470         write_lock_irq(&mhi_cntrl->pm_lock);
471         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
472                 write_unlock_irq(&mhi_cntrl->pm_lock);
473                 goto exit_intvec;
474         }
475
476         state = mhi_get_mhi_state(mhi_cntrl);
477         ee = mhi_get_exec_env(mhi_cntrl);
478         dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
479                 TO_MHI_EXEC_STR(mhi_cntrl->ee),
480                 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
481                 TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));
482
483         if (state == MHI_STATE_SYS_ERR) {
484                 dev_dbg(dev, "System error detected\n");
485                 pm_state = mhi_tryset_pm_state(mhi_cntrl,
486                                                MHI_PM_SYS_ERR_DETECT);
487         }
488         write_unlock_irq(&mhi_cntrl->pm_lock);
489
490         if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
491                 goto exit_intvec;
492
493         switch (ee) {
494         case MHI_EE_RDDM:
495                 /* proceed if power down is not already in progress */
496                 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
497                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
498                         mhi_cntrl->ee = ee;
499                         wake_up_all(&mhi_cntrl->state_event);
500                 }
501                 break;
502         case MHI_EE_PBL:
503         case MHI_EE_EDL:
504         case MHI_EE_PTHRU:
505                 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
506                 mhi_cntrl->ee = ee;
507                 wake_up_all(&mhi_cntrl->state_event);
508                 mhi_pm_sys_err_handler(mhi_cntrl);
509                 break;
510         default:
511                 wake_up_all(&mhi_cntrl->state_event);
512                 mhi_pm_sys_err_handler(mhi_cntrl);
513                 break;
514         }
515
516 exit_intvec:
517
518         return IRQ_HANDLED;
519 }
520
521 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
522 {
523         struct mhi_controller *mhi_cntrl = dev;
524
525         /* Wake up events waiting for state change */
526         wake_up_all(&mhi_cntrl->state_event);
527
528         return IRQ_WAKE_THREAD;
529 }
530
531 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
532                                         struct mhi_ring *ring)
533 {
534         dma_addr_t ctxt_wp;
535
536         /* Update the WP */
537         ring->wp += ring->el_size;
538         ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
539
540         if (ring->wp >= (ring->base + ring->len)) {
541                 ring->wp = ring->base;
542                 ctxt_wp = ring->iommu_base;
543         }
544
545         *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
546
547         /* Update the RP */
548         ring->rp += ring->el_size;
549         if (ring->rp >= (ring->base + ring->len))
550                 ring->rp = ring->base;
551
552         /* Update to all cores */
553         smp_wmb();
554 }
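/*
 * For event rings the host is the consumer, so recycling an element advances
 * both pointers together: the read pointer moves past the processed event and
 * the write pointer (mirrored into the context via ctxt_wp) hands the slot
 * back to the device for reuse.
 */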
555
556 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
557                             struct mhi_ring_element *event,
558                             struct mhi_chan *mhi_chan)
559 {
560         struct mhi_ring *buf_ring, *tre_ring;
561         struct device *dev = &mhi_cntrl->mhi_dev->dev;
562         struct mhi_result result;
563         unsigned long flags = 0;
564         u32 ev_code;
565
566         ev_code = MHI_TRE_GET_EV_CODE(event);
567         buf_ring = &mhi_chan->buf_ring;
568         tre_ring = &mhi_chan->tre_ring;
569
570         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
571                 -EOVERFLOW : 0;
572
573         /*
574          * If it's a DB Event, we need to grab the lock as a writer and
575          * with preemption disabled, because we have to update the DB
576          * register and there is a chance that another thread could be
577          * doing the same.
578          */
579         if (ev_code >= MHI_EV_CC_OOB)
580                 write_lock_irqsave(&mhi_chan->lock, flags);
581         else
582                 read_lock_bh(&mhi_chan->lock);
583
584         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
585                 goto end_process_tx_event;
586
587         switch (ev_code) {
588         case MHI_EV_CC_OVERFLOW:
589         case MHI_EV_CC_EOB:
590         case MHI_EV_CC_EOT:
591         {
592                 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
593                 struct mhi_ring_element *local_rp, *ev_tre;
594                 void *dev_rp;
595                 struct mhi_buf_info *buf_info;
596                 u16 xfer_len;
597
598                 if (!is_valid_ring_ptr(tre_ring, ptr)) {
599                         dev_err(&mhi_cntrl->mhi_dev->dev,
600                                 "Event element points outside of the tre ring\n");
601                         break;
602                 }
603                 /* Get the TRB this event points to */
604                 ev_tre = mhi_to_virtual(tre_ring, ptr);
605
606                 dev_rp = ev_tre + 1;
607                 if (dev_rp >= (tre_ring->base + tre_ring->len))
608                         dev_rp = tre_ring->base;
609
610                 result.dir = mhi_chan->dir;
611
612                 local_rp = tre_ring->rp;
613                 while (local_rp != dev_rp) {
614                         buf_info = buf_ring->rp;
615                         /* If it's the last TRE, get length from the event */
616                         if (local_rp == ev_tre)
617                                 xfer_len = MHI_TRE_GET_EV_LEN(event);
618                         else
619                                 xfer_len = buf_info->len;
620
621                         /* Unmap if it's not pre-mapped by client */
622                         if (likely(!buf_info->pre_mapped))
623                                 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
624
625                         result.buf_addr = buf_info->cb_buf;
626
627                         /* truncate to buf len if xfer_len is larger */
628                         result.bytes_xferd =
629                                 min_t(u16, xfer_len, buf_info->len);
630                         mhi_del_ring_element(mhi_cntrl, buf_ring);
631                         mhi_del_ring_element(mhi_cntrl, tre_ring);
632                         local_rp = tre_ring->rp;
633
634                         /* notify client */
635                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
636
637                         if (mhi_chan->dir == DMA_TO_DEVICE) {
638                                 atomic_dec(&mhi_cntrl->pending_pkts);
639                                 /* Release the reference got from mhi_queue() */
640                                 mhi_cntrl->runtime_put(mhi_cntrl);
641                         }
642
643                         /*
644                          * Recycle the buffer if it is pre-allocated; if
645                          * there is an error, not much we can do apart
646                          * from dropping the packet.
647                          */
648                         if (mhi_chan->pre_alloc) {
649                                 if (mhi_queue_buf(mhi_chan->mhi_dev,
650                                                   mhi_chan->dir,
651                                                   buf_info->cb_buf,
652                                                   buf_info->len, MHI_EOT)) {
653                                         dev_err(dev,
654                                                 "Error recycling buffer for chan:%d\n",
655                                                 mhi_chan->chan);
656                                         kfree(buf_info->cb_buf);
657                                 }
658                         }
659                 }
660                 break;
661         } /* CC_EOT */
662         case MHI_EV_CC_OOB:
663         case MHI_EV_CC_DB_MODE:
664         {
665                 unsigned long pm_lock_flags;
666
667                 mhi_chan->db_cfg.db_mode = 1;
668                 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
669                 if (tre_ring->wp != tre_ring->rp &&
670                     MHI_DB_ACCESS_VALID(mhi_cntrl)) {
671                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
672                 }
673                 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
674                 break;
675         }
676         case MHI_EV_CC_BAD_TRE:
677         default:
678                 dev_err(dev, "Unknown event 0x%x\n", ev_code);
679                 break;
680         } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
681
682 end_process_tx_event:
683         if (ev_code >= MHI_EV_CC_OOB)
684                 write_unlock_irqrestore(&mhi_chan->lock, flags);
685         else
686                 read_unlock_bh(&mhi_chan->lock);
687
688         return 0;
689 }
690
691 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
692                            struct mhi_ring_element *event,
693                            struct mhi_chan *mhi_chan)
694 {
695         struct mhi_ring *buf_ring, *tre_ring;
696         struct mhi_buf_info *buf_info;
697         struct mhi_result result;
698         int ev_code;
699         u32 cookie; /* offset to local descriptor */
700         u16 xfer_len;
701
702         buf_ring = &mhi_chan->buf_ring;
703         tre_ring = &mhi_chan->tre_ring;
704
705         ev_code = MHI_TRE_GET_EV_CODE(event);
706         cookie = MHI_TRE_GET_EV_COOKIE(event);
707         xfer_len = MHI_TRE_GET_EV_LEN(event);
708
709         /* Received an out-of-bounds cookie */
710         WARN_ON(cookie >= buf_ring->len);
711
712         buf_info = buf_ring->base + cookie;
713
714         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
715                 -EOVERFLOW : 0;
716
717         /* truncate to buf len if xfer_len is larger */
718         result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
719         result.buf_addr = buf_info->cb_buf;
720         result.dir = mhi_chan->dir;
721
722         read_lock_bh(&mhi_chan->lock);
723
724         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
725                 goto end_process_rsc_event;
726
727         WARN_ON(!buf_info->used);
728
729         /* notify the client */
730         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
731
732         /*
733          * Note: We're arbitrarily incrementing RP even though the completion
734          * packet we just processed might not correspond to it. We can do this
735          * because the device is guaranteed to cache descriptors in the order
736          * it receives them, so even though the completion event is for a
737          * different descriptor we can reuse all descriptors in between.
738          * Example:
739          * The transfer ring has descriptors: A, B, C, D.
740          * The last descriptor the host queued is D (WP) and the first
741          * descriptor is A (RP).
742          * The completion event we just serviced is for descriptor C.
743          * Then we can safely queue descriptors to replace A, B, and C
744          * even though the host did not receive completions for A and B.
745          */
746         mhi_del_ring_element(mhi_cntrl, tre_ring);
747         buf_info->used = false;
748
749 end_process_rsc_event:
750         read_unlock_bh(&mhi_chan->lock);
751
752         return 0;
753 }
754
755 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
756                                        struct mhi_ring_element *tre)
757 {
758         dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
759         struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
760         struct mhi_ring *mhi_ring = &cmd_ring->ring;
761         struct mhi_ring_element *cmd_pkt;
762         struct mhi_chan *mhi_chan;
763         u32 chan;
764
765         if (!is_valid_ring_ptr(mhi_ring, ptr)) {
766                 dev_err(&mhi_cntrl->mhi_dev->dev,
767                         "Event element points outside of the cmd ring\n");
768                 return;
769         }
770
771         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
772
773         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
774
775         if (chan < mhi_cntrl->max_chan &&
776             mhi_cntrl->mhi_chan[chan].configured) {
777                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
778                 write_lock_bh(&mhi_chan->lock);
779                 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
780                 complete(&mhi_chan->completion);
781                 write_unlock_bh(&mhi_chan->lock);
782         } else {
783                 dev_err(&mhi_cntrl->mhi_dev->dev,
784                         "Completion packet for invalid channel ID: %d\n", chan);
785         }
786
787         mhi_del_ring_element(mhi_cntrl, mhi_ring);
788 }
789
790 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
791                              struct mhi_event *mhi_event,
792                              u32 event_quota)
793 {
794         struct mhi_ring_element *dev_rp, *local_rp;
795         struct mhi_ring *ev_ring = &mhi_event->ring;
796         struct mhi_event_ctxt *er_ctxt =
797                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
798         struct mhi_chan *mhi_chan;
799         struct device *dev = &mhi_cntrl->mhi_dev->dev;
800         u32 chan;
801         int count = 0;
802         dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
803
804         /*
805          * This is a quick check to avoid unnecessary event processing
806          * in case MHI is already in error state, but it's still possible
807          * to transition to error state while processing events
808          */
809         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
810                 return -EIO;
811
812         if (!is_valid_ring_ptr(ev_ring, ptr)) {
813                 dev_err(&mhi_cntrl->mhi_dev->dev,
814                         "Event ring rp points outside of the event ring\n");
815                 return -EIO;
816         }
817
818         dev_rp = mhi_to_virtual(ev_ring, ptr);
819         local_rp = ev_ring->rp;
820
821         while (dev_rp != local_rp) {
822                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
823
824                 switch (type) {
825                 case MHI_PKT_TYPE_BW_REQ_EVENT:
826                 {
827                         struct mhi_link_info *link_info;
828
829                         link_info = &mhi_cntrl->mhi_link_info;
830                         write_lock_irq(&mhi_cntrl->pm_lock);
831                         link_info->target_link_speed =
832                                 MHI_TRE_GET_EV_LINKSPEED(local_rp);
833                         link_info->target_link_width =
834                                 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
835                         write_unlock_irq(&mhi_cntrl->pm_lock);
836                         dev_dbg(dev, "Received BW_REQ event\n");
837                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
838                         break;
839                 }
840                 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
841                 {
842                         enum mhi_state new_state;
843
844                         new_state = MHI_TRE_GET_EV_STATE(local_rp);
845
846                         dev_dbg(dev, "State change event to state: %s\n",
847                                 TO_MHI_STATE_STR(new_state));
848
849                         switch (new_state) {
850                         case MHI_STATE_M0:
851                                 mhi_pm_m0_transition(mhi_cntrl);
852                                 break;
853                         case MHI_STATE_M1:
854                                 mhi_pm_m1_transition(mhi_cntrl);
855                                 break;
856                         case MHI_STATE_M3:
857                                 mhi_pm_m3_transition(mhi_cntrl);
858                                 break;
859                         case MHI_STATE_SYS_ERR:
860                         {
861                                 enum mhi_pm_state pm_state;
862
863                                 dev_dbg(dev, "System error detected\n");
864                                 write_lock_irq(&mhi_cntrl->pm_lock);
865                                 pm_state = mhi_tryset_pm_state(mhi_cntrl,
866                                                         MHI_PM_SYS_ERR_DETECT);
867                                 write_unlock_irq(&mhi_cntrl->pm_lock);
868                                 if (pm_state == MHI_PM_SYS_ERR_DETECT)
869                                         mhi_pm_sys_err_handler(mhi_cntrl);
870                                 break;
871                         }
872                         default:
873                                 dev_err(dev, "Invalid state: %s\n",
874                                         TO_MHI_STATE_STR(new_state));
875                         }
876
877                         break;
878                 }
879                 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
880                         mhi_process_cmd_completion(mhi_cntrl, local_rp);
881                         break;
882                 case MHI_PKT_TYPE_EE_EVENT:
883                 {
884                         enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
885                         enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
886
887                         dev_dbg(dev, "Received EE event: %s\n",
888                                 TO_MHI_EXEC_STR(event));
889                         switch (event) {
890                         case MHI_EE_SBL:
891                                 st = DEV_ST_TRANSITION_SBL;
892                                 break;
893                         case MHI_EE_WFW:
894                         case MHI_EE_AMSS:
895                                 st = DEV_ST_TRANSITION_MISSION_MODE;
896                                 break;
897                         case MHI_EE_FP:
898                                 st = DEV_ST_TRANSITION_FP;
899                                 break;
900                         case MHI_EE_RDDM:
901                                 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
902                                 write_lock_irq(&mhi_cntrl->pm_lock);
903                                 mhi_cntrl->ee = event;
904                                 write_unlock_irq(&mhi_cntrl->pm_lock);
905                                 wake_up_all(&mhi_cntrl->state_event);
906                                 break;
907                         default:
908                                 dev_err(dev,
909                                         "Unhandled EE event: 0x%x\n", type);
910                         }
911                         if (st != DEV_ST_TRANSITION_MAX)
912                                 mhi_queue_state_transition(mhi_cntrl, st);
913
914                         break;
915                 }
916                 case MHI_PKT_TYPE_TX_EVENT:
917                         chan = MHI_TRE_GET_EV_CHID(local_rp);
918
919                         WARN_ON(chan >= mhi_cntrl->max_chan);
920
921                         /*
922                          * Only process the event ring elements whose channel
923                          * ID is within the maximum supported range.
924                          */
925                         if (chan < mhi_cntrl->max_chan) {
926                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
927                                 if (!mhi_chan->configured)
928                                         break;
929                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
930                                 event_quota--;
931                         }
932                         break;
933                 default:
934                         dev_err(dev, "Unhandled event type: %d\n", type);
935                         break;
936                 }
937
938                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
939                 local_rp = ev_ring->rp;
940
941                 ptr = le64_to_cpu(er_ctxt->rp);
942                 if (!is_valid_ring_ptr(ev_ring, ptr)) {
943                         dev_err(&mhi_cntrl->mhi_dev->dev,
944                                 "Event ring rp points outside of the event ring\n");
945                         return -EIO;
946                 }
947
948                 dev_rp = mhi_to_virtual(ev_ring, ptr);
949                 count++;
950         }
951
952         read_lock_bh(&mhi_cntrl->pm_lock);
953         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
954                 mhi_ring_er_db(mhi_event);
955         read_unlock_bh(&mhi_cntrl->pm_lock);
956
957         return count;
958 }
959
960 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
961                                 struct mhi_event *mhi_event,
962                                 u32 event_quota)
963 {
964         struct mhi_ring_element *dev_rp, *local_rp;
965         struct mhi_ring *ev_ring = &mhi_event->ring;
966         struct mhi_event_ctxt *er_ctxt =
967                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
968         int count = 0;
969         u32 chan;
970         struct mhi_chan *mhi_chan;
971         dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
972
973         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
974                 return -EIO;
975
976         if (!is_valid_ring_ptr(ev_ring, ptr)) {
977                 dev_err(&mhi_cntrl->mhi_dev->dev,
978                         "Event ring rp points outside of the event ring\n");
979                 return -EIO;
980         }
981
982         dev_rp = mhi_to_virtual(ev_ring, ptr);
983         local_rp = ev_ring->rp;
984
985         while (dev_rp != local_rp && event_quota > 0) {
986                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
987
988                 chan = MHI_TRE_GET_EV_CHID(local_rp);
989
990                 WARN_ON(chan >= mhi_cntrl->max_chan);
991
992                 /*
993                  * Only process the event ring elements whose channel
994                  * ID is within the maximum supported range.
995                  */
996                 if (chan < mhi_cntrl->max_chan &&
997                     mhi_cntrl->mhi_chan[chan].configured) {
998                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
999
1000                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
1001                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1002                                 event_quota--;
1003                         } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
1004                                 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1005                                 event_quota--;
1006                         }
1007                 }
1008
1009                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
1010                 local_rp = ev_ring->rp;
1011
1012                 ptr = le64_to_cpu(er_ctxt->rp);
1013                 if (!is_valid_ring_ptr(ev_ring, ptr)) {
1014                         dev_err(&mhi_cntrl->mhi_dev->dev,
1015                                 "Event ring rp points outside of the event ring\n");
1016                         return -EIO;
1017                 }
1018
1019                 dev_rp = mhi_to_virtual(ev_ring, ptr);
1020                 count++;
1021         }
1022         read_lock_bh(&mhi_cntrl->pm_lock);
1023         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1024                 mhi_ring_er_db(mhi_event);
1025         read_unlock_bh(&mhi_cntrl->pm_lock);
1026
1027         return count;
1028 }
1029
1030 void mhi_ev_task(unsigned long data)
1031 {
1032         struct mhi_event *mhi_event = (struct mhi_event *)data;
1033         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1034
1035         /* process all pending events */
1036         spin_lock_bh(&mhi_event->lock);
1037         mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1038         spin_unlock_bh(&mhi_event->lock);
1039 }
1040
1041 void mhi_ctrl_ev_task(unsigned long data)
1042 {
1043         struct mhi_event *mhi_event = (struct mhi_event *)data;
1044         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1045         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1046         enum mhi_state state;
1047         enum mhi_pm_state pm_state = 0;
1048         int ret;
1049
1050         /*
1051          * We can check PM state w/o a lock here because there is no way
1052          * PM state can change from reg access valid to no access while this
1053          * thread is executing.
1054          */
1055         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
1056                 /*
1057                  * We may have a pending event but are not allowed to
1058                  * process it since we are probably in a suspended state,
1059                  * so trigger a resume.
1060                  */
1061                 mhi_trigger_resume(mhi_cntrl);
1062
1063                 return;
1064         }
1065
1066         /* Process ctrl events */
1067         ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1068
1069         /*
1070          * We received an IRQ but there were no events to process. Maybe the
1071          * device went to SYS_ERR state? Check the state to confirm.
1072          */
1073         if (!ret) {
1074                 write_lock_irq(&mhi_cntrl->pm_lock);
1075                 state = mhi_get_mhi_state(mhi_cntrl);
1076                 if (state == MHI_STATE_SYS_ERR) {
1077                         dev_dbg(dev, "System error detected\n");
1078                         pm_state = mhi_tryset_pm_state(mhi_cntrl,
1079                                                        MHI_PM_SYS_ERR_DETECT);
1080                 }
1081                 write_unlock_irq(&mhi_cntrl->pm_lock);
1082                 if (pm_state == MHI_PM_SYS_ERR_DETECT)
1083                         mhi_pm_sys_err_handler(mhi_cntrl);
1084         }
1085 }
1086
1087 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1088                              struct mhi_ring *ring)
1089 {
1090         void *tmp = ring->wp + ring->el_size;
1091
1092         if (tmp >= (ring->base + ring->len))
1093                 tmp = ring->base;
1094
1095         return (tmp == ring->rp);
1096 }
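/*
 * The ring is considered full when advancing WP by one element would make it
 * collide with RP; this is the same "one slot always empty" convention used
 * by get_nr_avail_ring_elements() above.
 */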
1097
1098 static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
1099                      enum dma_data_direction dir, enum mhi_flags mflags)
1100 {
1101         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1102         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1103                                                              mhi_dev->dl_chan;
1104         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1105         unsigned long flags;
1106         int ret;
1107
1108         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1109                 return -EIO;
1110
1111         read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1112
1113         ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1114         if (unlikely(ret)) {
1115                 ret = -EAGAIN;
1116                 goto exit_unlock;
1117         }
1118
1119         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1120         if (unlikely(ret))
1121                 goto exit_unlock;
1122
1123         /* Packet is queued, take a usage ref to exit M3 if necessary.
1124          * For a host->device buffer, the balanced put is done on buffer completion;
1125          * for a device->host buffer, it is done after ringing the DB.
1126          */
1127         mhi_cntrl->runtime_get(mhi_cntrl);
1128
1129         /* Assert dev_wake (to exit/prevent M1/M2) */
1130         mhi_cntrl->wake_toggle(mhi_cntrl);
1131
1132         if (mhi_chan->dir == DMA_TO_DEVICE)
1133                 atomic_inc(&mhi_cntrl->pending_pkts);
1134
1135         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1136                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1137
1138         if (dir == DMA_FROM_DEVICE)
1139                 mhi_cntrl->runtime_put(mhi_cntrl);
1140
1141 exit_unlock:
1142         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1143
1144         return ret;
1145 }
1146
1147 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1148                   struct sk_buff *skb, size_t len, enum mhi_flags mflags)
1149 {
1150         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1151                                                              mhi_dev->dl_chan;
1152         struct mhi_buf_info buf_info = { };
1153
1154         buf_info.v_addr = skb->data;
1155         buf_info.cb_buf = skb;
1156         buf_info.len = len;
1157
1158         if (unlikely(mhi_chan->pre_alloc))
1159                 return -EINVAL;
1160
1161         return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1162 }
1163 EXPORT_SYMBOL_GPL(mhi_queue_skb);
1164
1165 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1166                   struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1167 {
1168         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1169                                                              mhi_dev->dl_chan;
1170         struct mhi_buf_info buf_info = { };
1171
1172         buf_info.p_addr = mhi_buf->dma_addr;
1173         buf_info.cb_buf = mhi_buf;
1174         buf_info.pre_mapped = true;
1175         buf_info.len = len;
1176
1177         if (unlikely(mhi_chan->pre_alloc))
1178                 return -EINVAL;
1179
1180         return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1181 }
1182 EXPORT_SYMBOL_GPL(mhi_queue_dma);
1183
1184 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1185                         struct mhi_buf_info *info, enum mhi_flags flags)
1186 {
1187         struct mhi_ring *buf_ring, *tre_ring;
1188         struct mhi_ring_element *mhi_tre;
1189         struct mhi_buf_info *buf_info;
1190         int eot, eob, chain, bei;
1191         int ret;
1192
1193         buf_ring = &mhi_chan->buf_ring;
1194         tre_ring = &mhi_chan->tre_ring;
1195
1196         buf_info = buf_ring->wp;
1197         WARN_ON(buf_info->used);
1198         buf_info->pre_mapped = info->pre_mapped;
1199         if (info->pre_mapped)
1200                 buf_info->p_addr = info->p_addr;
1201         else
1202                 buf_info->v_addr = info->v_addr;
1203         buf_info->cb_buf = info->cb_buf;
1204         buf_info->wp = tre_ring->wp;
1205         buf_info->dir = mhi_chan->dir;
1206         buf_info->len = info->len;
1207
1208         if (!info->pre_mapped) {
1209                 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1210                 if (ret)
1211                         return ret;
1212         }
1213
1214         eob = !!(flags & MHI_EOB);
1215         eot = !!(flags & MHI_EOT);
1216         chain = !!(flags & MHI_CHAIN);
1217         bei = !!(mhi_chan->intmod);
1218
1219         mhi_tre = tre_ring->wp;
1220         mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1221         mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1222         mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1223
1224         /* increment WP */
1225         mhi_add_ring_element(mhi_cntrl, tre_ring);
1226         mhi_add_ring_element(mhi_cntrl, buf_ring);
1227
1228         return 0;
1229 }
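/*
 * Each transfer ring element written above carries the DMA address of the
 * buffer in ->ptr, the transfer length in ->dword[0] and the BEI/EOT/EOB/
 * chain flags in ->dword[1], encoded via the MHI_TRE_DATA_* macros from
 * internal.h.
 */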
1230
1231 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1232                   void *buf, size_t len, enum mhi_flags mflags)
1233 {
1234         struct mhi_buf_info buf_info = { };
1235
1236         buf_info.v_addr = buf;
1237         buf_info.cb_buf = buf;
1238         buf_info.len = len;
1239
1240         return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1241 }
1242 EXPORT_SYMBOL_GPL(mhi_queue_buf);
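/*
 * Hypothetical usage sketch (not part of this file): a client driver bound to
 * an MHI channel could queue a receive buffer from its probe() roughly as
 *
 *	void *buf = kmalloc(mhi_dev->mhi_cntrl->buffer_len, GFP_KERNEL);
 *
 *	if (buf && mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf,
 *				 mhi_dev->mhi_cntrl->buffer_len, MHI_EOT))
 *		kfree(buf);
 *
 * The completion is then delivered through the channel's xfer_cb with the
 * same buffer in result->buf_addr, as done in parse_xfer_event() above.
 */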
1243
1244 bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
1245 {
1246         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1247         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1248                                         mhi_dev->ul_chan : mhi_dev->dl_chan;
1249         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1250
1251         return mhi_is_ring_full(mhi_cntrl, tre_ring);
1252 }
1253 EXPORT_SYMBOL_GPL(mhi_queue_is_full);
1254
1255 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1256                  struct mhi_chan *mhi_chan,
1257                  enum mhi_cmd_type cmd)
1258 {
1259         struct mhi_ring_element *cmd_tre = NULL;
1260         struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1261         struct mhi_ring *ring = &mhi_cmd->ring;
1262         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1263         int chan = 0;
1264
1265         if (mhi_chan)
1266                 chan = mhi_chan->chan;
1267
1268         spin_lock_bh(&mhi_cmd->lock);
1269         if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1270                 spin_unlock_bh(&mhi_cmd->lock);
1271                 return -ENOMEM;
1272         }
1273
1274         /* prepare the cmd tre */
1275         cmd_tre = ring->wp;
1276         switch (cmd) {
1277         case MHI_CMD_RESET_CHAN:
1278                 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1279                 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1280                 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1281                 break;
1282         case MHI_CMD_STOP_CHAN:
1283                 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
1284                 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
1285                 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
1286                 break;
1287         case MHI_CMD_START_CHAN:
1288                 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1289                 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1290                 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1291                 break;
1292         default:
1293                 dev_err(dev, "Command not supported\n");
1294                 break;
1295         }
1296
1297         /* queue to hardware */
1298         mhi_add_ring_element(mhi_cntrl, ring);
1299         read_lock_bh(&mhi_cntrl->pm_lock);
1300         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1301                 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1302         read_unlock_bh(&mhi_cntrl->pm_lock);
1303         spin_unlock_bh(&mhi_cmd->lock);
1304
1305         return 0;
1306 }
1307
1308 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
1309                                     struct mhi_chan *mhi_chan,
1310                                     enum mhi_ch_state_type to_state)
1311 {
1312         struct device *dev = &mhi_chan->mhi_dev->dev;
1313         enum mhi_cmd_type cmd = MHI_CMD_NOP;
1314         int ret;
1315
1316         dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
1317                 TO_CH_STATE_TYPE_STR(to_state));
1318
1319         switch (to_state) {
1320         case MHI_CH_STATE_TYPE_RESET:
1321                 write_lock_irq(&mhi_chan->lock);
1322                 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1323                     mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1324                     mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1325                         write_unlock_irq(&mhi_chan->lock);
1326                         return -EINVAL;
1327                 }
1328                 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1329                 write_unlock_irq(&mhi_chan->lock);
1330
1331                 cmd = MHI_CMD_RESET_CHAN;
1332                 break;
1333         case MHI_CH_STATE_TYPE_STOP:
1334                 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
1335                         return -EINVAL;
1336
1337                 cmd = MHI_CMD_STOP_CHAN;
1338                 break;
1339         case MHI_CH_STATE_TYPE_START:
1340                 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1341                     mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
1342                         return -EINVAL;
1343
1344                 cmd = MHI_CMD_START_CHAN;
1345                 break;
1346         default:
1347                 dev_err(dev, "%d: Channel state update to %s not allowed\n",
1348                         mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1349                 return -EINVAL;
1350         }
1351
1352         /* bring host and device out of suspended states */
1353         ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1354         if (ret)
1355                 return ret;
1356         mhi_cntrl->runtime_get(mhi_cntrl);
1357
1358         reinit_completion(&mhi_chan->completion);
1359         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1360         if (ret) {
1361                 dev_err(dev, "%d: Failed to send %s channel command\n",
1362                         mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1363                 goto exit_channel_update;
1364         }
1365
1366         ret = wait_for_completion_timeout(&mhi_chan->completion,
1367                                        msecs_to_jiffies(mhi_cntrl->timeout_ms));
1368         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1369                 dev_err(dev,
1370                         "%d: Failed to receive %s channel command completion\n",
1371                         mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1372                 ret = -EIO;
1373                 goto exit_channel_update;
1374         }
1375
1376         ret = 0;
1377
1378         if (to_state != MHI_CH_STATE_TYPE_RESET) {
1379                 write_lock_irq(&mhi_chan->lock);
1380                 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
1381                                       MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
1382                 write_unlock_irq(&mhi_chan->lock);
1383         }
1384
1385         dev_dbg(dev, "%d: Channel state change to %s successful\n",
1386                 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1387
1388 exit_channel_update:
1389         mhi_cntrl->runtime_put(mhi_cntrl);
1390         mhi_device_put(mhi_cntrl->mhi_dev);
1391
1392         return ret;
1393 }
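/*
 * A channel state change is thus a synchronous handshake: the command TRE is
 * queued with mhi_send_cmd(), the device answers with a command completion
 * event (handled in mhi_process_cmd_completion(), which completes
 * mhi_chan->completion), and the result code is checked against
 * MHI_EV_CC_SUCCESS within timeout_ms.
 */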
1394
1395 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1396                                   struct mhi_chan *mhi_chan)
1397 {
1398         int ret;
1399         struct device *dev = &mhi_chan->mhi_dev->dev;
1400
1401         mutex_lock(&mhi_chan->mutex);
1402
1403         if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1404                 dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1405                         TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1406                 goto exit_unprepare_channel;
1407         }
1408
1409         /* no more processing events for this channel */
1410         ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1411                                        MHI_CH_STATE_TYPE_RESET);
1412         if (ret)
1413                 dev_err(dev, "%d: Failed to reset channel, still resetting\n",
1414                         mhi_chan->chan);
1415
1416 exit_unprepare_channel:
1417         write_lock_irq(&mhi_chan->lock);
1418         mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1419         write_unlock_irq(&mhi_chan->lock);
1420
1421         if (!mhi_chan->offload_ch) {
1422                 mhi_reset_chan(mhi_cntrl, mhi_chan);
1423                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1424         }
1425         dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
1426
1427         mutex_unlock(&mhi_chan->mutex);
1428 }
1429
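/*
 * Allocate the channel context (unless the client manages it, as for offload
 * channels), move the channel to the START state and, when requested via
 * MHI_CH_INBOUND_ALLOC_BUFS, pre-fill the inbound transfer ring with
 * buffer_len sized buffers and ring the channel doorbell.
 */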
1430 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1431                         struct mhi_chan *mhi_chan, unsigned int flags)
1432 {
1433         int ret = 0;
1434         struct device *dev = &mhi_chan->mhi_dev->dev;
1435
1436         if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1437                 dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1438                         TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1439                 return -ENOTCONN;
1440         }
1441
1442         mutex_lock(&mhi_chan->mutex);
1443
1444         /* Check if client manages channel context for offload channels */
1445         if (!mhi_chan->offload_ch) {
1446                 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1447                 if (ret)
1448                         goto error_init_chan;
1449         }
1450
1451         ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1452                                        MHI_CH_STATE_TYPE_START);
1453         if (ret)
1454                 goto error_pm_state;
1455
1456         if (mhi_chan->dir == DMA_FROM_DEVICE)
1457                 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
1458
1459         /* Pre-allocate buffers for the xfer ring */
1460         if (mhi_chan->pre_alloc) {
1461                 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1462                                                        &mhi_chan->tre_ring);
1463                 size_t len = mhi_cntrl->buffer_len;
1464
1465                 while (nr_el--) {
1466                         void *buf;
1467                         struct mhi_buf_info info = { };
1468
1469                         buf = kmalloc(len, GFP_KERNEL);
1470                         if (!buf) {
1471                                 ret = -ENOMEM;
1472                                 goto error_pre_alloc;
1473                         }
1474
1475                         /* Prepare transfer descriptors */
1476                         info.v_addr = buf;
1477                         info.cb_buf = buf;
1478                         info.len = len;
1479                         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1480                         if (ret) {
1481                                 kfree(buf);
1482                                 goto error_pre_alloc;
1483                         }
1484                 }
1485
1486                 read_lock_bh(&mhi_cntrl->pm_lock);
1487                 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1488                         read_lock_irq(&mhi_chan->lock);
1489                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1490                         read_unlock_irq(&mhi_chan->lock);
1491                 }
1492                 read_unlock_bh(&mhi_cntrl->pm_lock);
1493         }
1494
1495         mutex_unlock(&mhi_chan->mutex);
1496
1497         return 0;
1498
1499 error_pm_state:
1500         if (!mhi_chan->offload_ch)
1501                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1502
1503 error_init_chan:
1504         mutex_unlock(&mhi_chan->mutex);
1505
1506         return ret;
1507
1508 error_pre_alloc:
1509         mutex_unlock(&mhi_chan->mutex);
1510         mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1511
1512         return ret;
1513 }
1514
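/*
 * Walk the event ring from the local read pointer up to the device read
 * pointer and rewrite any TX completion event belonging to this channel as
 * MHI_PKT_TYPE_STALE_EVENT, so that completions for a channel being reset
 * are ignored by the event processing path.
 */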
1515 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1516                                   struct mhi_event *mhi_event,
1517                                   struct mhi_event_ctxt *er_ctxt,
1518                                   int chan)
1520 {
1521         struct mhi_ring_element *dev_rp, *local_rp;
1522         struct mhi_ring *ev_ring;
1523         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1524         unsigned long flags;
1525         dma_addr_t ptr;
1526
1527         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1528
1529         ev_ring = &mhi_event->ring;
1530
1531         /* Mark all pending events for this channel as STALE events */
1532         spin_lock_irqsave(&mhi_event->lock, flags);
1533
1534         ptr = le64_to_cpu(er_ctxt->rp);
1535         if (!is_valid_ring_ptr(ev_ring, ptr)) {
1536                 dev_err(dev,
1537                         "Event ring rp points outside of the event ring\n");
1538                 dev_rp = ev_ring->rp;
1539         } else {
1540                 dev_rp = mhi_to_virtual(ev_ring, ptr);
1541         }
1542
1543         local_rp = ev_ring->rp;
1544         while (dev_rp != local_rp) {
1545                 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1546                     chan == MHI_TRE_GET_EV_CHID(local_rp))
1547                         local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1548                                         MHI_PKT_TYPE_STALE_EVENT);
1549                 local_rp++;
1550                 if (local_rp == (ev_ring->base + ev_ring->len))
1551                         local_rp = ev_ring->base;
1552         }
1553
1554         dev_dbg(dev, "Finished marking events as stale\n");
1555         spin_unlock_irqrestore(&mhi_event->lock, flags);
1556 }
1557
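/*
 * Flush every buffer still queued on the transfer ring: drop the pending
 * packet accounting and runtime PM reference for outbound transfers, unmap
 * buffers mapped by the core, and either free pre-allocated buffers or hand
 * them back to the client with -ENOTCONN.
 */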
1558 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1559                                 struct mhi_chan *mhi_chan)
1560 {
1561         struct mhi_ring *buf_ring, *tre_ring;
1562         struct mhi_result result;
1563
1564         /* Reset any pending buffers */
1565         buf_ring = &mhi_chan->buf_ring;
1566         tre_ring = &mhi_chan->tre_ring;
1567         result.transaction_status = -ENOTCONN;
1568         result.bytes_xferd = 0;
1569         while (tre_ring->rp != tre_ring->wp) {
1570                 struct mhi_buf_info *buf_info = buf_ring->rp;
1571
1572                 if (mhi_chan->dir == DMA_TO_DEVICE) {
1573                         atomic_dec(&mhi_cntrl->pending_pkts);
1574                         /* Release the reference taken in mhi_queue() */
1575                         mhi_cntrl->runtime_put(mhi_cntrl);
1576                 }
1577
1578                 if (!buf_info->pre_mapped)
1579                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1580
1581                 mhi_del_ring_element(mhi_cntrl, buf_ring);
1582                 mhi_del_ring_element(mhi_cntrl, tre_ring);
1583
1584                 if (mhi_chan->pre_alloc) {
1585                         kfree(buf_info->cb_buf);
1586                 } else {
1587                         result.buf_addr = buf_info->cb_buf;
1588                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1589                 }
1590         }
1591 }
1592
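/*
 * Reset a channel's rings. Offload channels have nothing queued by the core;
 * for all others, invalidate pending completion events in the event ring and
 * release every buffer queued on the transfer ring.
 */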
1593 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1594 {
1595         struct mhi_event *mhi_event;
1596         struct mhi_event_ctxt *er_ctxt;
1597         int chan = mhi_chan->chan;
1598
1599         /* Nothing to reset, client doesn't queue buffers */
1600         if (mhi_chan->offload_ch)
1601                 return;
1602
1603         read_lock_bh(&mhi_cntrl->pm_lock);
1604         mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1605         er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1606
1607         mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1608
1609         mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1610
1611         read_unlock_bh(&mhi_cntrl->pm_lock);
1612 }
1613
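/*
 * Prepare both directions of a client device, UL channel first and then DL;
 * on failure, channels that were already prepared are unwound in reverse
 * order.
 */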
1614 static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
1615 {
1616         int ret, dir;
1617         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1618         struct mhi_chan *mhi_chan;
1619
1620         for (dir = 0; dir < 2; dir++) {
1621                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1622                 if (!mhi_chan)
1623                         continue;
1624
1625                 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
1626                 if (ret)
1627                         goto error_open_chan;
1628         }
1629
1630         return 0;
1631
1632 error_open_chan:
1633         for (--dir; dir >= 0; dir--) {
1634                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1635                 if (!mhi_chan)
1636                         continue;
1637
1638                 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1639         }
1640
1641         return ret;
1642 }
1643
1644 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1645 {
1646         return __mhi_prepare_for_transfer(mhi_dev, 0);
1647 }
1648 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
1649
1650 int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
1651 {
1652         return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
1653 }
1654 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
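
/*
 * Typical client usage (illustrative sketch only, not lifted from an
 * in-tree driver): prepare the channels once, queue buffers with the
 * mhi_queue_*() helpers declared in <linux/mhi.h>, and unprepare on
 * teardown.
 *
 *	ret = mhi_prepare_for_transfer(mhi_dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
 *	if (ret)
 *		goto err_unprepare;
 *	...
 *
 * err_unprepare:
 *	mhi_unprepare_from_transfer(mhi_dev);
 */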
1655
1656 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1657 {
1658         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1659         struct mhi_chan *mhi_chan;
1660         int dir;
1661
1662         for (dir = 0; dir < 2; dir++) {
1663                 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1664                 if (!mhi_chan)
1665                         continue;
1666
1667                 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1668         }
1669 }
1670 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1671
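/*
 * Process up to @budget events from the event ring serving the device's DL
 * channel in the caller's context, under the event ring lock.
 */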
1672 int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1673 {
1674         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1675         struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1676         struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1677         int ret;
1678
1679         spin_lock_bh(&mhi_event->lock);
1680         ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1681         spin_unlock_bh(&mhi_event->lock);
1682
1683         return ret;
1684 }
1685 EXPORT_SYMBOL_GPL(mhi_poll);