// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"
static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};
const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}
static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);
static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
				i, mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);
static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);
static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
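
/*
 * Worked example of the alignment math above (hypothetical numbers,
 * power-of-two len): with len = 0x1000 and dma_handle = 0x80000234,
 * allocating len + (len - 1) bytes guarantees that a fully len-aligned
 * window fits inside the allocation:
 * iommu_base = (0x80000234 + 0xfff) & ~0xfff = 0x80001000, and the CPU
 * view (base) is offset by the same 0xdcc into pre_aligned.
 */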
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;
	/*
	 * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here.
	 * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that
	 * IRQ_NOAUTOEN is not applicable.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
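	/*
	 * Unwind in reverse order: i and mhi_event still reference the
	 * ring whose request_irq() failed, so step back once and free
	 * the IRQs of all previously initialized non-offload rings.
	 */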
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals to the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}
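
	/*
	 * Note: the channel and event contexts populated above (and the
	 * command contexts below) live in coherent DMA memory shared with
	 * the device, which is why every multi-byte field is stored
	 * little-endian via cpu_to_le32()/cpu_to_le64(), independent of
	 * host endianness.
	 */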
	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;
error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICTRLBASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0 }
	};
	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
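
	/*
	 * Each doorbell is a 64-bit register pair, hence the 8-byte
	 * stride applied to the CHDBOFF/ERDBOFF offsets above and the
	 * 8 * MHI_DEV_WAKE_DB scaling used for the wake doorbell.
	 */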
	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:
	kfree(mhi_cntrl->mhi_event);

	return -EINVAL;
}
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid possible allocation failures, the vmalloc-based
	 * vcalloc() is used instead of kcalloc()
	 */
	mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring (buf_ring) must be larger
		 * than the transfer ring because of internal logical channels
		 * in the device, allowing the host to queue more buffers than
		 * the transfer ring can hold. RSC channels, for example, need
		 * a larger local ring than transfer ring.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. If it is not defined, assign the channel
		 * direction to chtype.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}
static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
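
/*
 * Sketch of a minimal configuration consumed by parse_config()
 * (hypothetical values; real controller drivers such as pci_generic
 * define tables like these):
 *
 *	static const struct mhi_channel_config chan_cfg[] = {
 *		{
 *			.num = 0,
 *			.name = "LOOPBACK",
 *			.num_elements = 64,
 *			.event_ring = 0,
 *			.dir = DMA_TO_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *	};
 *
 *	static const struct mhi_event_config event_cfg[] = {
 *		{
 *			.num_elements = 32,
 *			.irq = 1,
 *			.channel = U32_MAX,	// no dedicated channel
 *			.mode = MHI_DB_BRST_DISABLE,
 *			.data_type = MHI_ER_CTRL,
 *		},
 *	};
 *
 *	static const struct mhi_controller_config my_config = {
 *		.max_channels = 128,
 *		.timeout_ms = 2000,
 *		.num_channels = ARRAY_SIZE(chan_cfg),
 *		.ch_cfg = chan_cfg,
 *		.num_events = ARRAY_SIZE(event_cfg),
 *		.event_cfg = event_cfg,
 *	};
 */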
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
error_setup_irq:
	mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
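
/*
 * Registration sketch from a hypothetical controller driver (error
 * handling omitted; "pdev", "my_config", and the my_* callbacks are
 * assumptions). The fields set here mirror the mandatory-field check
 * at the top of mhi_register_controller():
 *
 *	struct mhi_controller *mhi_cntrl = mhi_alloc_controller();
 *
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = base;
 *	mhi_cntrl->reg_len = len;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->nr_irqs = nr_irqs;
 *	mhi_cntrl->read_reg = my_read_reg;
 *	mhi_cntrl->write_reg = my_write_reg;
 *	mhi_cntrl->status_cb = my_status_cb;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *
 *	ret = mhi_register_controller(mhi_cntrl, &my_config);
 */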
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_deinit_free_irq(mhi_cntrl);
	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);
struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -ERANGE;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -ERANGE;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/*
		 * Allocate RDDM table for debugging purpose if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
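
/*
 * Typical power sequencing by a controller driver (sketch, error
 * handling omitted); the prepare/unprepare pair above brackets the
 * power up/down APIs from <linux/mhi.h>:
 *
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *	...
 *	mhi_power_down(mhi_cntrl, true);
 *	mhi_unprepare_after_power_down(mhi_cntrl);
 */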
static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}
static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);
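
/*
 * Client driver sketch (hypothetical names) showing what
 * __mhi_driver_register() expects; module_mhi_driver() from
 * <linux/mhi.h> wraps the register/unregister pair:
 *
 *	static const struct mhi_device_id my_id_table[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *
 *	static struct mhi_driver my_driver = {
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.ul_xfer_cb = my_ul_cb,
 *		.dl_xfer_cb = my_dl_cb,
 *		.driver.name = "my_mhi_client",
 *	};
 *	module_mhi_driver(my_driver);
 */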
static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}
struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Modem Host Interface");