// SPDX-License-Identifier: GPL-2.0
/*
 * PCI EPF driver for MHI Endpoint devices
 *
 * Copyright (C) 2023 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

#define MHI_VERSION_1_0 0x01000000

#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, mhi_cntrl)

/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)

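/*
 * Bookkeeping for one in-flight async DMA transfer. Entries are queued on
 * pci_epf_mhi::dma_list from the DMA completion callback and drained by the
 * workqueue, which unmaps the buffer and invokes the MHI stack callback in
 * process context.
 */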
struct pci_epf_mhi_dma_transfer {
        struct pci_epf_mhi *epf_mhi;
        struct mhi_ep_buf_info buf_info;
        struct list_head node;
        dma_addr_t paddr;
        enum dma_data_direction dir;
        size_t size;
};

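/*
 * Per-SoC description of the MHI endpoint function: MHI controller config,
 * PCI config space header, BAR exposing the MMIO region, supported MSI
 * vector count, MRU (Maximum Receive Unit) and platform flags such as
 * MHI_EPF_USE_DMA.
 */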
struct pci_epf_mhi_ep_info {
        const struct mhi_ep_cntrl_config *config;
        struct pci_epf_header *epf_header;
        enum pci_barno bar_num;
        u32 epf_flags;
        u32 msi_count;
        u32 mru;
        u32 flags;
};

#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction)       \
        {                                                       \
                .num = ch_num,                                  \
                .name = ch_name,                                \
                .dir = direction,                               \
        }

#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name)               \
        MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)

#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name)               \
        MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)

static const struct mhi_ep_channel_config mhi_v1_channels[] = {
        MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
        MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
        MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
        MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
        MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
        MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
        MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
        MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
        MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
        MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
        MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
        MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
        MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
        MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
        MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
        MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
        MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
        MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
        MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
        MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
        MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
        MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
        MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
        MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
        MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
        MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
};

static const struct mhi_ep_cntrl_config mhi_v1_config = {
        .max_channels = 128,
        .num_channels = ARRAY_SIZE(mhi_v1_channels),
        .ch_cfg = mhi_v1_channels,
        .mhi_version = MHI_VERSION_1_0,
};

static struct pci_epf_header sdx55_header = {
        .vendorid = PCI_VENDOR_ID_QCOM,
        .deviceid = 0x0306,
        .baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
        .subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
        .interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sdx55_info = {
        .config = &mhi_v1_config,
        .epf_header = &sdx55_header,
        .bar_num = BAR_0,
        .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
        .msi_count = 32,
        .mru = 0x8000,
};

static struct pci_epf_header sm8450_header = {
        .vendorid = PCI_VENDOR_ID_QCOM,
        .deviceid = 0x0306,
        .baseclass_code = PCI_CLASS_OTHERS,
        .interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sm8450_info = {
        .config = &mhi_v1_config,
        .epf_header = &sm8450_header,
        .bar_num = BAR_0,
        .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
        .msi_count = 32,
        .mru = 0x8000,
        .flags = MHI_EPF_USE_DMA,
};

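/*
 * Driver state for one MHI endpoint function: the MHI EP controller, the
 * MMIO region shared with the host, the eDMA channels and the deferred
 * completion machinery for async transfers. @lock serializes the transfer
 * paths that share the iATU window and the DMA channels.
 */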
struct pci_epf_mhi {
        const struct pci_epc_features *epc_features;
        const struct pci_epf_mhi_ep_info *info;
        struct mhi_ep_cntrl mhi_cntrl;
        struct pci_epf *epf;
        struct mutex lock;
        void __iomem *mmio;
        resource_size_t mmio_phys;
        struct dma_chan *dma_chan_tx;
        struct dma_chan *dma_chan_rx;
        struct workqueue_struct *dma_wq;
        struct work_struct dma_work;
        struct list_head dma_list;
        spinlock_t list_lock;
        u32 mmio_size;
        int irq;
};

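/*
 * Return the offset of @addr from the outbound window alignment required
 * by the endpoint controller. ->align is expected to be a power of two,
 * so masking with (align - 1) yields the offset within the aligned window.
 */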
static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
{
        return addr & (epf_mhi->epc_features->align - 1);
}

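/*
 * Allocate endpoint outbound memory and map it to the host address, with
 * the mapping extended backwards to the alignment boundary. The returned
 * @paddr and @vaddr are advanced by @offset so they point exactly at
 * @pci_addr.
 */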
static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
                                   phys_addr_t *paddr, void __iomem **vaddr,
                                   size_t offset, size_t size)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct pci_epf *epf = epf_mhi->epf;
        struct pci_epc *epc = epf->epc;
        int ret;

        *vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
        if (!*vaddr)
                return -ENOMEM;

        ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
                               pci_addr - offset, size + offset);
        if (ret) {
                pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
                return ret;
        }

        *paddr = *paddr + offset;
        *vaddr = *vaddr + offset;

        return 0;
}

static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
                                 phys_addr_t *paddr, void __iomem **vaddr,
                                 size_t size)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        size_t offset = get_align_offset(epf_mhi, pci_addr);

        return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
                                       offset, size);
}

static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
                                     u64 pci_addr, phys_addr_t paddr,
                                     void __iomem *vaddr, size_t offset,
                                     size_t size)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct pci_epf *epf = epf_mhi->epf;
        struct pci_epc *epc = epf->epc;

        pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
        pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
                              size + offset);
}

static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
                                   phys_addr_t paddr, void __iomem *vaddr,
                                   size_t size)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        size_t offset = get_align_offset(epf_mhi, pci_addr);

        __pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
                                 size);
}

static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct pci_epf *epf = epf_mhi->epf;
        struct pci_epc *epc = epf->epc;

        /*
         * MHI supplies 0-based MSI vectors but the API expects the vector
         * number to start from 1, so we need to increment the vector by 1.
         */
        pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI,
                          vector + 1);
}

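/*
 * Read from host memory using PIO through the iATU: map the host buffer
 * into endpoint address space, copy from it, then tear the mapping down.
 * This is the default transfer path and also the fallback for small eDMA
 * transfers.
 */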
static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
                                 struct mhi_ep_buf_info *buf_info)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
        void __iomem *tre_buf;
        phys_addr_t tre_phys;
        int ret;

        mutex_lock(&epf_mhi->lock);

        ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
                                      &tre_buf, offset, buf_info->size);
        if (ret) {
                mutex_unlock(&epf_mhi->lock);
                return ret;
        }

        memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);

        __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
                                 tre_buf, offset, buf_info->size);

        mutex_unlock(&epf_mhi->lock);

        if (buf_info->cb)
                buf_info->cb(buf_info);

        return 0;
}

static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
                                  struct mhi_ep_buf_info *buf_info)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
        void __iomem *tre_buf;
        phys_addr_t tre_phys;
        int ret;

        mutex_lock(&epf_mhi->lock);

        ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
                                      &tre_buf, offset, buf_info->size);
        if (ret) {
                mutex_unlock(&epf_mhi->lock);
                return ret;
        }

        memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);

        __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
                                 tre_buf, offset, buf_info->size);

        mutex_unlock(&epf_mhi->lock);

        if (buf_info->cb)
                buf_info->cb(buf_info);

        return 0;
}

static void pci_epf_mhi_dma_callback(void *param)
{
        complete(param);
}

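/*
 * Read from host memory using the eDMA engine of the endpoint controller,
 * blocking until completion (1s timeout). Transfers smaller than 4K go
 * through the iATU path instead, presumably because DMA setup overhead
 * dominates for small buffers.
 */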
static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
                                 struct mhi_ep_buf_info *buf_info)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
        struct dma_chan *chan = epf_mhi->dma_chan_rx;
        struct device *dev = &epf_mhi->epf->dev;
        DECLARE_COMPLETION_ONSTACK(complete);
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config config = {};
        dma_cookie_t cookie;
        dma_addr_t dst_addr;
        int ret;

        if (buf_info->size < SZ_4K)
                return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);

        mutex_lock(&epf_mhi->lock);

        config.direction = DMA_DEV_TO_MEM;
        config.src_addr = buf_info->host_addr;

        ret = dmaengine_slave_config(chan, &config);
        if (ret) {
                dev_err(dev, "Failed to configure DMA channel\n");
                goto err_unlock;
        }

        dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
                                  DMA_FROM_DEVICE);
        ret = dma_mapping_error(dma_dev, dst_addr);
        if (ret) {
                dev_err(dev, "Failed to map remote memory\n");
                goto err_unlock;
        }

        desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
                                           DMA_DEV_TO_MEM,
                                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dev, "Failed to prepare DMA\n");
                ret = -EIO;
                goto err_unmap;
        }

        desc->callback = pci_epf_mhi_dma_callback;
        desc->callback_param = &complete;

        cookie = dmaengine_submit(desc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dev_err(dev, "Failed to do DMA submit\n");
                goto err_unmap;
        }

        dma_async_issue_pending(chan);
        ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
        if (!ret) {
                dev_err(dev, "DMA transfer timeout\n");
                dmaengine_terminate_sync(chan);
                ret = -ETIMEDOUT;
        } else {
                /* wait_for_completion_timeout() returns remaining jiffies */
                ret = 0;
        }

err_unmap:
        dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
        mutex_unlock(&epf_mhi->lock);

        return ret;
}

static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
                                  struct mhi_ep_buf_info *buf_info)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
        struct dma_chan *chan = epf_mhi->dma_chan_tx;
        struct device *dev = &epf_mhi->epf->dev;
        DECLARE_COMPLETION_ONSTACK(complete);
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config config = {};
        dma_cookie_t cookie;
        dma_addr_t src_addr;
        int ret;

        if (buf_info->size < SZ_4K)
                return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);

        mutex_lock(&epf_mhi->lock);

        config.direction = DMA_MEM_TO_DEV;
        config.dst_addr = buf_info->host_addr;

        ret = dmaengine_slave_config(chan, &config);
        if (ret) {
                dev_err(dev, "Failed to configure DMA channel\n");
                goto err_unlock;
        }

        src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
                                  DMA_TO_DEVICE);
        ret = dma_mapping_error(dma_dev, src_addr);
        if (ret) {
                dev_err(dev, "Failed to map remote memory\n");
                goto err_unlock;
        }

        desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
                                           DMA_MEM_TO_DEV,
                                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dev, "Failed to prepare DMA\n");
                ret = -EIO;
                goto err_unmap;
        }

        desc->callback = pci_epf_mhi_dma_callback;
        desc->callback_param = &complete;

        cookie = dmaengine_submit(desc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dev_err(dev, "Failed to do DMA submit\n");
                goto err_unmap;
        }

        dma_async_issue_pending(chan);
        ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
        if (!ret) {
                dev_err(dev, "DMA transfer timeout\n");
                dmaengine_terminate_sync(chan);
                ret = -ETIMEDOUT;
        } else {
                /* wait_for_completion_timeout() returns remaining jiffies */
                ret = 0;
        }

err_unmap:
        dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
        mutex_unlock(&epf_mhi->lock);

        return ret;
}

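/*
 * Work item that drains the list of completed async DMA transfers: unmap
 * each buffer, invoke the MHI stack callback in process context and free
 * the transfer descriptor.
 */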
static void pci_epf_mhi_dma_worker(struct work_struct *work)
{
        struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
        struct pci_epf_mhi_dma_transfer *itr, *tmp;
        struct mhi_ep_buf_info *buf_info;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&epf_mhi->list_lock, flags);
        list_splice_tail_init(&epf_mhi->dma_list, &head);
        spin_unlock_irqrestore(&epf_mhi->list_lock, flags);

        list_for_each_entry_safe(itr, tmp, &head, node) {
                list_del(&itr->node);
                dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
                buf_info = &itr->buf_info;
                buf_info->cb(buf_info);
                kfree(itr);
        }
}

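/*
 * DMA completion callback for async transfers. This may run in tasklet
 * (softirq) context, so it only queues the finished transfer for the
 * worker instead of unmapping and completing it here.
 */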
static void pci_epf_mhi_dma_async_callback(void *param)
{
        struct pci_epf_mhi_dma_transfer *transfer = param;
        struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;

        spin_lock(&epf_mhi->list_lock);
        list_add_tail(&transfer->node, &epf_mhi->dma_list);
        spin_unlock(&epf_mhi->list_lock);

        queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
}

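/*
 * Async variant of the eDMA read: submit the transfer and return without
 * waiting. Unmapping and the MHI stack callback happen later, via
 * pci_epf_mhi_dma_async_callback() and the workqueue. The success path
 * exits through the err_unlock label, which only drops the mutex.
 */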
static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
                                       struct mhi_ep_buf_info *buf_info)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
        struct pci_epf_mhi_dma_transfer *transfer = NULL;
        struct dma_chan *chan = epf_mhi->dma_chan_rx;
        struct device *dev = &epf_mhi->epf->dev;
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config config = {};
        dma_cookie_t cookie;
        dma_addr_t dst_addr;
        int ret;

        mutex_lock(&epf_mhi->lock);

        config.direction = DMA_DEV_TO_MEM;
        config.src_addr = buf_info->host_addr;

        ret = dmaengine_slave_config(chan, &config);
        if (ret) {
                dev_err(dev, "Failed to configure DMA channel\n");
                goto err_unlock;
        }

        dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
                                  DMA_FROM_DEVICE);
        ret = dma_mapping_error(dma_dev, dst_addr);
        if (ret) {
                dev_err(dev, "Failed to map remote memory\n");
                goto err_unlock;
        }

        desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
                                           DMA_DEV_TO_MEM,
                                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dev, "Failed to prepare DMA\n");
                ret = -EIO;
                goto err_unmap;
        }

        transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
        if (!transfer) {
                ret = -ENOMEM;
                goto err_unmap;
        }

        transfer->epf_mhi = epf_mhi;
        transfer->paddr = dst_addr;
        transfer->size = buf_info->size;
        transfer->dir = DMA_FROM_DEVICE;
        memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));

        desc->callback = pci_epf_mhi_dma_async_callback;
        desc->callback_param = transfer;

        cookie = dmaengine_submit(desc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dev_err(dev, "Failed to do DMA submit\n");
                goto err_free_transfer;
        }

        dma_async_issue_pending(chan);

        goto err_unlock;

err_free_transfer:
        kfree(transfer);
err_unmap:
        dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
        mutex_unlock(&epf_mhi->lock);

        return ret;
}

static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
                                        struct mhi_ep_buf_info *buf_info)
{
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
        struct pci_epf_mhi_dma_transfer *transfer = NULL;
        struct dma_chan *chan = epf_mhi->dma_chan_tx;
        struct device *dev = &epf_mhi->epf->dev;
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config config = {};
        dma_cookie_t cookie;
        dma_addr_t src_addr;
        int ret;

        mutex_lock(&epf_mhi->lock);

        config.direction = DMA_MEM_TO_DEV;
        config.dst_addr = buf_info->host_addr;

        ret = dmaengine_slave_config(chan, &config);
        if (ret) {
                dev_err(dev, "Failed to configure DMA channel\n");
                goto err_unlock;
        }

        src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
                                  DMA_TO_DEVICE);
        ret = dma_mapping_error(dma_dev, src_addr);
        if (ret) {
                dev_err(dev, "Failed to map remote memory\n");
                goto err_unlock;
        }

        desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
                                           DMA_MEM_TO_DEV,
                                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dev, "Failed to prepare DMA\n");
                ret = -EIO;
                goto err_unmap;
        }

        transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
        if (!transfer) {
                ret = -ENOMEM;
                goto err_unmap;
        }

        transfer->epf_mhi = epf_mhi;
        transfer->paddr = src_addr;
        transfer->size = buf_info->size;
        transfer->dir = DMA_TO_DEVICE;
        memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));

        desc->callback = pci_epf_mhi_dma_async_callback;
        desc->callback_param = transfer;

        cookie = dmaengine_submit(desc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dev_err(dev, "Failed to do DMA submit\n");
                goto err_free_transfer;
        }

        dma_async_issue_pending(chan);

        goto err_unlock;

err_free_transfer:
        kfree(transfer);
err_unmap:
        dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
        mutex_unlock(&epf_mhi->lock);

        return ret;
}

struct epf_dma_filter {
        struct device *dev;
        u32 dma_mask;
};

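/*
 * dmaengine filter callback: accept only channels that belong to the DMA
 * device of the endpoint controller and support the requested transfer
 * direction.
 */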
static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
{
        struct epf_dma_filter *filter = node;
        struct dma_slave_caps caps;

        memset(&caps, 0, sizeof(caps));
        dma_get_slave_caps(chan, &caps);

        return chan->device->dev == filter->dev &&
               (filter->dma_mask & caps.directions);
}

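/*
 * Request one TX (MEM_TO_DEV) and one RX (DEV_TO_MEM) slave channel from
 * the DMA engine of the endpoint controller, and set up the workqueue that
 * completes async transfers.
 */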
static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
{
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
        struct device *dev = &epf_mhi->epf->dev;
        struct epf_dma_filter filter;
        dma_cap_mask_t mask;
        int ret;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        filter.dev = dma_dev;
        filter.dma_mask = BIT(DMA_MEM_TO_DEV);
        epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
                                                   &filter);
        if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
                dev_err(dev, "Failed to request tx channel\n");
                return -ENODEV;
        }

        filter.dma_mask = BIT(DMA_DEV_TO_MEM);
        epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
                                                   &filter);
        if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
                dev_err(dev, "Failed to request rx channel\n");
                ret = -ENODEV;
                goto err_release_tx;
        }

        epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
        if (!epf_mhi->dma_wq) {
                ret = -ENOMEM;
                goto err_release_rx;
        }

        INIT_LIST_HEAD(&epf_mhi->dma_list);
        INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
        spin_lock_init(&epf_mhi->list_lock);

        return 0;

err_release_rx:
        dma_release_channel(epf_mhi->dma_chan_rx);
        epf_mhi->dma_chan_rx = NULL;
err_release_tx:
        dma_release_channel(epf_mhi->dma_chan_tx);
        epf_mhi->dma_chan_tx = NULL;

        return ret;
}

static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
        destroy_workqueue(epf_mhi->dma_wq);
        dma_release_channel(epf_mhi->dma_chan_tx);
        dma_release_channel(epf_mhi->dma_chan_rx);
        epf_mhi->dma_chan_tx = NULL;
        epf_mhi->dma_chan_rx = NULL;
}

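/*
 * CORE_INIT callback: program the BAR backing the shared MMIO region,
 * configure the MSI vector count and write the PCI configuration space
 * header. Also cache the EPC features, which are needed for the alignment
 * handling in get_align_offset().
 */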
static int pci_epf_mhi_core_init(struct pci_epf *epf)
{
        struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
        const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
        struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
        struct pci_epc *epc = epf->epc;
        struct device *dev = &epf->dev;
        int ret;

        epf_bar->phys_addr = epf_mhi->mmio_phys;
        epf_bar->size = epf_mhi->mmio_size;
        epf_bar->barno = info->bar_num;
        epf_bar->flags = info->epf_flags;
        ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
        if (ret) {
                dev_err(dev, "Failed to set BAR: %d\n", ret);
                return ret;
        }

        ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
                              order_base_2(info->msi_count));
        if (ret) {
                dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
                return ret;
        }

        ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
                                   epf->header);
        if (ret) {
                dev_err(dev, "Failed to set Configuration header: %d\n", ret);
                return ret;
        }

        epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
        if (!epf_mhi->epc_features)
                return -ENODATA;

        return 0;
}

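/*
 * LINK_UP callback: wire up the transfer ops (iATU PIO by default, eDMA
 * when the platform sets MHI_EPF_USE_DMA) and register the MHI EP
 * controller. The stack itself is powered up later, from the BME callback.
 */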
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
        struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
        const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
        struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
        struct pci_epc *epc = epf->epc;
        struct device *dev = &epf->dev;
        int ret;

        if (info->flags & MHI_EPF_USE_DMA) {
                ret = pci_epf_mhi_dma_init(epf_mhi);
                if (ret) {
                        dev_err(dev, "Failed to initialize DMA: %d\n", ret);
                        return ret;
                }
        }

        mhi_cntrl->mmio = epf_mhi->mmio;
        mhi_cntrl->irq = epf_mhi->irq;
        mhi_cntrl->mru = info->mru;

        /* Assign the struct dev of PCI EP as MHI controller device */
        mhi_cntrl->cntrl_dev = epc->dev.parent;
        mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
        mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
        mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
        mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
        mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
        if (info->flags & MHI_EPF_USE_DMA) {
                mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
                mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
                mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
                mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
        }

        /* Register the MHI EP controller */
        ret = mhi_ep_register_controller(mhi_cntrl, info->config);
        if (ret) {
                dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
                if (info->flags & MHI_EPF_USE_DMA)
                        pci_epf_mhi_dma_deinit(epf_mhi);
                return ret;
        }

        return 0;
}

static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
        struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
        const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
        struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;

        if (mhi_cntrl->mhi_dev) {
                mhi_ep_power_down(mhi_cntrl);
                if (info->flags & MHI_EPF_USE_DMA)
                        pci_epf_mhi_dma_deinit(epf_mhi);
                mhi_ep_unregister_controller(mhi_cntrl);
        }

        return 0;
}

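/*
 * BME (Bus Master Enable) callback: invoked when the host sets the Bus
 * Master Enable bit. Power up the MHI EP stack if the controller is
 * registered and not already running.
 */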
static int pci_epf_mhi_bme(struct pci_epf *epf)
{
        struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
        const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
        struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
        struct device *dev = &epf->dev;
        int ret;

        /*
         * Power up the MHI EP stack if link is up and stack is in power down
         * state.
         */
        if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
                ret = mhi_ep_power_up(mhi_cntrl);
                if (ret) {
                        dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
                        if (info->flags & MHI_EPF_USE_DMA)
                                pci_epf_mhi_dma_deinit(epf_mhi);
                        mhi_ep_unregister_controller(mhi_cntrl);
                }
        }

        return 0;
}

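/*
 * On bind, fetch the "mmio" memory resource and "doorbell" interrupt that
 * the endpoint controller's platform device provides for MHI.
 */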
static int pci_epf_mhi_bind(struct pci_epf *epf)
{
        struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
        struct pci_epc *epc = epf->epc;
        struct platform_device *pdev = to_platform_device(epc->dev.parent);
        struct resource *res;
        int ret;

        /* Get MMIO base address from Endpoint controller */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
        if (!res) {
                dev_err(&epf->dev, "Failed to get \"mmio\" resource\n");
                return -ENODEV;
        }

        epf_mhi->mmio_phys = res->start;
        epf_mhi->mmio_size = resource_size(res);

        epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
        if (!epf_mhi->mmio)
                return -ENOMEM;

        ret = platform_get_irq_byname(pdev, "doorbell");
        if (ret < 0) {
                iounmap(epf_mhi->mmio);
                return ret;
        }

        epf_mhi->irq = ret;

        return 0;
}

static void pci_epf_mhi_unbind(struct pci_epf *epf)
{
        struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
        const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
        struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
        struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
        struct pci_epc *epc = epf->epc;

        /*
         * Forcefully power down the MHI EP stack. The only way to bring it
         * back to a working state after a subsequent bind is for the host to
         * reenable Bus Master (BME).
         */
        if (mhi_cntrl->mhi_dev) {
                mhi_ep_power_down(mhi_cntrl);
                if (info->flags & MHI_EPF_USE_DMA)
                        pci_epf_mhi_dma_deinit(epf_mhi);
                mhi_ep_unregister_controller(mhi_cntrl);
        }

        iounmap(epf_mhi->mmio);
        pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}

static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
        .core_init = pci_epf_mhi_core_init,
        .link_up = pci_epf_mhi_link_up,
        .link_down = pci_epf_mhi_link_down,
        .bme = pci_epf_mhi_bme,
};

static int pci_epf_mhi_probe(struct pci_epf *epf,
                             const struct pci_epf_device_id *id)
{
        struct pci_epf_mhi_ep_info *info =
                        (struct pci_epf_mhi_ep_info *)id->driver_data;
        struct pci_epf_mhi *epf_mhi;
        struct device *dev = &epf->dev;

        epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
        if (!epf_mhi)
                return -ENOMEM;

        epf->header = info->epf_header;
        epf_mhi->info = info;
        epf_mhi->epf = epf;

        epf->event_ops = &pci_epf_mhi_event_ops;

        mutex_init(&epf_mhi->lock);

        epf_set_drvdata(epf, epf_mhi);

        return 0;
}

static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
        { .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
        { .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
        {},
};

static const struct pci_epf_ops pci_epf_mhi_ops = {
        .unbind = pci_epf_mhi_unbind,
        .bind   = pci_epf_mhi_bind,
};

static struct pci_epf_driver pci_epf_mhi_driver = {
        .driver.name    = "pci_epf_mhi",
        .probe          = pci_epf_mhi_probe,
        .id_table       = pci_epf_mhi_ids,
        .ops            = &pci_epf_mhi_ops,
        .owner          = THIS_MODULE,
};

static int __init pci_epf_mhi_init(void)
{
        return pci_epf_register_driver(&pci_epf_mhi_driver);
}
module_init(pci_epf_mhi_init);

static void __exit pci_epf_mhi_exit(void)
{
        pci_epf_unregister_driver(&pci_epf_mhi_driver);
}
module_exit(pci_epf_mhi_exit);

MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");