/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
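
/*
 * Illustrative usage (not from the original source; the module name
 * ath10k_pci is an assumption): since the parameter is registered with
 * mode 0644 it can be set at load time or toggled later via sysfs, e.g.
 *
 *   modprobe ath10k_pci ath10k_target_ps=1
 *   echo 1 > /sys/module/ath10k_pci/parameters/ath10k_target_ps
 */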

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                             int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};
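
/*
 * A zero src_nentries (or dest_nentries) above means the host posts no
 * send (or receive) buffers on that CE, making the pipe unidirectional
 * from the host's point of view. Illustrative sketch of how the table
 * is consumed (see ath10k_pci_start_ce() below for the real code):
 *
 *   attr = &host_ce_config_wlan[pipe_num];
 *   if (attr->src_nentries)
 *           ath10k_ce_send_cb_register(...);
 *   if (attr->dest_nentries)
 *           ath10k_ce_recv_cb_register(...);
 */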

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};
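
/*
 * host_ce_config_wlan[] and target_ce_config_wlan[] describe the same
 * copy engines from opposite ends: for the pipes in active use, a
 * PIPEDIR_OUT entry here pairs with non-zero src_nentries on the host
 * side and PIPEDIR_IN pairs with non-zero dest_nentries. The two tables
 * are expected to be kept in sync when pipes are added or resized.
 */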

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
                                           ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                 0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}
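
/*
 * Illustrative usage of the diagnostic helpers (the in-tree caller is
 * ath10k_pci_hif_dump_area() below): a single word can be fetched with
 *
 *   u32 val;
 *   ret = ath10k_pci_diag_read_access(ar, address, &val);
 *
 * while a larger, 4-byte aligned block is read in one go with
 *
 *   ret = ath10k_pci_diag_read_mem(ar, address, buf, nbytes);
 */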

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;
        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0)
                ath10k_warn("Unable to wake up target\n");
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
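
/*
 * ath10k_do_pci_wake() and ath10k_do_pci_sleep() form a refcounted
 * pair: keep_awake_count tracks nested users and the SoC is only
 * allowed to sleep again when the last user drops its reference. The
 * expected calling pattern (illustrative; see the diag helpers above)
 * is:
 *
 *   ath10k_pci_wake(ar);
 *   ... access target registers ...
 *   ath10k_pci_sleep(ar);
 */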

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
                             flags);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump FW Dump Area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target Register Dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}
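
/*
 * Worked example of the mapping above (values follow directly from the
 * switch): the WMI control service is carried on CE 3 for host->target
 * and CE 2 for target->host, and neither direction is polled:
 *
 *   u8 ul, dl;
 *   int ul_polled, dl_polled;
 *
 *   ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *                                      &ul, &dl, &ul_polled, &dl_polled);
 *   // ul == 3, dl == 2, ul_polled == 0, dl_polled == 0
 */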

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                                u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /*
                 * Indicate the completion to the higher layer to free
                 * the buffer
                 */
                ATH10K_SKB_CB(netbuf)->is_aborted = true;
                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
         * by ath10k_pci_start_intr(). */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = wait_for_completion_timeout(&xfer.done,
                                          BMI_COMMUNICATION_TIMEOUT_HZ);
        if (ret <= 0) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ret = -ETIMEDOUT;
                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}
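
/*
 * Illustrative caller of the BMI exchange above (the real users live in
 * bmi.c and reach it through the HIF ops): send a fixed-size request
 * and receive a bounded response:
 *
 *   u32 resp_len = sizeof(resp);
 *
 *   ret = ath10k_pci_hif_exchange_bmi_msg(ar, &req, sizeof(req),
 *                                         &resp, &resp_len);
 *
 * On success resp_len is clamped to the number of bytes the target
 * actually returned (xfer.resp_len).
 */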
1442
1443 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1444 {
1445         struct bmi_xfer *xfer;
1446         u32 ce_data;
1447         unsigned int nbytes;
1448         unsigned int transfer_id;
1449
1450         if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1451                                           &nbytes, &transfer_id))
1452                 return;
1453
1454         if (xfer->wait_for_resp)
1455                 return;
1456
1457         complete(&xfer->done);
1458 }
1459
1460 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1461 {
1462         struct bmi_xfer *xfer;
1463         u32 ce_data;
1464         unsigned int nbytes;
1465         unsigned int transfer_id;
1466         unsigned int flags;
1467
1468         if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1469                                           &nbytes, &transfer_id, &flags))
1470                 return;
1471
1472         if (!xfer->wait_for_resp) {
1473                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1474                 return;
1475         }
1476
1477         xfer->resp_len = nbytes;
1478         complete(&xfer->done);
1479 }
1480
1481 /*
1482  * Map from service/endpoint to Copy Engine.
1483  * This table is derived from the CE_PCI TABLE, above.
1484  * It is passed to the Target at startup for use by firmware.
1485  */
1486 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1487         {
1488                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1489                  PIPEDIR_OUT,           /* out = UL = host -> target */
1490                  3,
1491         },
1492         {
1493                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1494                  PIPEDIR_IN,            /* in = DL = target -> host */
1495                  2,
1496         },
1497         {
1498                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1499                  PIPEDIR_OUT,           /* out = UL = host -> target */
1500                  3,
1501         },
1502         {
1503                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1504                  PIPEDIR_IN,            /* in = DL = target -> host */
1505                  2,
1506         },
1507         {
1508                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1509                  PIPEDIR_OUT,           /* out = UL = host -> target */
1510                  3,
1511         },
1512         {
1513                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1514                  PIPEDIR_IN,            /* in = DL = target -> host */
1515                  2,
1516         },
1517         {
1518                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1519                  PIPEDIR_OUT,           /* out = UL = host -> target */
1520                  3,
1521         },
1522         {
1523                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1524                  PIPEDIR_IN,            /* in = DL = target -> host */
1525                  2,
1526         },
1527         {
1528                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1529                  PIPEDIR_OUT,           /* out = UL = host -> target */
1530                  3,
1531         },
1532         {
1533                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1534                  PIPEDIR_IN,            /* in = DL = target -> host */
1535                  2,
1536         },
1537         {
1538                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1539                  PIPEDIR_OUT,           /* out = UL = host -> target */
1540                  0,             /* could be moved to 3 (share with WMI) */
1541         },
1542         {
1543                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1544                  PIPEDIR_IN,            /* in = DL = target -> host */
1545                  1,
1546         },
1547         {
1548                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1549                  PIPEDIR_OUT,           /* out = UL = host -> target */
1550                  0,
1551         },
1552         {
1553                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1554                  PIPEDIR_IN,            /* in = DL = target -> host */
1555                  1,
1556         },
1557         {
1558                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1559                  PIPEDIR_OUT,           /* out = UL = host -> target */
1560                  4,
1561         },
1562         {
1563                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1564                  PIPEDIR_IN,            /* in = DL = target -> host */
1565                  1,
1566         },
1567
1568         /* (Additions here) */
1569
1570         {                               /* Must be last */
1571                  0,
1572                  0,
1573                  0,
1574         },
1575 };
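
/*
 * Illustrative only (kept under #if 0, not compiled): a sketch of the
 * lookup that a map_service_to_pipe implementation performs over the
 * table above. It assumes struct service_to_pipe exposes service_id,
 * pipedir and pipenum members matching the positional initializers
 * used above; see the driver headers for the authoritative definition.
 */
#if 0
static int example_map_service(u16 service_id, u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (entry->service_id != service_id)
			continue;

		if (entry->pipedir == PIPEDIR_OUT) {	   /* host -> target */
			*ul_pipe = entry->pipenum;
			ul_set = true;
		} else if (entry->pipedir == PIPEDIR_IN) { /* target -> host */
			*dl_pipe = entry->pipenum;
			dl_set = true;
		}
	}

	return (ul_set && dl_set) ? 0 : -ENOENT;
}
#endif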
1576
1577 /*
1578  * Send an interrupt to the device to wake up the Target CPU
1579  * so it has an opportunity to notice any changed state.
1580  */
1581 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1582 {
1583         int ret;
1584         u32 core_ctrl;
1585
1586         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1587                                               CORE_CTRL_ADDRESS,
1588                                           &core_ctrl);
1589         if (ret) {
1590                 ath10k_warn("Unable to read core ctrl\n");
1591                 return ret;
1592         }
1593
1594         /* A_INUM_FIRMWARE interrupt to Target CPU */
1595         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1596
1597         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1598                                                CORE_CTRL_ADDRESS,
1599                                            core_ctrl);
1600         if (ret)
1601                 ath10k_warn("Unable to set interrupt mask\n");
1602
1603         return ret;
1604 }
1605
1606 static int ath10k_pci_init_config(struct ath10k *ar)
1607 {
1608         u32 interconnect_targ_addr;
1609         u32 pcie_state_targ_addr = 0;
1610         u32 pipe_cfg_targ_addr = 0;
1611         u32 svc_to_pipe_map = 0;
1612         u32 pcie_config_flags = 0;
1613         u32 ealloc_value;
1614         u32 ealloc_targ_addr;
1615         u32 flag2_value;
1616         u32 flag2_targ_addr;
1617         int ret = 0;
1618
1619         /* Download to Target the CE Config and the service-to-CE map */
1620         interconnect_targ_addr =
1621                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1622
1623         /* Supply Target-side CE configuration */
1624         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1625                                           &pcie_state_targ_addr);
1626         if (ret != 0) {
1627                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1628                 return ret;
1629         }
1630
1631         if (pcie_state_targ_addr == 0) {
1632                 ret = -EIO;
1633                 ath10k_err("Invalid pcie state addr\n");
1634                 return ret;
1635         }
1636
1637         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1638                                           offsetof(struct pcie_state,
1639                                                    pipe_cfg_addr),
1640                                           &pipe_cfg_targ_addr);
1641         if (ret != 0) {
1642                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1643                 return ret;
1644         }
1645
1646         if (pipe_cfg_targ_addr == 0) {
1647                 ret = -EIO;
1648                 ath10k_err("Invalid pipe cfg addr\n");
1649                 return ret;
1650         }
1651
1652         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1653                                  target_ce_config_wlan,
1654                                  sizeof(target_ce_config_wlan));
1655
1656         if (ret != 0) {
1657                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1658                 return ret;
1659         }
1660
1661         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1662                                           offsetof(struct pcie_state,
1663                                                    svc_to_pipe_map),
1664                                           &svc_to_pipe_map);
1665         if (ret != 0) {
1666                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1667                 return ret;
1668         }
1669
1670         if (svc_to_pipe_map == 0) {
1671                 ret = -EIO;
1672                 ath10k_err("Invalid svc_to_pipe map\n");
1673                 return ret;
1674         }
1675
1676         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1677                                  target_service_to_ce_map_wlan,
1678                                  sizeof(target_service_to_ce_map_wlan));
1679         if (ret != 0) {
1680                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1681                 return ret;
1682         }
1683
1684         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1685                                           offsetof(struct pcie_state,
1686                                                    config_flags),
1687                                           &pcie_config_flags);
1688         if (ret != 0) {
1689                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1690                 return ret;
1691         }
1692
1693         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1694
1695         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1696                                  offsetof(struct pcie_state, config_flags),
1697                                  &pcie_config_flags,
1698                                  sizeof(pcie_config_flags));
1699         if (ret != 0) {
1700                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1701                 return ret;
1702         }
1703
1704         /* configure early allocation */
1705         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1706
1707         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1708         if (ret != 0) {
1709                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1710                 return ret;
1711         }
1712
1713         /* first bank is switched to IRAM */
1714         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1715                          HI_EARLY_ALLOC_MAGIC_MASK);
1716         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1717                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1718
1719         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1720         if (ret != 0) {
1721                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1722                 return ret;
1723         }
1724
1725         /* Tell Target to proceed with initialization */
1726         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1727
1728         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1729         if (ret != 0) {
1730                 ath10k_err("Failed to get option val: %d\n", ret);
1731                 return ret;
1732         }
1733
1734         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1735
1736         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1737         if (ret != 0) {
1738                 ath10k_err("Failed to set option val: %d\n", ret);
1739                 return ret;
1740         }
1741
1742         return 0;
1743 }
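
/*
 * For reference: ath10k_pci_init_config() above patches the Target's
 * copy of struct pcie_state through the diagnostic window. Only the
 * members it actually dereferences via offsetof() are sketched here;
 * the layout and any other members are owned by the target-side
 * headers, so treat this as a summary rather than a definition:
 *
 *	pipe_cfg_addr   - target address of the CE pipe config, which is
 *	                  overwritten with target_ce_config_wlan
 *	svc_to_pipe_map - target address of the service->pipe map, which
 *	                  is overwritten with target_service_to_ce_map_wlan
 *	config_flags    - feature flags; PCIE_CONFIG_FLAG_ENABLE_L1 is
 *	                  cleared to keep PCIe L1 disabled
 */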
1744
1745
1746
1747 static int ath10k_pci_ce_init(struct ath10k *ar)
1748 {
1749         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1750         struct ath10k_pci_pipe *pipe_info;
1751         const struct ce_attr *attr;
1752         int pipe_num;
1753
1754         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1755                 pipe_info = &ar_pci->pipe_info[pipe_num];
1756                 pipe_info->pipe_num = pipe_num;
1757                 pipe_info->hif_ce_state = ar;
1758                 attr = &host_ce_config_wlan[pipe_num];
1759
1760                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1761                 if (pipe_info->ce_hdl == NULL) {
1762                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1763                                    pipe_num);
1764
1765                         /* It is safe to call this here; it checks that
1766                          * ce_hdl is valid for each pipe */
1767                         ath10k_pci_ce_deinit(ar);
1768                         return -ENOMEM;
1769                 }
1770
1771                 if (pipe_num == ar_pci->ce_count - 1) {
1772                         /*
1773                          * Reserve the last CE for
1774                          * diagnostic window support
1775                          */
1776                         ar_pci->ce_diag =
1777                         ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1778                         continue;
1779                 }
1780
1781                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1782         }
1783
1784         /*
1785          * Initially, establish CE completion handlers for use with BMI.
1786          * These are overwritten with generic handlers after we exit BMI phase.
1787          */
1788         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1789         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1790                                    ath10k_pci_bmi_send_done, 0);
1791
1792         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1793         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1794                                    ath10k_pci_bmi_recv_data);
1795
1796         return 0;
1797 }
1798
1799 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1800 {
1801         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1802         u32 fw_indicator_address, fw_indicator;
1803
1804         ath10k_pci_wake(ar);
1805
1806         fw_indicator_address = ar_pci->fw_indicator_address;
1807         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1808
1809         if (fw_indicator & FW_IND_EVENT_PENDING) {
1810                 /* ACK: clear Target-side pending event */
1811                 ath10k_pci_write32(ar, fw_indicator_address,
1812                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1813
1814                 if (ar_pci->started) {
1815                         ath10k_pci_hif_dump_area(ar);
1816                 } else {
1817                         /*
1818                          * Probable Target failure before we're prepared
1819                          * to handle it.  Generally unexpected.
1820                          */
1821                         ath10k_warn("early firmware event indicated\n");
1822                 }
1823         }
1824
1825         ath10k_pci_sleep(ar);
1826 }
1827
1828 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1829 {
1830         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1831         int ret;
1832
1833         ret = ath10k_pci_start_intr(ar);
1834         if (ret) {
1835                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1836                 goto err;
1837         }
1838
1839         /*
1840          * Bring the target up cleanly.
1841          *
1842          * The target may be in an undefined state with an AUX-powered Target
1843          * and a Host in WoW mode. If the Host crashes, loses power, or is
1844          * restarted (without unloading the driver) then the Target is left
1845          * (aux) powered and running. On a subsequent driver load, the Target
1846          * is in an unexpected state. We try to catch that here in order to
1847          * reset the Target and retry the probe.
1848          */
1849         ath10k_pci_device_reset(ar);
1850
1851         ret = ath10k_pci_reset_target(ar);
1852         if (ret)
1853                 goto err_irq;
1854
1855         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1856                 /* Force AWAKE forever */
1857                 ath10k_do_pci_wake(ar);
1858
1859         ret = ath10k_pci_ce_init(ar);
1860         if (ret)
1861                 goto err_ps;
1862
1863         ret = ath10k_pci_init_config(ar);
1864         if (ret)
1865                 goto err_ce;
1866
1867         ret = ath10k_pci_wake_target_cpu(ar);
1868         if (ret) {
1869                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1870                 goto err_ce;
1871         }
1872
1873         return 0;
1874
1875 err_ce:
1876         ath10k_pci_ce_deinit(ar);
1877 err_ps:
1878         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1879                 ath10k_do_pci_sleep(ar);
1880 err_irq:
1881         ath10k_pci_stop_intr(ar);
1882 err:
1883         return ret;
1884 }
1885
1886 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1887 {
1888         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1889
1890         ath10k_pci_stop_intr(ar);
1891
1892         ath10k_pci_ce_deinit(ar);
1893         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1894                 ath10k_do_pci_sleep(ar);
1895 }
1896
1897 #ifdef CONFIG_PM
1898
1899 #define ATH10K_PCI_PM_CONTROL 0x44
1900
1901 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1902 {
1903         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1904         struct pci_dev *pdev = ar_pci->pdev;
1905         u32 val;
1906
1907         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1908
1909         if ((val & 0x000000ff) != 0x3) {
1910                 pci_save_state(pdev);
1911                 pci_disable_device(pdev);
1912                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1913                                        (val & 0xffffff00) | 0x03);
1914         }
1915
1916         return 0;
1917 }
1918
1919 static int ath10k_pci_hif_resume(struct ath10k *ar)
1920 {
1921         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1922         struct pci_dev *pdev = ar_pci->pdev;
1923         u32 val;
1924
1925         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1926
1927         if ((val & 0x000000ff) != 0) {
1928                 pci_restore_state(pdev);
1929                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1930                                        val & 0xffffff00);
1931                 /*
1932                  * Suspend/Resume resets the PCI configuration space,
1933                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1934                  * to keep PCI Tx retries from interfering with C3 CPU state
1935                  */
1936                 pci_read_config_dword(pdev, 0x40, &val);
1937
1938                 if ((val & 0x0000ff00) != 0)
1939                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1940         }
1941
1942         return 0;
1943 }
1944 #endif
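
/*
 * Note on the PM constants above: assuming the PCI power management
 * capability sits at config offset 0x40 (which the hardcoded offsets
 * here imply), offset 0x44 (ATH10K_PCI_PM_CONTROL) is the PMCSR
 * register, whose low two bits encode the device power state. Suspend
 * writes 0x3 (D3hot) into those bits and resume clears them back to
 * 0 (D0), which is why both paths mask off only the low byte.
 */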
1945
1946 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1947         .send_head              = ath10k_pci_hif_send_head,
1948         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1949         .start                  = ath10k_pci_hif_start,
1950         .stop                   = ath10k_pci_hif_stop,
1951         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1952         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1953         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1954         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1955         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1956         .power_up               = ath10k_pci_hif_power_up,
1957         .power_down             = ath10k_pci_hif_power_down,
1958 #ifdef CONFIG_PM
1959         .suspend                = ath10k_pci_hif_suspend,
1960         .resume                 = ath10k_pci_hif_resume,
1961 #endif
1962 };
1963
1964 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1965 {
1966         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1967         struct ath10k_pci *ar_pci = pipe->ar_pci;
1968
1969         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1970 }
1971
1972 static void ath10k_msi_err_tasklet(unsigned long data)
1973 {
1974         struct ath10k *ar = (struct ath10k *)data;
1975
1976         ath10k_pci_fw_interrupt_handler(ar);
1977 }
1978
1979 /*
1980  * Handler for a per-engine interrupt on a PARTICULAR CE.
1981  * This is used in cases where each CE has a private MSI interrupt.
1982  */
1983 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1984 {
1985         struct ath10k *ar = arg;
1986         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1987         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1988
1989         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1990                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1991                 return IRQ_HANDLED;
1992         }
1993
1994         /*
1995          * NOTE: We are able to derive ce_id from irq because we
1996          * use a one-to-one mapping for CE's 0..5.
1997          * CE's 6 & 7 do not use interrupts at all.
1998          *
1999          * This mapping must be kept in sync with the mapping
2000          * used by firmware.
2001          */
2002         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2003         return IRQ_HANDLED;
2004 }
2005
2006 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2007 {
2008         struct ath10k *ar = arg;
2009         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2010
2011         tasklet_schedule(&ar_pci->msi_fw_err);
2012         return IRQ_HANDLED;
2013 }
2014
2015 /*
2016  * Top-level interrupt handler for all PCI interrupts from a Target.
2017  * When a block of MSI interrupts is allocated, this top-level handler
2018  * is not used; instead, we directly call the correct sub-handler.
2019  */
2020 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2021 {
2022         struct ath10k *ar = arg;
2023         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2024
2025         if (ar_pci->num_msi_intrs == 0) {
2026                 /*
2027                  * IMPORTANT: the INTR_CLR register has to be written after
2028                  * INTR_ENABLE is set to 0; otherwise the interrupt cannot
2029                  * really be cleared.
2030                  */
2031                 iowrite32(0, ar_pci->mem +
2032                           (SOC_CORE_BASE_ADDRESS |
2033                            PCIE_INTR_ENABLE_ADDRESS));
2034                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2035                           PCIE_INTR_CE_MASK_ALL,
2036                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2037                                          PCIE_INTR_CLR_ADDRESS));
2038                 /*
2039                  * IMPORTANT: this extra read transaction is required to
2040                  * flush the posted write buffer.
2041                  */
2042                 (void) ioread32(ar_pci->mem +
2043                                 (SOC_CORE_BASE_ADDRESS |
2044                                  PCIE_INTR_ENABLE_ADDRESS));
2045         }
2046
2047         tasklet_schedule(&ar_pci->intr_tq);
2048
2049         return IRQ_HANDLED;
2050 }
2051
2052 static void ath10k_pci_tasklet(unsigned long data)
2053 {
2054         struct ath10k *ar = (struct ath10k *)data;
2055         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2056
2057         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2058         ath10k_ce_per_engine_service_any(ar);
2059
2060         if (ar_pci->num_msi_intrs == 0) {
2061                 /* Enable Legacy PCI line interrupts */
2062                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2063                           PCIE_INTR_CE_MASK_ALL,
2064                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2065                                          PCIE_INTR_ENABLE_ADDRESS));
2066                 /*
2067                  * IMPORTANT: this extra read transaction is required to
2068                  * flush the posted write buffer
2069                  */
2070                 (void) ioread32(ar_pci->mem +
2071                                 (SOC_CORE_BASE_ADDRESS |
2072                                  PCIE_INTR_ENABLE_ADDRESS));
2073         }
2074 }
2075
2076 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2077 {
2078         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2079         int ret;
2080         int i;
2081
2082         ret = pci_enable_msi_block(ar_pci->pdev, num);
2083         if (ret)
2084                 return ret;
2085
2086         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2087                           ath10k_pci_msi_fw_handler,
2088                           IRQF_SHARED, "ath10k_pci", ar);
2089         if (ret) {
2090                 ath10k_warn("request_irq(%d) failed %d\n",
2091                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2092
2093                 pci_disable_msi(ar_pci->pdev);
2094                 return ret;
2095         }
2096
2097         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2098                 ret = request_irq(ar_pci->pdev->irq + i,
2099                                   ath10k_pci_per_engine_handler,
2100                                   IRQF_SHARED, "ath10k_pci", ar);
2101                 if (ret) {
2102                         ath10k_warn("request_irq(%d) failed %d\n",
2103                                     ar_pci->pdev->irq + i, ret);
2104
2105                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2106                                 free_irq(ar_pci->pdev->irq + i, ar);
2107
2108                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2109                         pci_disable_msi(ar_pci->pdev);
2110                         return ret;
2111                 }
2112         }
2113
2114         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2115         return 0;
2116 }
2117
2118 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2119 {
2120         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2121         int ret;
2122
2123         ret = pci_enable_msi(ar_pci->pdev);
2124         if (ret < 0)
2125                 return ret;
2126
2127         ret = request_irq(ar_pci->pdev->irq,
2128                           ath10k_pci_interrupt_handler,
2129                           IRQF_SHARED, "ath10k_pci", ar);
2130         if (ret < 0) {
2131                 pci_disable_msi(ar_pci->pdev);
2132                 return ret;
2133         }
2134
2135         ath10k_info("MSI interrupt handling\n");
2136         return 0;
2137 }
2138
2139 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2140 {
2141         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2142         int ret;
2143
2144         ret = request_irq(ar_pci->pdev->irq,
2145                           ath10k_pci_interrupt_handler,
2146                           IRQF_SHARED, "ath10k_pci", ar);
2147         if (ret < 0)
2148                 return ret;
2149
2150         /*
2151          * Make sure to wake the Target before enabling Legacy
2152          * Interrupt.
2153          */
2154         iowrite32(PCIE_SOC_WAKE_V_MASK,
2155                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2156                   PCIE_SOC_WAKE_ADDRESS);
2157
2158         ath10k_pci_wait(ar);
2159
2160         /*
2161          * A potential race occurs here: the CORE_BASE write
2162          * depends on the target correctly decoding the AXI address,
2163          * but the host won't know when the target has written its
2164          * BAR to CORE_CTRL. The write might get lost if the target
2165          * has NOT yet written the BAR. For now, work around the race
2166          * by repeating the write in the synchronization check below.
2167          */
2168         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2169                   PCIE_INTR_CE_MASK_ALL,
2170                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2171                                  PCIE_INTR_ENABLE_ADDRESS));
2172         iowrite32(PCIE_SOC_WAKE_RESET,
2173                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2174                   PCIE_SOC_WAKE_ADDRESS);
2175
2176         ath10k_info("legacy interrupt handling\n");
2177         return 0;
2178 }
2179
2180 static int ath10k_pci_start_intr(struct ath10k *ar)
2181 {
2182         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2183         int num = MSI_NUM_REQUEST;
2184         int ret;
2185         int i;
2186
2187         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2188         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2189                      (unsigned long) ar);
2190
2191         for (i = 0; i < CE_COUNT; i++) {
2192                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2193                 tasklet_init(&ar_pci->pipe_info[i].intr,
2194                              ath10k_pci_ce_tasklet,
2195                              (unsigned long)&ar_pci->pipe_info[i]);
2196         }
2197
2198         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2199                 num = 1;
2200
2201         if (num > 1) {
2202                 ret = ath10k_pci_start_intr_msix(ar, num);
2203                 if (ret == 0)
2204                         goto exit;
2205
2206                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2207                 num = 1;
2208         }
2209
2210         if (num == 1) {
2211                 ret = ath10k_pci_start_intr_msi(ar);
2212                 if (ret == 0)
2213                         goto exit;
2214
2215                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2216                             ret);
2217                 num = 0;
2218         }
2219
2220         ret = ath10k_pci_start_intr_legacy(ar);
2221
2222 exit:
2223         ar_pci->num_msi_intrs = num;
2224         ar_pci->ce_count = CE_COUNT;
2225         return ret;
2226 }
2227
2228 static void ath10k_pci_stop_intr(struct ath10k *ar)
2229 {
2230         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2231         int i;
2232
2233         /* There's at least one interrupt regardless of whether it's
2234          * legacy INTR, MSI or MSI-X */
2235         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2236                 free_irq(ar_pci->pdev->irq + i, ar);
2237
2238         if (ar_pci->num_msi_intrs > 0)
2239                 pci_disable_msi(ar_pci->pdev);
2240 }
2241
2242 static int ath10k_pci_reset_target(struct ath10k *ar)
2243 {
2244         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2245         int wait_limit = 300; /* 3 sec */
2246
2247         /* Wait for Target to finish initialization before we proceed. */
2248         iowrite32(PCIE_SOC_WAKE_V_MASK,
2249                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2250                   PCIE_SOC_WAKE_ADDRESS);
2251
2252         ath10k_pci_wait(ar);
2253
2254         while (wait_limit-- &&
2255                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2256                  FW_IND_INITIALIZED)) {
2257                 if (ar_pci->num_msi_intrs == 0)
2258                         /* Fix potential race by repeating CORE_BASE writes */
2259                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2260                                   PCIE_INTR_CE_MASK_ALL,
2261                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2262                                                  PCIE_INTR_ENABLE_ADDRESS));
2263                 mdelay(10);
2264         }
2265
2266         if (wait_limit < 0) {
2267                 ath10k_err("Target stalled\n");
2268                 iowrite32(PCIE_SOC_WAKE_RESET,
2269                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2270                           PCIE_SOC_WAKE_ADDRESS);
2271                 return -EIO;
2272         }
2273
2274         iowrite32(PCIE_SOC_WAKE_RESET,
2275                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2276                   PCIE_SOC_WAKE_ADDRESS);
2277
2278         return 0;
2279 }
2280
2281 static void ath10k_pci_device_reset(struct ath10k *ar)
2282 {
2283         int i;
2284         u32 val;
2285
2286         if (!SOC_GLOBAL_RESET_ADDRESS)
2287                 return;
2288
2289         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
2290                                PCIE_SOC_WAKE_V_MASK);
2291         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2292                 if (ath10k_pci_target_is_awake(ar))
2293                         break;
2294                 msleep(1);
2295         }
2296
2297         /* Put Target, including PCIe, into RESET. */
2298         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2299         val |= 1;
2300         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2301
2302         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2303                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2304                                           RTC_STATE_COLD_RESET_MASK)
2305                         break;
2306                 msleep(1);
2307         }
2308
2309         /* Pull Target, including PCIe, out of RESET. */
2310         val &= ~1;
2311         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2312
2313         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2314                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2315                                             RTC_STATE_COLD_RESET_MASK))
2316                         break;
2317                 msleep(1);
2318         }
2319
2320         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2321 }
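
/*
 * Summary of the reset sequence above: (1) force the SoC awake so its
 * registers are reachable, (2) set bit 0 of SOC_GLOBAL_RESET and poll
 * RTC_STATE until the cold-reset state is observed, (3) clear the bit
 * and poll until the cold-reset state goes away, (4) drop the forced
 * wake. Each poll is bounded by ATH_PCI_RESET_WAIT_MAX 1 ms iterations
 * and falls through silently on timeout.
 */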
2322
2323 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2324 {
2325         int i;
2326
2327         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2328                 if (!test_bit(i, ar_pci->features))
2329                         continue;
2330
2331                 switch (i) {
2332                 case ATH10K_PCI_FEATURE_MSI_X:
2333                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2334                         break;
2335                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2336                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2337                         break;
2338                 }
2339         }
2340 }
2341
2342 static int ath10k_pci_probe(struct pci_dev *pdev,
2343                             const struct pci_device_id *pci_dev)
2344 {
2345         void __iomem *mem;
2346         int ret = 0;
2347         struct ath10k *ar;
2348         struct ath10k_pci *ar_pci;
2349         u32 lcr_val, chip_id;
2350
2351         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2352
2353         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2354         if (ar_pci == NULL)
2355                 return -ENOMEM;
2356
2357         ar_pci->pdev = pdev;
2358         ar_pci->dev = &pdev->dev;
2359
2360         switch (pci_dev->device) {
2361         case QCA988X_2_0_DEVICE_ID:
2362                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2363                 break;
2364         default:
2365                 ret = -ENODEV;
2366                 ath10k_err("Unkown device ID: %d\n", pci_dev->device);
2367                 goto err_ar_pci;
2368         }
2369
2370         if (ath10k_target_ps)
2371                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2372
2373         ath10k_pci_dump_features(ar_pci);
2374
2375         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2376         if (!ar) {
2377                 ath10k_err("ath10k_core_create failed!\n");
2378                 ret = -EINVAL;
2379                 goto err_ar_pci;
2380         }
2381
2382         ar_pci->ar = ar;
2383         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2384         atomic_set(&ar_pci->keep_awake_count, 0);
2385
2386         pci_set_drvdata(pdev, ar);
2387
2388         /*
2389          * Without any knowledge of the Host, the Target may have been reset or
2390          * power cycled and its Config Space may no longer reflect the PCI
2391          * address space that was assigned earlier by the PCI infrastructure.
2392          * Refresh it now.
2393          */
2394         ret = pci_assign_resource(pdev, BAR_NUM);
2395         if (ret) {
2396                 ath10k_err("cannot assign PCI space: %d\n", ret);
2397                 goto err_ar;
2398         }
2399
2400         ret = pci_enable_device(pdev);
2401         if (ret) {
2402                 ath10k_err("cannot enable PCI device: %d\n", ret);
2403                 goto err_ar;
2404         }
2405
2406         /* Request MMIO resources */
2407         ret = pci_request_region(pdev, BAR_NUM, "ath");
2408         if (ret) {
2409                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2410                 goto err_device;
2411         }
2412
2413         /*
2414          * Target structures have a limit of 32 bit DMA pointers.
2415          * DMA pointers can be wider than 32 bits by default on some systems.
2416          */
2417         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2418         if (ret) {
2419                 ath10k_err("32-bit DMA not available: %d\n", ret);
2420                 goto err_region;
2421         }
2422
2423         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2424         if (ret) {
2425                 ath10k_err("cannot enable 32-bit consistent DMA\n");
2426                 goto err_region;
2427         }
2428
2429         /* Set bus master bit in PCI_COMMAND to enable DMA */
2430         pci_set_master(pdev);
2431
2432         /*
2433          * Temporary FIX: disable ASPM
2434          * Will be removed after the OTP is programmed
2435          */
2436         pci_read_config_dword(pdev, 0x80, &lcr_val);
2437         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2438
2439         /* Arrange for access to Target SoC registers. */
2440         mem = pci_iomap(pdev, BAR_NUM, 0);
2441         if (!mem) {
2442                 ath10k_err("PCI iomap error\n");
2443                 ret = -EIO;
2444                 goto err_master;
2445         }
2446
2447         ar_pci->mem = mem;
2448
2449         spin_lock_init(&ar_pci->ce_lock);
2450
2451         ret = ath10k_do_pci_wake(ar);
2452         if (ret) {
2453                 ath10k_err("Failed to get chip id: %d\n", ret);
2454                 goto err_iomap;
2455         }
2456
2457         chip_id = ath10k_pci_read32(ar,
2458                                     RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
2459
2460         ath10k_do_pci_sleep(ar);
2461
2462         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2463
2464         ret = ath10k_core_register(ar, chip_id);
2465         if (ret) {
2466                 ath10k_err("could not register driver core (%d)\n", ret);
2467                 goto err_iomap;
2468         }
2469
2470         return 0;
2471
2472 err_iomap:
2473         pci_iounmap(pdev, mem);
2474 err_master:
2475         pci_clear_master(pdev);
2476 err_region:
2477         pci_release_region(pdev, BAR_NUM);
2478 err_device:
2479         pci_disable_device(pdev);
2480 err_ar:
2481         ath10k_core_destroy(ar);
2482 err_ar_pci:
2483         /* call HIF PCI free here */
2484         kfree(ar_pci);
2485
2486         return ret;
2487 }
2488
2489 static void ath10k_pci_remove(struct pci_dev *pdev)
2490 {
2491         struct ath10k *ar = pci_get_drvdata(pdev);
2492         struct ath10k_pci *ar_pci;
2493
2494         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2495
2496         if (!ar)
2497                 return;
2498
2499         ar_pci = ath10k_pci_priv(ar);
2500
2501         if (!ar_pci)
2502                 return;
2503
2504         tasklet_kill(&ar_pci->msi_fw_err);
2505
2506         ath10k_core_unregister(ar);
2507
2508         pci_iounmap(pdev, ar_pci->mem);
2509         pci_release_region(pdev, BAR_NUM);
2510         pci_clear_master(pdev);
2511         pci_disable_device(pdev);
2512
2513         ath10k_core_destroy(ar);
2514         kfree(ar_pci);
2515 }
2516
2517 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2518
2519 static struct pci_driver ath10k_pci_driver = {
2520         .name = "ath10k_pci",
2521         .id_table = ath10k_pci_id_table,
2522         .probe = ath10k_pci_probe,
2523         .remove = ath10k_pci_remove,
2524 };
2525
2526 static int __init ath10k_pci_init(void)
2527 {
2528         int ret;
2529
2530         ret = pci_register_driver(&ath10k_pci_driver);
2531         if (ret)
2532                 ath10k_err("pci_register_driver failed [%d]\n", ret);
2533
2534         return ret;
2535 }
2536 module_init(ath10k_pci_init);
2537
2538 static void __exit ath10k_pci_exit(void)
2539 {
2540         pci_unregister_driver(&ath10k_pci_driver);
2541 }
2542
2543 module_exit(ath10k_pci_exit);
2544
2545 MODULE_AUTHOR("Qualcomm Atheros");
2546 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2547 MODULE_LICENSE("Dual BSD/GPL");
2548 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2549 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2550 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);