/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME        "ntb_hw_intel"
#define NTB_DESC        "Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER         "2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
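
/*
 * Register-table indexing helpers: each BAR has a 4-byte register slot, so
 * bar0_off(base, n) == base + 4 * n, and bar2_off() indexes tables whose
 * first entry belongs to BAR 2.  Illustrative arithmetic:
 * bar0_off(0x10, 4) == 0x20 and bar2_off(0x10, 4) == 0x18.
 */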

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct intel_ntb_reg skx_reg;
static const struct intel_ntb_alt_reg skx_pri_reg;
static const struct intel_ntb_alt_reg skx_b2b_reg;
static const struct intel_ntb_xlat_reg skx_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
static const struct ntb_dev_ops intel_ntb3_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
                 "value of zero or positive starts from first mw idx, and a "
                 "negative value starts from last mw idx.  Both sides MUST "
                 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
                 "ntb so that the peer ntb only occupies the first half of "
                 "the mw, so the second half can still be used as a mw.  Both "
                 "sides MUST set the same value here!");
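
/*
 * Illustrative usage (hypothetical values; both hosts must load the module
 * with identical settings):
 *
 *        modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 *
 * This reserves the last memory window for reaching the peer NTB and, if
 * that window is large enough, keeps its second half usable as an ordinary
 * memory window.
 */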

module_param_named(xeon_b2b_usd_bar2_addr64,
                   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
                 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
                   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
                 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
                   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
                 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
                   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
                 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
                   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
                 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
                   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
                 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
                   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
                 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
                   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
                 "XEON B2B DSD split-BAR 5 32-bit address");

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
static int xeon_init_isr(struct intel_ntb_dev *ndev);

#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
        u64 low, high;

        low = ioread32(mmio);
        high = ioread32(mmio + sizeof(u32));
        return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
        iowrite32(val, mmio);
        iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
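
/*
 * Note: where readq/writeq are unavailable, the fallbacks above split each
 * 64-bit MMIO access into two 32-bit accesses, low dword first, so the
 * access is not atomic and a concurrent hardware update can be observed as
 * a torn value.
 */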

static inline int pdev_is_atom(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
                return 1;
        }
        return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
                return 1;
        }
        return 0;
}

static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
{
        if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
                return 1;

        return 0;
}

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
        ndev->unsafe_flags = 0;
        ndev->unsafe_flags_ignore = 0;

        /* Only B2B has a workaround to avoid SDOORBELL */
        if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
                if (!ntb_topo_is_b2b(ndev->ntb.topo))
                        ndev->unsafe_flags |= NTB_UNSAFE_DB;

        /* No low level workaround to avoid SB01BASE */
        if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
                ndev->unsafe_flags |= NTB_UNSAFE_DB;
                ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
        }
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
                                 unsigned long flag)
{
        return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
                                     unsigned long flag)
{
        flag &= ndev->unsafe_flags;
        ndev->unsafe_flags_ignore |= flag;

        return !!flag;
}
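
/*
 * ndev_is_unsafe() reports a hazard that is present and not yet acknowledged,
 * while ndev_ignore_unsafe() latches the acknowledgment and returns whether
 * the hazard exists at all; after a client acknowledges NTB_UNSAFE_DB, the
 * doorbell accessors below stop emitting their pr_warn_once().
 */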

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
        if (idx < 0 || idx >= ndev->mw_count)
                return -EINVAL;
        return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
                               phys_addr_t *db_addr, resource_size_t *db_size,
                               phys_addr_t reg_addr, unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_addr) {
                *db_addr = reg_addr + reg;
                dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
        }

        if (db_size) {
                *db_size = ndev->reg->db_size;
                dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
        }

        return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
                               void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
                                void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        ndev->reg->db_iowrite(db_bits, mmio);

        return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                   void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask |= db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                     void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask &= ~db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}
/* Return u64 so a mask with bit 31 set is not sign-extended by callers. */
static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
        u64 shift, mask;

        shift = ndev->db_vec_shift;
        mask = BIT_ULL(shift) - 1;

        return mask << (shift * db_vector);
}
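
/*
 * Worked example: with db_vec_shift == 5, each vector owns five doorbell
 * bits, so db_vector 1 yields (BIT_ULL(5) - 1) << (5 * 1) == 0x3e0,
 * covering doorbell bits 5..9.
 */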

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
                                 phys_addr_t *spad_addr, phys_addr_t reg_addr,
                                 unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        if (spad_addr) {
                *spad_addr = reg_addr + reg + (idx << 2);
                dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
        }

        return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
                                 void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return 0;

        return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
                                  void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        iowrite32(val, mmio + (idx << 2));

        return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
        u64 vec_mask;

        vec_mask = ndev_vec_mask(ndev, vec);

        if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
                vec_mask |= ndev->db_link_mask;

        dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

        ndev->last_ts = jiffies;

        if (vec_mask & ndev->db_link_mask) {
                if (ndev->reg->poll_link(ndev))
                        ntb_link_event(&ndev->ntb);
        }

        if (vec_mask & ndev->db_valid_mask)
                ntb_db_event(&ndev->ntb, vec);

        return IRQ_HANDLED;
}
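
/*
 * On hardware with the MSIX_VECTOR32_BAD erratum, vector 31 also stands in
 * for link events: its mask is widened with db_link_mask above, so a link
 * status change arriving on that vector is polled and propagated here.
 */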

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
        struct intel_ntb_vec *nvec = dev;

        dev_dbg(ndev_dev(nvec->ndev), "irq: %d  nvec->num: %d\n",
                irq, nvec->num);

        return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
        struct intel_ntb_dev *ndev = dev;

        return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

static int ndev_init_isr(struct intel_ntb_dev *ndev,
                         int msix_min, int msix_max,
                         int msix_shift, int total_shift)
{
        struct pci_dev *pdev;
        int rc, i, msix_count, node;

        pdev = ndev_pdev(ndev);

        node = dev_to_node(&pdev->dev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        /* Try to set up msix irq */

        ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
                                 GFP_KERNEL, node);
        if (!ndev->vec)
                goto err_msix_vec_alloc;

        ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
                                  GFP_KERNEL, node);
        if (!ndev->msix)
                goto err_msix_alloc;

        for (i = 0; i < msix_max; ++i)
                ndev->msix[i].entry = i;

        msix_count = pci_enable_msix_range(pdev, ndev->msix,
                                           msix_min, msix_max);
        if (msix_count < 0)
                goto err_msix_enable;

        for (i = 0; i < msix_count; ++i) {
                ndev->vec[i].ndev = ndev;
                ndev->vec[i].num = i;
                rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
                                 "ndev_vec_isr", &ndev->vec[i]);
                if (rc)
                        goto err_msix_request;
        }

        dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count);
        ndev->db_vec_count = msix_count;
        ndev->db_vec_shift = msix_shift;
        return 0;

err_msix_request:
        while (i-- > 0)
                free_irq(ndev->msix[i].vector, &ndev->vec[i]);
        pci_disable_msix(pdev);
err_msix_enable:
        kfree(ndev->msix);
err_msix_alloc:
        kfree(ndev->vec);
err_msix_vec_alloc:
        ndev->msix = NULL;
        ndev->vec = NULL;

        /* Try to set up msi irq */

        rc = pci_enable_msi(pdev);
        if (rc)
                goto err_msi_enable;

        rc = request_irq(pdev->irq, ndev_irq_isr, 0,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_msi_request;

        dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_msi_request:
        pci_disable_msi(pdev);
err_msi_enable:

        /* Try to set up intx irq */

        pci_intx(pdev, 1);

        rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_intx_request;

        dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_intx_request:
        return rc;
}
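
/*
 * Interrupt setup degrades gracefully: per-vector MSI-X first, then a single
 * MSI, then shared legacy INTx.  The MSI-X error labels intentionally fall
 * through into the MSI path, and in the single-interrupt modes one vector
 * covers every doorbell bit (db_vec_shift == total_shift).
 */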

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev;
        int i;

        pdev = ndev_pdev(ndev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        if (ndev->msix) {
                i = ndev->db_vec_count;
                while (i--)
                        free_irq(ndev->msix[i].vector, &ndev->vec[i]);
                pci_disable_msix(pdev);
                kfree(ndev->msix);
                kfree(ndev->vec);
        } else {
                free_irq(pdev->irq, ndev);
                if (pci_dev_msi_enabled(pdev))
                        pci_disable_msi(pdev);
        }
}

static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
                                      size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; } u;

        ndev = filp->private_data;
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

        if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "\nNTB Outgoing B2B XLAT:\n");

                u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nNTB Secondary BAR:\n");

                u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR0 -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR1 -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR2 -\t\t%#018llx\n", u.v64);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Statistics:\n");

        u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "Upstream Memory Miss -\t%u\n", u.v16);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Hardware Errors:\n");

        if (!pci_read_config_word(ndev->ntb.pdev,
                                  SKX_DEVSTS_OFFSET, &u.v16))
                off += scnprintf(buf + off, buf_size - off,
                                 "DEVSTS -\t\t%#06x\n", u.v16);

        if (!pci_read_config_word(ndev->ntb.pdev,
                                  SKX_LINK_STATUS_OFFSET, &u.v16))
                off += scnprintf(buf + off, buf_size - off,
                                 "LNKSTS -\t\t%#06x\n", u.v16);

        if (!pci_read_config_dword(ndev->ntb.pdev,
                                   SKX_UNCERRSTS_OFFSET, &u.v32))
                off += scnprintf(buf + off, buf_size - off,
                                 "UNCERRSTS -\t\t%#06x\n", u.v32);

        if (!pci_read_config_dword(ndev->ntb.pdev,
                                   SKX_CORERRSTS_OFFSET, &u.v32))
                off += scnprintf(buf + off, buf_size - off,
                                 "CORERRSTS -\t\t%#06x\n", u.v32);

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}

static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
                                     size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        struct pci_dev *pdev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

        ndev = filp->private_data;
        pdev = ndev_pdev(ndev);
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        if (ndev->b2b_idx != UINT_MAX) {
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "BAR4 Split -\t\t%s\n",
                         ndev->bar4_split ? "yes" : "no");

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Window Size:\n");

        pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "PBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR5SZ %hhu\n", u.v8);
        }

        pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "SBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR5SZ %hhu\n", u.v8);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "XLAT23 -\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT4 -\t\t\t%#06x\n", u.v32);

                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT45 -\t\t%#018llx\n", u.v64);
        }

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "LMT23 -\t\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT4 -\t\t\t%#06x\n", u.v32);
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT45 -\t\t\t%#018llx\n", u.v64);
        }

        if (pdev_is_xeon(pdev)) {
                if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Outgoing B2B XLAT:\n");

                        u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B XLAT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B LMT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Secondary BAR:\n");

                        u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR01 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR4 -\t\t\t%#06x\n", u.v32);
                                u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR5 -\t\t\t%#06x\n", u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR45 -\t\t%#018llx\n",
                                                 u.v64);
                        }
                }

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Statistics:\n");

                u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "Upstream Memory Miss -\t%u\n", u.v16);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Hardware Errors:\n");

                if (!pci_read_config_word(pdev,
                                          XEON_DEVSTS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "DEVSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_word(pdev,
                                          XEON_LINK_STATUS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "LNKSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_dword(pdev,
                                           XEON_UNCERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "UNCERRSTS -\t\t%#06x\n", u.v32);

                if (!pci_read_config_dword(pdev,
                                           XEON_CORERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "CORERRSTS -\t\t%#06x\n", u.v32);
        }

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev = filp->private_data;

        if (pdev_is_xeon(ndev->ntb.pdev) ||
            pdev_is_atom(ndev->ntb.pdev))
                return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
        else if (pdev_is_skx_xeon(ndev->ntb.pdev))
                return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);

        return -ENXIO;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
        if (!debugfs_dir) {
                ndev->debugfs_dir = NULL;
                ndev->debugfs_info = NULL;
        } else {
                ndev->debugfs_dir =
                        debugfs_create_dir(ndev_name(ndev), debugfs_dir);
                if (!ndev->debugfs_dir)
                        ndev->debugfs_info = NULL;
                else
                        ndev->debugfs_info =
                                debugfs_create_file("info", S_IRUSR,
                                                    ndev->debugfs_dir, ndev,
                                                    &intel_ntb_debugfs_info);
        }
}
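
/*
 * Reading the "info" file dumps the state printed by the functions above.
 * Illustrative only (hypothetical path; the top-level directory is created
 * at module load, not shown here, and the per-device name comes from
 * ndev_name()):
 *
 *        cat /sys/kernel/debug/ntb_hw_intel/0000:00:03.0/info
 */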

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
        debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->mw_count;
}

static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
                                  phys_addr_t *base,
                                  resource_size_t *size,
                                  resource_size_t *align,
                                  resource_size_t *align_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        if (base)
                *base = pci_resource_start(ndev->ntb.pdev, bar) +
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (size)
                *size = pci_resource_len(ndev->ntb.pdev, bar) -
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (align)
                *align = pci_resource_len(ndev->ntb.pdev, bar);

        if (align_size)
                *align_size = 1;

        return 0;
}
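
/*
 * Client-visible window indexes hide the b2b window: when the whole window
 * is consumed for b2b (b2b_off == 0), indexes at or above b2b_idx shift up
 * by one BAR; when only half is consumed (b2b_off != 0), the b2b window is
 * still exposed with its base and size trimmed by b2b_off.
 */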

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
                                  dma_addr_t addr, resource_size_t size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        unsigned long base_reg, xlat_reg, limit_reg;
        resource_size_t bar_size, mw_size;
        void __iomem *mmio;
        u64 base, limit, reg_val;
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        bar_size = pci_resource_len(ndev->ntb.pdev, bar);

        if (idx == ndev->b2b_idx)
                mw_size = bar_size - ndev->b2b_off;
        else
                mw_size = bar_size;

        /* hardware requires that addr is aligned to bar size */
        if (addr & (bar_size - 1))
                return -EINVAL;

        /* make sure the range fits in the usable mw size */
        if (size > mw_size)
                return -EINVAL;

        mmio = ndev->self_mmio;
        base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
        xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
        limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

        if (bar < 4 || !ndev->bar4_split) {
                base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite64(addr, mmio + xlat_reg);
                reg_val = ioread64(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite64(limit, mmio + limit_reg);
                reg_val = ioread64(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite64(base, mmio + limit_reg);
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }
        } else {
                /* split bar addr range must all be 32 bit */
                if (addr & (~0ull << 32))
                        return -EINVAL;
                if ((addr + size) & (~0ull << 32))
                        return -EINVAL;

                base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite32(addr, mmio + xlat_reg);
                reg_val = ioread32(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite32(limit, mmio + limit_reg);
                reg_val = ioread32(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite32(base, mmio + limit_reg);
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }
        }

        return 0;
}
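
/*
 * Alignment example: for a 1 MiB BAR (bar_size == 0x100000) the translation
 * address must have its low 20 bits clear, so 0x40000000 is accepted while
 * 0x40080000 fails with -EINVAL.  A limit value of zero leaves the window
 * unlimited; a real limit is only programmed when a limit register exists
 * and the caller maps less than the full window size.
 */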

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
                                enum ntb_speed *speed,
                                enum ntb_width *width)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (ndev->reg->link_is_up(ndev)) {
                if (speed)
                        *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
                if (width)
                        *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
                return 1;
        } else {
                /* TODO MAYBE: is it possible to observe the link speed and
                 * width while link is training? */
                if (speed)
                        *speed = NTB_SPEED_NONE;
                if (width)
                        *width = NTB_WIDTH_NONE;
                return 0;
        }
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
                                 enum ntb_speed max_speed,
                                 enum ntb_width max_width)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_ctl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev),
                "Enabling link with max_speed %d max_width %d\n",
                max_speed, max_width);
        if (max_speed != NTB_SPEED_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
        if (max_width != NTB_WIDTH_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

        ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
        ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
        ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
        if (ndev->bar4_split)
                ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
        iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_cntl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev), "Disabling link\n");

        /* Bring NTB link down */
        ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
        if (ndev->bar4_split)
                ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
        ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
        iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

1252 static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
1253 {
1254         return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
1255 }
1256
1257 static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
1258 {
1259         return ntb_ndev(ntb)->db_valid_mask;
1260 }
1261
1262 static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
1263 {
1264         struct intel_ntb_dev *ndev;
1265
1266         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1267
1268         return ndev->db_vec_count;
1269 }
1270
1271 static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
1272 {
1273         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1274
1275         if (db_vector < 0 || db_vector > ndev->db_vec_count)
1276                 return 0;
1277
1278         return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
1279 }
1280
1281 static u64 intel_ntb_db_read(struct ntb_dev *ntb)
1282 {
1283         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1284
1285         return ndev_db_read(ndev,
1286                             ndev->self_mmio +
1287                             ndev->self_reg->db_bell);
1288 }
1289
1290 static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1291 {
1292         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1293
1294         return ndev_db_write(ndev, db_bits,
1295                              ndev->self_mmio +
1296                              ndev->self_reg->db_bell);
1297 }
1298
1299 static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1300 {
1301         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1302
1303         return ndev_db_set_mask(ndev, db_bits,
1304                                 ndev->self_mmio +
1305                                 ndev->self_reg->db_mask);
1306 }
1307
1308 static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1309 {
1310         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1311
1312         return ndev_db_clear_mask(ndev, db_bits,
1313                                   ndev->self_mmio +
1314                                   ndev->self_reg->db_mask);
1315 }
1316
1317 static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
1318                                   phys_addr_t *db_addr,
1319                                   resource_size_t *db_size)
1320 {
1321         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1322
1323         return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
1324                             ndev->peer_reg->db_bell);
1325 }
1326
1327 static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1328 {
1329         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1330
1331         return ndev_db_write(ndev, db_bits,
1332                              ndev->peer_mmio +
1333                              ndev->peer_reg->db_bell);
1334 }
1335
1336 static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
1337 {
1338         return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
1339 }
1340
1341 static int intel_ntb_spad_count(struct ntb_dev *ntb)
1342 {
1343         struct intel_ntb_dev *ndev;
1344
1345         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1346
1347         return ndev->spad_count;
1348 }
1349
1350 static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
1351 {
1352         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1353
1354         return ndev_spad_read(ndev, idx,
1355                               ndev->self_mmio +
1356                               ndev->self_reg->spad);
1357 }
1358
1359 static int intel_ntb_spad_write(struct ntb_dev *ntb,
1360                                 int idx, u32 val)
1361 {
1362         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1363
1364         return ndev_spad_write(ndev, idx, val,
1365                                ndev->self_mmio +
1366                                ndev->self_reg->spad);
1367 }
1368
1369 static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
1370                                     phys_addr_t *spad_addr)
1371 {
1372         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1373
1374         return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
1375                               ndev->peer_reg->spad);
1376 }
1377
1378 static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
1379 {
1380         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1381
1382         return ndev_spad_read(ndev, idx,
1383                               ndev->peer_mmio +
1384                               ndev->peer_reg->spad);
1385 }
1386
1387 static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
1388                                      int idx, u32 val)
1389 {
1390         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1391
1392         return ndev_spad_write(ndev, idx, val,
1393                                ndev->peer_mmio +
1394                                ndev->peer_reg->spad);
1395 }
1396
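/*
 * A minimal sketch (illustrative only) of how a client of the ntb.h API
 * pairs the primitives above: publish a word with
 * ntb_peer_spad_write(ntb, 0, msg), ring the peer with
 * ntb_peer_db_set(ntb, BIT_ULL(0)), and let the peer's handler read the
 * scratchpad and acknowledge with ntb_db_clear(ntb, BIT_ULL(0)).
 */
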
1397 /* ATOM */
1398
1399 static u64 atom_db_ioread(void __iomem *mmio)
1400 {
1401         return ioread64(mmio);
1402 }
1403
1404 static void atom_db_iowrite(u64 bits, void __iomem *mmio)
1405 {
1406         iowrite64(bits, mmio);
1407 }
1408
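/* Returns nonzero if the cached link control/status state changed. */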
1409 static int atom_poll_link(struct intel_ntb_dev *ndev)
1410 {
1411         u32 ntb_ctl;
1412
1413         ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
1414
1415         if (ntb_ctl == ndev->ntb_ctl)
1416                 return 0;
1417
1418         ndev->ntb_ctl = ntb_ctl;
1419
1420         ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
1421
1422         return 1;
1423 }
1424
1425 static int atom_link_is_up(struct intel_ntb_dev *ndev)
1426 {
1427         return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
1428 }
1429
1430 static int atom_link_is_err(struct intel_ntb_dev *ndev)
1431 {
1432         if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
1433             & ATOM_LTSSMSTATEJMP_FORCEDETECT)
1434                 return 1;
1435
1436         if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
1437             & ATOM_IBIST_ERR_OFLOW)
1438                 return 1;
1439
1440         return 0;
1441 }
1442
1443 static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
1444 {
1445         switch (ppd & ATOM_PPD_TOPO_MASK) {
1446         case ATOM_PPD_TOPO_B2B_USD:
1447                 dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
1448                 return NTB_TOPO_B2B_USD;
1449
1450         case ATOM_PPD_TOPO_B2B_DSD:
1451                 dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
1452                 return NTB_TOPO_B2B_DSD;
1453
1454         case ATOM_PPD_TOPO_PRI_USD:
1455         case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1456         case ATOM_PPD_TOPO_SEC_USD:
1457         case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
1458                 dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
1459                 return NTB_TOPO_NONE;
1460         }
1461
1462         dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
1463         return NTB_TOPO_NONE;
1464 }
1465
1466 static void atom_link_hb(struct work_struct *work)
1467 {
1468         struct intel_ntb_dev *ndev = hb_ndev(work);
1469         unsigned long poll_ts;
1470         void __iomem *mmio;
1471         u32 status32;
1472
1473         poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
1474
1475         /* Delay polling the link status if an interrupt was received,
1476          * unless the cached link status says the link is down.
1477          */
1478         if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
1479                 schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
1480                 return;
1481         }
1482
1483         if (atom_poll_link(ndev))
1484                 ntb_link_event(&ndev->ntb);
1485
1486         if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
1487                 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
1488                 return;
1489         }
1490
1491         /* Link is down with error: recover the link! */
1492
1493         mmio = ndev->self_mmio;
1494
1495         /* Driver resets the NTB ModPhy lanes - magic! */
1496         iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
1497         iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
1498         iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
1499         iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
1500
1501         /* Driver waits 100ms to allow the NTB ModPhy to settle */
1502         msleep(100);
1503
1504         /* Clear AER Errors, write to clear */
1505         status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
1506         dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
1507         status32 &= PCI_ERR_COR_REP_ROLL;
1508         iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
1509
1510         /* Clear unexpected electrical idle event in LTSSM, write to clear */
1511         status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
1512         dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
1513         status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
1514         iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
1515
1516         /* Clear DeSkew Buffer error, write to clear */
1517         status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
1518         dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
1519         status32 |= ATOM_DESKEWSTS_DBERR;
1520         iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
1521
1522         status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
1523         dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
1524         status32 &= ATOM_IBIST_ERR_OFLOW;
1525         iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
1526
1527         /* Releases the NTB state machine to allow the link to retrain */
1528         status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
1529         dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
1530         status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
1531         iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
1532
1533         /* There is a potential race if the two NTB devices attempt link
1534          * recovery at the same time: the link never retrains and the driver
1535          * is stuck in this loop forever.  Add a random interval to the
1536          * recovery time to break the symmetry and prevent this race.
1537          */
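        /* delay is uniform in [RECOVERY_TIME, 2 * RECOVERY_TIME) jiffies */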
1538         schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
1539                               + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
1540 }
1541
1542 static int atom_init_isr(struct intel_ntb_dev *ndev)
1543 {
1544         int rc;
1545
1546         rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
1547                            ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
1548         if (rc)
1549                 return rc;
1550
1551         /* ATOM has no link status interrupt, so poll the link on this platform */
1552         ndev->last_ts = jiffies;
1553         INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
1554         schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
1555
1556         return 0;
1557 }
1558
1559 static void atom_deinit_isr(struct intel_ntb_dev *ndev)
1560 {
1561         cancel_delayed_work_sync(&ndev->hb_timer);
1562         ndev_deinit_isr(ndev);
1563 }
1564
1565 static int atom_init_ntb(struct intel_ntb_dev *ndev)
1566 {
1567         ndev->mw_count = ATOM_MW_COUNT;
1568         ndev->spad_count = ATOM_SPAD_COUNT;
1569         ndev->db_count = ATOM_DB_COUNT;
1570
1571         switch (ndev->ntb.topo) {
1572         case NTB_TOPO_B2B_USD:
1573         case NTB_TOPO_B2B_DSD:
1574                 ndev->self_reg = &atom_pri_reg;
1575                 ndev->peer_reg = &atom_b2b_reg;
1576                 ndev->xlat_reg = &atom_sec_xlat;
1577
1578                 /* Enable Bus Master and Memory Space on the secondary side */
1579                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1580                           ndev->self_mmio + ATOM_SPCICMD_OFFSET);
1581
1582                 break;
1583
1584         default:
1585                 return -EINVAL;
1586         }
1587
1588         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1589
1590         return 0;
1591 }
1592
1593 static int atom_init_dev(struct intel_ntb_dev *ndev)
1594 {
1595         u32 ppd;
1596         int rc;
1597
1598         rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
1599         if (rc)
1600                 return -EIO;
1601
1602         ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
1603         if (ndev->ntb.topo == NTB_TOPO_NONE)
1604                 return -EINVAL;
1605
1606         rc = atom_init_ntb(ndev);
1607         if (rc)
1608                 return rc;
1609
1610         rc = atom_init_isr(ndev);
1611         if (rc)
1612                 return rc;
1613
1614         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1615                 /* Initiate PCI-E link training */
1616                 rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
1617                                             ppd | ATOM_PPD_INIT_LINK);
1618                 if (rc)
1619                         return rc;
1620         }
1621
1622         return 0;
1623 }
1624
1625 static void atom_deinit_dev(struct intel_ntb_dev *ndev)
1626 {
1627         atom_deinit_isr(ndev);
1628 }
1629
1630 /* Skylake Xeon NTB */
1631
1632 static int skx_poll_link(struct intel_ntb_dev *ndev)
1633 {
1634         u16 reg_val;
1635         int rc;
1636
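        /* ack any pending link-event doorbell bit before sampling the link */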
1637         ndev->reg->db_iowrite(ndev->db_link_mask,
1638                               ndev->self_mmio +
1639                               ndev->self_reg->db_clear);
1640
1641         rc = pci_read_config_word(ndev->ntb.pdev,
1642                                   SKX_LINK_STATUS_OFFSET, &reg_val);
1643         if (rc)
1644                 return 0;
1645
1646         if (reg_val == ndev->lnk_sta)
1647                 return 0;
1648
1649         ndev->lnk_sta = reg_val;
1650
1651         return 1;
1652 }
1653
1654 static u64 skx_db_ioread(void __iomem *mmio)
1655 {
1656         return ioread64(mmio);
1657 }
1658
1659 static void skx_db_iowrite(u64 bits, void __iomem *mmio)
1660 {
1661         iowrite64(bits, mmio);
1662 }
1663
1664 static int skx_init_isr(struct intel_ntb_dev *ndev)
1665 {
1666         int i;
1667
1668         /*
1669          * The MSIX vectors and the interrupt status bits are not lined up
1670          * on Skylake: the link status bit is bit 32, yet at reset it maps to
1671          * MSIX vector 0, i.e. the vector assignment is 1-32,0.  Reprogram
1672          * the table to the identity mapping 0-32 to line them up.
1673          */
1674
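        /* identity-map MSIX table entry i to interrupt status bit i */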
1675         for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
1676                 iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);
1677
1678         /* move the link status vector down one as a workaround */
1679         if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
1680                 iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
1681                          ndev->self_mmio + SKX_INTVEC_OFFSET +
1682                          (SKX_DB_MSIX_VECTOR_COUNT - 1));
1683         }
1684
1685         return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
1686                              SKX_DB_MSIX_VECTOR_COUNT,
1687                              SKX_DB_MSIX_VECTOR_SHIFT,
1688                              SKX_DB_TOTAL_SHIFT);
1689 }
1690
1691 static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
1692                             const struct intel_b2b_addr *addr,
1693                             const struct intel_b2b_addr *peer_addr)
1694 {
1695         struct pci_dev *pdev;
1696         void __iomem *mmio;
1697         resource_size_t bar_size;
1698         phys_addr_t bar_addr;
1699         int b2b_bar;
1700         u8 bar_sz;
1701
1702         pdev = ndev_pdev(ndev);
1703         mmio = ndev->self_mmio;
1704
1705         if (ndev->b2b_idx == UINT_MAX) {
1706                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
1707                 b2b_bar = 0;
1708                 ndev->b2b_off = 0;
1709         } else {
1710                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1711                 if (b2b_bar < 0)
1712                         return -EIO;
1713
1714                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
1715
1716                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1717
1718                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
1719
1720                 if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
1721                         dev_dbg(ndev_dev(ndev),
1722                                 "b2b using first half of bar\n");
1723                         ndev->b2b_off = bar_size >> 1;
1724                 } else if (bar_size >= XEON_B2B_MIN_SIZE) {
1725                         dev_dbg(ndev_dev(ndev),
1726                                 "b2b using whole bar\n");
1727                         ndev->b2b_off = 0;
1728                         --ndev->mw_count;
1729                 } else {
1730                         dev_dbg(ndev_dev(ndev),
1731                                 "b2b bar size is too small\n");
1732                         return -EIO;
1733                 }
1734         }
1735
1736         /* Reset the secondary bar sizes to match the primary bar sizes,
1737          * except disable or halve the b2b secondary bar; the size registers
1738          * are log2-encoded, so subtracting one halves the window.
1739          */
1740         pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
1741         dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz);
1742         if (b2b_bar == 1) {
1743                 if (ndev->b2b_off)
1744                         bar_sz -= 1;
1745                 else
1746                         bar_sz = 0;
1747         }
1748
1749         pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
1750         pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
1751         dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz);
1752
1753         pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
1754         dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz);
1755         if (b2b_bar == 2) {
1756                 if (ndev->b2b_off)
1757                         bar_sz -= 1;
1758                 else
1759                         bar_sz = 0;
1760         }
1761
1762         pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
1763         pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
1764         dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz);
1765
1766         /* SBAR01 hit by first part of the b2b bar */
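        /* this chain only validates b2b_bar; bar_addr is recomputed below */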
1767         if (b2b_bar == 0)
1768                 bar_addr = addr->bar0_addr;
1769         else if (b2b_bar == 1)
1770                 bar_addr = addr->bar2_addr64;
1771         else if (b2b_bar == 2)
1772                 bar_addr = addr->bar4_addr64;
1773         else
1774                 return -EIO;
1775
1776         /* setup incoming bar limits == base addrs (zero length windows) */
1777         bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
1778         iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
1779         bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
1780         dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr);
1781
1782         bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1783         iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
1784         bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
1785         dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr);
1786
1787         /* zero incoming translation addrs */
1788         iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
1789         iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);
1790
1791         ndev->peer_mmio = ndev->self_mmio;
1792
1793         return 0;
1794 }
1795
1796 static int skx_init_ntb(struct intel_ntb_dev *ndev)
1797 {
1798         int rc;
1799
1801         ndev->mw_count = XEON_MW_COUNT;
1802         ndev->spad_count = SKX_SPAD_COUNT;
1803         ndev->db_count = SKX_DB_COUNT;
1804         ndev->db_link_mask = SKX_DB_LINK_BIT;
1805
1806         /* link status is remapped onto doorbell bit 31 as a workaround */
1807         if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
1808                 ndev->db_link_mask |= BIT_ULL(31);
1809
1810         switch (ndev->ntb.topo) {
1811         case NTB_TOPO_B2B_USD:
1812         case NTB_TOPO_B2B_DSD:
1813                 ndev->self_reg = &skx_pri_reg;
1814                 ndev->peer_reg = &skx_b2b_reg;
1815                 ndev->xlat_reg = &skx_sec_xlat;
1816
1817                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1818                         rc = skx_setup_b2b_mw(ndev,
1819                                               &xeon_b2b_dsd_addr,
1820                                               &xeon_b2b_usd_addr);
1821                 } else {
1822                         rc = skx_setup_b2b_mw(ndev,
1823                                               &xeon_b2b_usd_addr,
1824                                               &xeon_b2b_dsd_addr);
1825                 }
1826
1827                 if (rc)
1828                         return rc;
1829
1830                 /* Enable Bus Master and Memory Space on the secondary side */
1831                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1832                           ndev->self_mmio + SKX_SPCICMD_OFFSET);
1833
1834                 break;
1835
1836         default:
1837                 return -EINVAL;
1838         }
1839
1840         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1841
1842         ndev->reg->db_iowrite(ndev->db_valid_mask,
1843                               ndev->self_mmio +
1844                               ndev->self_reg->db_mask);
1845
1846         return 0;
1847 }
1848
1849 static int skx_init_dev(struct intel_ntb_dev *ndev)
1850 {
1851         struct pci_dev *pdev;
1852         u8 ppd;
1853         int rc;
1854
1855         pdev = ndev_pdev(ndev);
1856
1857         ndev->reg = &skx_reg;
1858
1859         rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
1860         if (rc)
1861                 return -EIO;
1862
1863         ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
1864         dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
1865                 ntb_topo_string(ndev->ntb.topo));
1866         if (ndev->ntb.topo == NTB_TOPO_NONE)
1867                 return -EINVAL;
1868
1869         if (pdev_is_skx_xeon(pdev))
1870                 ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
1871
1872         rc = skx_init_ntb(ndev);
1873         if (rc)
1874                 return rc;
1875
1876         return skx_init_isr(ndev);
1877 }
1878
1879 static int intel_ntb3_link_enable(struct ntb_dev *ntb,
1880                                   enum ntb_speed max_speed,
1881                                   enum ntb_width max_width)
1882 {
1883         struct intel_ntb_dev *ndev;
1884         u32 ntb_ctl;
1885
1886         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1887
1888         dev_dbg(ndev_dev(ndev),
1889                 "Enabling link with max_speed %d max_width %d\n",
1890                 max_speed, max_width);
1891
1892         if (max_speed != NTB_SPEED_AUTO)
1893                 dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
1894         if (max_width != NTB_WIDTH_AUTO)
1895                 dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
1896
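        /* clear link-disable and config-lock, enable snoop on both mw bars */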
1897         ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1898         ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
1899         ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
1900         ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
1901         iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1902
1903         return 0;
1904 }

1905 static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
1906                                    dma_addr_t addr, resource_size_t size)
1907 {
1908         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1909         unsigned long xlat_reg, limit_reg;
1910         resource_size_t bar_size, mw_size;
1911         void __iomem *mmio;
1912         u64 base, limit, reg_val;
1913         int bar;
1914
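        /* a b2b mw that consumed a whole bar shifts later mw indexes by one */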
1915         if (idx >= ndev->b2b_idx && !ndev->b2b_off)
1916                 idx += 1;
1917
1918         bar = ndev_mw_to_bar(ndev, idx);
1919         if (bar < 0)
1920                 return bar;
1921
1922         bar_size = pci_resource_len(ndev->ntb.pdev, bar);
1923
1924         if (idx == ndev->b2b_idx)
1925                 mw_size = bar_size - ndev->b2b_off;
1926         else
1927                 mw_size = bar_size;
1928
1929         /* hardware requires addr aligned to bar size (a power of two) */
1930         if (addr & (bar_size - 1))
1931                 return -EINVAL;
1932
1933         /* make sure the range fits in the usable mw size */
1934         if (size > mw_size)
1935                 return -EINVAL;
1936
1937         mmio = ndev->self_mmio;
1938         xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
1939         limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
1940         base = pci_resource_start(ndev->ntb.pdev, bar);
1941
1942         /* Set the limit if supported and if size is not the full mw_size */
1943         if (limit_reg && size != mw_size)
1944                 limit = base + size;
1945         else
1946                 limit = base + mw_size;
1947
1948         /* set and verify setting the translation address */
1949         iowrite64(addr, mmio + xlat_reg);
1950         reg_val = ioread64(mmio + xlat_reg);
1951         if (reg_val != addr) {
1952                 iowrite64(0, mmio + xlat_reg);
1953                 return -EIO;
1954         }
1955
1956         dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
1957
1958         /* set and verify setting the limit */
1959         iowrite64(limit, mmio + limit_reg);
1960         reg_val = ioread64(mmio + limit_reg);
1961         if (reg_val != limit) {
1962                 iowrite64(base, mmio + limit_reg);
1963                 iowrite64(0, mmio + xlat_reg);
1964                 return -EIO;
1965         }
1966
1967         dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
1968
1969         /* set up the peer (EMBAR) side; its regs sit 0x4000 above IMBAR */
1970         limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
1971         base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
1972         base &= ~0xf;
1973
1974         if (limit_reg && size != mw_size)
1975                 limit = base + size;
1976         else
1977                 limit = base + mw_size;
1978
1979         /* set and verify setting the limit */
1980         iowrite64(limit, mmio + limit_reg);
1981         reg_val = ioread64(mmio + limit_reg);
1982         if (reg_val != limit) {
1983                 iowrite64(base, mmio + limit_reg);
1984                 iowrite64(0, mmio + xlat_reg);
1985                 return -EIO;
1986         }
1987
1988         dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
1989
1990         return 0;
1991 }
1992
1993 static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1994 {
1995         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1996         int bit;
1997
1998         if (db_bits & ~ndev->db_valid_mask)
1999                 return -EINVAL;
2000
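        /* ring each requested doorbell, peeling off the lowest set bit */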
2001         while (db_bits) {
2002                 bit = __ffs(db_bits);
2003                 iowrite32(1, ndev->peer_mmio +
2004                                 ndev->peer_reg->db_bell + (bit * 4));
2005                 db_bits &= db_bits - 1;
2006         }
2007
2008         return 0;
2009 }
2010
2011 static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
2012 {
2013         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
2014
2015         return ndev_db_read(ndev,
2016                             ndev->self_mmio +
2017                             ndev->self_reg->db_clear);
2018 }
2019
2020 static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
2021 {
2022         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
2023
2024         return ndev_db_write(ndev, db_bits,
2025                              ndev->self_mmio +
2026                              ndev->self_reg->db_clear);
2027 }
2028
2029 /* XEON */
2030
2031 static u64 xeon_db_ioread(void __iomem *mmio)
2032 {
2033         return (u64)ioread16(mmio);
2034 }
2035
2036 static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
2037 {
2038         iowrite16((u16)bits, mmio);
2039 }
2040
2041 static int xeon_poll_link(struct intel_ntb_dev *ndev)
2042 {
2043         u16 reg_val;
2044         int rc;
2045
2046         ndev->reg->db_iowrite(ndev->db_link_mask,
2047                               ndev->self_mmio +
2048                               ndev->self_reg->db_bell);
2049
2050         rc = pci_read_config_word(ndev->ntb.pdev,
2051                                   XEON_LINK_STATUS_OFFSET, &reg_val);
2052         if (rc)
2053                 return 0;
2054
2055         if (reg_val == ndev->lnk_sta)
2056                 return 0;
2057
2058         ndev->lnk_sta = reg_val;
2059
2060         return 1;
2061 }
2062
2063 static int xeon_link_is_up(struct intel_ntb_dev *ndev)
2064 {
2065         if (ndev->ntb.topo == NTB_TOPO_SEC)
2066                 return 1;
2067
2068         return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
2069 }
2070
2071 static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
2072 {
2073         switch (ppd & XEON_PPD_TOPO_MASK) {
2074         case XEON_PPD_TOPO_B2B_USD:
2075                 return NTB_TOPO_B2B_USD;
2076
2077         case XEON_PPD_TOPO_B2B_DSD:
2078                 return NTB_TOPO_B2B_DSD;
2079
2080         case XEON_PPD_TOPO_PRI_USD:
2081         case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
2082                 return NTB_TOPO_PRI;
2083
2084         case XEON_PPD_TOPO_SEC_USD:
2085         case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
2086                 return NTB_TOPO_SEC;
2087         }
2088
2089         return NTB_TOPO_NONE;
2090 }
2091
2092 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
2093 {
2094         if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
2095                 dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
2096                 return 1;
2097         }
2098         return 0;
2099 }
2100
2101 static int xeon_init_isr(struct intel_ntb_dev *ndev)
2102 {
2103         return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
2104                              XEON_DB_MSIX_VECTOR_COUNT,
2105                              XEON_DB_MSIX_VECTOR_SHIFT,
2106                              XEON_DB_TOTAL_SHIFT);
2107 }
2108
2109 static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
2110 {
2111         ndev_deinit_isr(ndev);
2112 }
2113
2114 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
2115                              const struct intel_b2b_addr *addr,
2116                              const struct intel_b2b_addr *peer_addr)
2117 {
2118         struct pci_dev *pdev;
2119         void __iomem *mmio;
2120         resource_size_t bar_size;
2121         phys_addr_t bar_addr;
2122         int b2b_bar;
2123         u8 bar_sz;
2124
2125         pdev = ndev_pdev(ndev);
2126         mmio = ndev->self_mmio;
2127
2128         if (ndev->b2b_idx == UINT_MAX) {
2129                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
2130                 b2b_bar = 0;
2131                 ndev->b2b_off = 0;
2132         } else {
2133                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
2134                 if (b2b_bar < 0)
2135                         return -EIO;
2136
2137                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
2138
2139                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
2140
2141                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
2142
2143                 if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
2144                         dev_dbg(ndev_dev(ndev),
2145                                 "b2b using first half of bar\n");
2146                         ndev->b2b_off = bar_size >> 1;
2147                 } else if (XEON_B2B_MIN_SIZE <= bar_size) {
2148                         dev_dbg(ndev_dev(ndev),
2149                                 "b2b using whole bar\n");
2150                         ndev->b2b_off = 0;
2151                         --ndev->mw_count;
2152                 } else {
2153                         dev_dbg(ndev_dev(ndev),
2154                                 "b2b bar size is too small\n");
2155                         return -EIO;
2156                 }
2157         }
2158
2159         /* Reset the secondary bar sizes to match the primary bar sizes,
2160          * except disable or halve the b2b secondary bar (size registers are
2161          * log2-encoded, so subtracting one halves a bar).
2162          * Note: each bar size register is handled individually, because the
2163          * offsets are not in a consistent order (BAR5SZ oddly follows PPD).
2164          */
2165         pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
2166         dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
2167         if (b2b_bar == 2) {
2168                 if (ndev->b2b_off)
2169                         bar_sz -= 1;
2170                 else
2171                         bar_sz = 0;
2172         }
2173         pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
2174         pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
2175         dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
2176
2177         if (!ndev->bar4_split) {
2178                 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
2179                 dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
2180                 if (b2b_bar == 4) {
2181                         if (ndev->b2b_off)
2182                                 bar_sz -= 1;
2183                         else
2184                                 bar_sz = 0;
2185                 }
2186                 pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
2187                 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
2188                 dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
2189         } else {
2190                 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
2191                 dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
2192                 if (b2b_bar == 4) {
2193                         if (ndev->b2b_off)
2194                                 bar_sz -= 1;
2195                         else
2196                                 bar_sz = 0;
2197                 }
2198                 pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
2199                 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
2200                 dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
2201
2202                 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
2203                 dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
2204                 if (b2b_bar == 5) {
2205                         if (ndev->b2b_off)
2206                                 bar_sz -= 1;
2207                         else
2208                                 bar_sz = 0;
2209                 }
2210                 pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
2211                 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
2212                 dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
2213         }
2214
2215         /* SBAR01 hit by first part of the b2b bar */
2216         if (b2b_bar == 0)
2217                 bar_addr = addr->bar0_addr;
2218         else if (b2b_bar == 2)
2219                 bar_addr = addr->bar2_addr64;
2220         else if (b2b_bar == 4 && !ndev->bar4_split)
2221                 bar_addr = addr->bar4_addr64;
2222         else if (b2b_bar == 4)
2223                 bar_addr = addr->bar4_addr32;
2224         else if (b2b_bar == 5)
2225                 bar_addr = addr->bar5_addr32;
2226         else
2227                 return -EIO;
2228
2229         dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
2230         iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
2231
2232         /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
2233          * The b2b bar is either disabled above, or configured half-size, and
2234          * it starts at the PBAR xlat + offset.
2235          */
2236
2237         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
2238         iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
2239         bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
2240         dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
2241
2242         if (!ndev->bar4_split) {
2243                 bar_addr = addr->bar4_addr64 +
2244                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2245                 iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
2246                 bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
2247                 dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
2248         } else {
2249                 bar_addr = addr->bar4_addr32 +
2250                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2251                 iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
2252                 bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
2253                 dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
2254
2255                 bar_addr = addr->bar5_addr32 +
2256                         (b2b_bar == 5 ? ndev->b2b_off : 0);
2257                 iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
2258                 bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
2259                 dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
2260         }
2261
2262         /* setup incoming bar limits == base addrs (zero length windows) */
2263
2264         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
2265         iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
2266         bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
2267         dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
2268
2269         if (!ndev->bar4_split) {
2270                 bar_addr = addr->bar4_addr64 +
2271                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2272                 iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
2273                 bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
2274                 dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
2275         } else {
2276                 bar_addr = addr->bar4_addr32 +
2277                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2278                 iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
2279                 bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
2280                 dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
2281
2282                 bar_addr = addr->bar5_addr32 +
2283                         (b2b_bar == 5 ? ndev->b2b_off : 0);
2284                 iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
2285                 bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
2286                 dev_dbg(ndev_dev(ndev), "SBAR5LMT %#010llx\n", bar_addr);
2287         }
2288
2289         /* zero incoming translation addrs */
2290         iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
2291
2292         if (!ndev->bar4_split) {
2293                 iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
2294         } else {
2295                 iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
2296                 iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
2297         }
2298
2299         /* zero outgoing translation limits (whole bar size windows) */
2300         iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
2301         if (!ndev->bar4_split) {
2302                 iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
2303         } else {
2304                 iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
2305                 iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
2306         }
2307
2308         /* set outgoing translation offsets */
2309         bar_addr = peer_addr->bar2_addr64;
2310         iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
2311         bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
2312         dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
2313
2314         if (!ndev->bar4_split) {
2315                 bar_addr = peer_addr->bar4_addr64;
2316                 iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
2317                 bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
2318                 dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
2319         } else {
2320                 bar_addr = peer_addr->bar4_addr32;
2321                 iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
2322                 bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
2323                 dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
2324
2325                 bar_addr = peer_addr->bar5_addr32;
2326                 iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
2327                 bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
2328                 dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
2329         }
2330
2331         /* set the translation offset for b2b registers */
2332         if (b2b_bar == 0)
2333                 bar_addr = peer_addr->bar0_addr;
2334         else if (b2b_bar == 2)
2335                 bar_addr = peer_addr->bar2_addr64;
2336         else if (b2b_bar == 4 && !ndev->bar4_split)
2337                 bar_addr = peer_addr->bar4_addr64;
2338         else if (b2b_bar == 4)
2339                 bar_addr = peer_addr->bar4_addr32;
2340         else if (b2b_bar == 5)
2341                 bar_addr = peer_addr->bar5_addr32;
2342         else
2343                 return -EIO;
2344
2345         /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
2346         dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
2347         iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
2348         iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
2349
2350         if (b2b_bar) {
2351                 /* map peer ntb mmio config space registers */
2352                 ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
2353                                             XEON_B2B_MIN_SIZE);
2354                 if (!ndev->peer_mmio)
2355                         return -EIO;
2356
2357                 ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
2358         }
2359
2360         return 0;
2361 }
2362
2363 static int xeon_init_ntb(struct intel_ntb_dev *ndev)
2364 {
2365         int rc;
2366         u32 ntb_ctl;
2367
2368         if (ndev->bar4_split)
2369                 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
2370         else
2371                 ndev->mw_count = XEON_MW_COUNT;
2372
2373         ndev->spad_count = XEON_SPAD_COUNT;
2374         ndev->db_count = XEON_DB_COUNT;
2375         ndev->db_link_mask = XEON_DB_LINK_BIT;
2376
2377         switch (ndev->ntb.topo) {
2378         case NTB_TOPO_PRI:
2379                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
2380                         dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
2381                         return -EINVAL;
2382                 }
2383
2384                 /* enable link to allow secondary side device to appear */
2385                 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
2386                 ntb_ctl &= ~NTB_CTL_DISABLE;
2387                 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
2388
2389                 /* use half the spads for the peer */
2390                 ndev->spad_count >>= 1;
2391                 ndev->self_reg = &xeon_pri_reg;
2392                 ndev->peer_reg = &xeon_sec_reg;
2393                 ndev->xlat_reg = &xeon_sec_xlat;
2394                 break;
2395
2396         case NTB_TOPO_SEC:
2397                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
2398                         dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
2399                         return -EINVAL;
2400                 }
2401                 /* use half the spads for the peer */
2402                 ndev->spad_count >>= 1;
2403                 ndev->self_reg = &xeon_sec_reg;
2404                 ndev->peer_reg = &xeon_pri_reg;
2405                 ndev->xlat_reg = &xeon_pri_xlat;
2406                 break;
2407
2408         case NTB_TOPO_B2B_USD:
2409         case NTB_TOPO_B2B_DSD:
2410                 ndev->self_reg = &xeon_pri_reg;
2411                 ndev->peer_reg = &xeon_b2b_reg;
2412                 ndev->xlat_reg = &xeon_sec_xlat;
2413
2414                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
2415                         ndev->peer_reg = &xeon_pri_reg;
2416
2417                         if (b2b_mw_idx < 0)
2418                                 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
2419                         else
2420                                 ndev->b2b_idx = b2b_mw_idx;
2421
2422                         if (ndev->b2b_idx >= ndev->mw_count) {
2423                                 dev_dbg(ndev_dev(ndev),
2424                                         "b2b_mw_idx %d invalid for mw_count %u\n",
2425                                         b2b_mw_idx, ndev->mw_count);
2426                                 return -EINVAL;
2427                         }
2428
2429                         dev_dbg(ndev_dev(ndev),
2430                                 "setting up b2b mw idx %d means %d\n",
2431                                 b2b_mw_idx, ndev->b2b_idx);
2432
2433                 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
2434                         dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
2435                         ndev->db_count -= 1;
2436                 }
2437
2438                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
2439                         rc = xeon_setup_b2b_mw(ndev,
2440                                                &xeon_b2b_dsd_addr,
2441                                                &xeon_b2b_usd_addr);
2442                 } else {
2443                         rc = xeon_setup_b2b_mw(ndev,
2444                                                &xeon_b2b_usd_addr,
2445                                                &xeon_b2b_dsd_addr);
2446                 }
2447                 if (rc)
2448                         return rc;
2449
2450                 /* Enable Bus Master and Memory Space on the secondary side */
2451                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
2452                           ndev->self_mmio + XEON_SPCICMD_OFFSET);
2453
2454                 break;
2455
2456         default:
2457                 return -EINVAL;
2458         }
2459
2460         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
2461
2462         ndev->reg->db_iowrite(ndev->db_valid_mask,
2463                               ndev->self_mmio +
2464                               ndev->self_reg->db_mask);
2465
2466         return 0;
2467 }
2468
2469 static int xeon_init_dev(struct intel_ntb_dev *ndev)
2470 {
2471         struct pci_dev *pdev;
2472         u8 ppd;
2473         int rc, mem;
2474
2475         pdev = ndev_pdev(ndev);
2476
2477         switch (pdev->device) {
2478         /* There is a Xeon hardware erratum related to writes to SDOORBELL or
2479          * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
2480          * which may hang the system.  To work around this, use the second
2481          * memory window to access the interrupt and scratch pad registers
2482          * on the remote system.
2483          */
2484         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
2485         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
2486         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
2487         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
2488         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
2489         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
2490         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
2491         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
2492         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
2493         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
2494         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
2495         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
2496         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
2497         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
2498         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
2499                 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
2500                 break;
2501         }
2502
2503         switch (pdev->device) {
2504         /* There is a hardware erratum related to accessing any register in
2505          * SB01BASE in the presence of bidirectional traffic crossing the NTB.
2506          */
2507         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
2508         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
2509         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
2510         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
2511         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
2512         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
2513         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
2514         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
2515         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
2516                 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
2517                 break;
2518         }
2519
2520         switch (pdev->device) {
2521         /* HW erratum on bit 14 of the b2bdoorbell register: writes will not
2522          * be mirrored to the remote system.  Shrink the number of bits by
2523          * one, since bit 14 is the last bit.
2524          */
2525         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
2526         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
2527         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
2528         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
2529         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
2530         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
2531         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
2532         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
2533         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
2534         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
2535         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
2536         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
2537         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
2538         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
2539         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
2540                 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
2541                 break;
2542         }
2543
2544         ndev->reg = &xeon_reg;
2545
2546         rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
2547         if (rc)
2548                 return -EIO;
2549
2550         ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
2551         dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
2552                 ntb_topo_string(ndev->ntb.topo));
2553         if (ndev->ntb.topo == NTB_TOPO_NONE)
2554                 return -EINVAL;
2555
2556         if (ndev->ntb.topo != NTB_TOPO_SEC) {
2557                 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
2558                 dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
2559                         ppd, ndev->bar4_split);
2560         } else {
2561                 /* This is how the transparent-BAR (secondary) side figures
2562                  * out whether split BAR is in use; the hw on the transparent
2563                  * side has no way to know, so it cannot set the PPD.
2564                  */
2565                 mem = pci_select_bars(pdev, IORESOURCE_MEM);
2566                 ndev->bar4_split = hweight32(mem) ==
2567                         HSX_SPLIT_BAR_MW_COUNT + 1;
2568                 dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
2569                         mem, ndev->bar4_split);
2570         }
2571
2572         rc = xeon_init_ntb(ndev);
2573         if (rc)
2574                 return rc;
2575
2576         return xeon_init_isr(ndev);
2577 }
2578
2579 static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
2580 {
2581         xeon_deinit_isr(ndev);
2582 }
2583
2584 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
2585 {
2586         int rc;
2587
2588         pci_set_drvdata(pdev, ndev);
2589
2590         rc = pci_enable_device(pdev);
2591         if (rc)
2592                 goto err_pci_enable;
2593
2594         rc = pci_request_regions(pdev, NTB_NAME);
2595         if (rc)
2596                 goto err_pci_regions;
2597
2598         pci_set_master(pdev);
2599
2600         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2601         if (rc) {
2602                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2603                 if (rc)
2604                         goto err_dma_mask;
2605                 dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
2606         }
2607
2608         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2609         if (rc) {
2610                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2611                 if (rc)
2612                         goto err_dma_mask;
2613                 dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
2614         }
2615
2616         ndev->self_mmio = pci_iomap(pdev, 0, 0);
2617         if (!ndev->self_mmio) {
2618                 rc = -EIO;
2619                 goto err_mmio;
2620         }
2621         ndev->peer_mmio = ndev->self_mmio;
2622         ndev->peer_addr = pci_resource_start(pdev, 0);
2623
2624         return 0;
2625
2626 err_mmio:
2627 err_dma_mask:
2628         pci_clear_master(pdev);
2629         pci_release_regions(pdev);
2630 err_pci_regions:
2631         pci_disable_device(pdev);
2632 err_pci_enable:
2633         pci_set_drvdata(pdev, NULL);
2634         return rc;
2635 }
2636
2637 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
2638 {
2639         struct pci_dev *pdev = ndev_pdev(ndev);
2640
2641         if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
2642                 pci_iounmap(pdev, ndev->peer_mmio);
2643         pci_iounmap(pdev, ndev->self_mmio);
2644
2645         pci_clear_master(pdev);
2646         pci_release_regions(pdev);
2647         pci_disable_device(pdev);
2648         pci_set_drvdata(pdev, NULL);
2649 }
2650
2651 static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
2652                                     struct pci_dev *pdev)
2653 {
2654         ndev->ntb.pdev = pdev;
2655         ndev->ntb.topo = NTB_TOPO_NONE;
2656         ndev->ntb.ops = &intel_ntb_ops;
2657
2658         ndev->b2b_off = 0;
2659         ndev->b2b_idx = UINT_MAX;
2660
2661         ndev->bar4_split = 0;
2662
2663         ndev->mw_count = 0;
2664         ndev->spad_count = 0;
2665         ndev->db_count = 0;
2666         ndev->db_vec_count = 0;
2667         ndev->db_vec_shift = 0;
2668
2669         ndev->ntb_ctl = 0;
2670         ndev->lnk_sta = 0;
2671
2672         ndev->db_valid_mask = 0;
2673         ndev->db_link_mask = 0;
2674         ndev->db_mask = 0;
2675
2676         spin_lock_init(&ndev->db_mask_lock);
2677 }
2678
2679 static int intel_ntb_pci_probe(struct pci_dev *pdev,
2680                                const struct pci_device_id *id)
2681 {
2682         struct intel_ntb_dev *ndev;
2683         int rc, node;
2684
2685         node = dev_to_node(&pdev->dev);
2686
2687         if (pdev_is_atom(pdev)) {
2688                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2689                 if (!ndev) {
2690                         rc = -ENOMEM;
2691                         goto err_ndev;
2692                 }
2693
2694                 ndev_init_struct(ndev, pdev);
2695
2696                 rc = intel_ntb_init_pci(ndev, pdev);
2697                 if (rc)
2698                         goto err_init_pci;
2699
2700                 rc = atom_init_dev(ndev);
2701                 if (rc)
2702                         goto err_init_dev;
2703
2704         } else if (pdev_is_xeon(pdev)) {
2705                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2706                 if (!ndev) {
2707                         rc = -ENOMEM;
2708                         goto err_ndev;
2709                 }
2710
2711                 ndev_init_struct(ndev, pdev);
2712
2713                 rc = intel_ntb_init_pci(ndev, pdev);
2714                 if (rc)
2715                         goto err_init_pci;
2716
2717                 rc = xeon_init_dev(ndev);
2718                 if (rc)
2719                         goto err_init_dev;
2720
2721         } else if (pdev_is_skx_xeon(pdev)) {
2722                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2723                 if (!ndev) {
2724                         rc = -ENOMEM;
2725                         goto err_ndev;
2726                 }
2727
2728                 ndev_init_struct(ndev, pdev);
2729                 ndev->ntb.ops = &intel_ntb3_ops;
2730
2731                 rc = intel_ntb_init_pci(ndev, pdev);
2732                 if (rc)
2733                         goto err_init_pci;
2734
2735                 rc = skx_init_dev(ndev);
2736                 if (rc)
2737                         goto err_init_dev;
2738
2739         } else {
2740                 rc = -EINVAL;
2741                 goto err_ndev;
2742         }
2743
2744         ndev_reset_unsafe_flags(ndev);
2745
2746         ndev->reg->poll_link(ndev);
2747
2748         ndev_init_debugfs(ndev);
2749
2750         rc = ntb_register_device(&ndev->ntb);
2751         if (rc)
2752                 goto err_register;
2753
2754         dev_info(&pdev->dev, "NTB device registered.\n");
2755
2756         return 0;
2757
2758 err_register:
2759         ndev_deinit_debugfs(ndev);
2760         if (pdev_is_atom(pdev))
2761                 atom_deinit_dev(ndev);
2762         else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
2763                 xeon_deinit_dev(ndev);
2764 err_init_dev:
2765         intel_ntb_deinit_pci(ndev);
2766 err_init_pci:
2767         kfree(ndev);
2768 err_ndev:
2769         return rc;
2770 }
2771
2772 static void intel_ntb_pci_remove(struct pci_dev *pdev)
2773 {
2774         struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
2775
2776         ntb_unregister_device(&ndev->ntb);
2777         ndev_deinit_debugfs(ndev);
2778         if (pdev_is_atom(pdev))
2779                 atom_deinit_dev(ndev);
2780         else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
2781                 xeon_deinit_dev(ndev);
2782         intel_ntb_deinit_pci(ndev);
2783         kfree(ndev);
2784 }
2785
2786 static const struct intel_ntb_reg atom_reg = {
2787         .poll_link              = atom_poll_link,
2788         .link_is_up             = atom_link_is_up,
2789         .db_ioread              = atom_db_ioread,
2790         .db_iowrite             = atom_db_iowrite,
2791         .db_size                = sizeof(u64),
2792         .ntb_ctl                = ATOM_NTBCNTL_OFFSET,
2793         .mw_bar                 = {2, 4},
2794 };
2795
2796 static const struct intel_ntb_alt_reg atom_pri_reg = {
2797         .db_bell                = ATOM_PDOORBELL_OFFSET,
2798         .db_mask                = ATOM_PDBMSK_OFFSET,
2799         .spad                   = ATOM_SPAD_OFFSET,
2800 };
2801
2802 static const struct intel_ntb_alt_reg atom_b2b_reg = {
2803         .db_bell                = ATOM_B2B_DOORBELL_OFFSET,
2804         .spad                   = ATOM_B2B_SPAD_OFFSET,
2805 };
2806
2807 static const struct intel_ntb_xlat_reg atom_sec_xlat = {
2808         /* FIXME : .bar0_base   = ATOM_SBAR0BASE_OFFSET, */
2809         /* FIXME : .bar2_limit  = ATOM_SBAR2LMT_OFFSET, */
2810         .bar2_xlat              = ATOM_SBAR2XLAT_OFFSET,
2811 };
2812
2813 static const struct intel_ntb_reg xeon_reg = {
2814         .poll_link              = xeon_poll_link,
2815         .link_is_up             = xeon_link_is_up,
2816         .db_ioread              = xeon_db_ioread,
2817         .db_iowrite             = xeon_db_iowrite,
2818         .db_size                = sizeof(u32),
2819         .ntb_ctl                = XEON_NTBCNTL_OFFSET,
2820         .mw_bar                 = {2, 4, 5},
2821 };
2822
2823 static const struct intel_ntb_alt_reg xeon_pri_reg = {
2824         .db_bell                = XEON_PDOORBELL_OFFSET,
2825         .db_mask                = XEON_PDBMSK_OFFSET,
2826         .spad                   = XEON_SPAD_OFFSET,
2827 };
2828
2829 static const struct intel_ntb_alt_reg xeon_sec_reg = {
2830         .db_bell                = XEON_SDOORBELL_OFFSET,
2831         .db_mask                = XEON_SDBMSK_OFFSET,
2832         /* second half of the scratchpads: skip COUNT/2 spads, 4 bytes each */
2833         .spad                   = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
2834 };
2835
2836 static const struct intel_ntb_alt_reg xeon_b2b_reg = {
2837         .db_bell                = XEON_B2B_DOORBELL_OFFSET,
2838         .spad                   = XEON_B2B_SPAD_OFFSET,
2839 };
2840
static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
	/* Note: no primary .bar0_base is visible to the secondary side.
	 *
	 * The secondary side cannot read the base addresses stored in the
	 * primary BARs, and the base address is needed to set the limit
	 * register to any value other than zero (unlimited).
	 *
	 * Without the base address, the secondary side can neither disable
	 * the window (by setting the limit equal to the base) nor restrict
	 * the window size (by setting the limit to base + size).
	 */
	.bar2_limit		= XEON_PBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_PBAR23XLAT_OFFSET,
};
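
/*
 * Illustrative sketch only (not used by the driver): per the note above, a
 * limit register can only be programmed meaningfully when the BAR base is
 * known.  The helper below is hypothetical and simply shows how a limit
 * value would be derived from a base address and a requested window size.
 */
static inline u64 example_bar_limit(u64 bar_base, resource_size_t size)
{
	/*
	 * limit == base disables the window; limit == base + size caps the
	 * window; writing zero instead would leave the window unlimited.
	 */
	return size ? bar_base + size : bar_base;
}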

static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base		= XEON_SBAR0BASE_OFFSET,
	.bar2_limit		= XEON_SBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_SBAR23XLAT_OFFSET,
};

static struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
};

static struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
};
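
/*
 * The upstream (usd) and downstream (dsd) peer BAR address defaults above
 * are identical; the structures are deliberately non-const so the defaults
 * can be overridden before use (e.g. at module load time).
 */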

static const struct intel_ntb_reg skx_reg = {
	.poll_link		= skx_poll_link,
	.link_is_up		= xeon_link_is_up,
	.db_ioread		= skx_db_ioread,
	.db_iowrite		= skx_db_iowrite,
	.db_size		= sizeof(u64),
	.ntb_ctl		= SKX_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

static const struct intel_ntb_alt_reg skx_pri_reg = {
	.db_bell		= SKX_EM_DOORBELL_OFFSET,
	.db_clear		= SKX_IM_INT_STATUS_OFFSET,
	.db_mask		= SKX_IM_INT_DISABLE_OFFSET,
	.spad			= SKX_IM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg skx_b2b_reg = {
	.db_bell		= SKX_IM_DOORBELL_OFFSET,
	.db_clear		= SKX_EM_INT_STATUS_OFFSET,
	.db_mask		= SKX_EM_INT_DISABLE_OFFSET,
	.spad			= SKX_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg skx_sec_xlat = {
/*	.bar0_base		= SKX_EMBAR0_OFFSET, */
	.bar2_limit		= SKX_IMBAR1XLMT_OFFSET,
	.bar2_xlat		= SKX_IMBAR1XBASE_OFFSET,
};
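
/*
 * In the SKX register names, the IM/EM prefixes appear to distinguish the
 * internal (locally visible) and external (peer-facing) register banks.
 * Note how the pri and b2b tables above mirror each other, swapping IM and
 * EM for the bell, clear, and mask registers.
 */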

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_range		= intel_ntb_mw_get_range,
	.mw_set_trans		= intel_ntb_mw_set_trans,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_is_unsafe		= intel_ntb_db_is_unsafe,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb_db_read,
	.db_clear		= intel_ntb_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};

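/*
 * Operations for third-generation (Skylake Xeon) hardware.  Most callbacks
 * are shared with the table above; the memory-window translation, link
 * enable, doorbell read/clear, and peer doorbell set paths have gen3
 * implementations, and there is no .db_is_unsafe quirk on this hardware.
 */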
static const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_range		= intel_ntb_mw_get_range,
	.mw_set_trans		= intel_ntb3_mw_set_trans,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb3_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};

static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

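/*
 * Supported devices: the Atom part (BWD) in back-to-back mode, the Xeon
 * parts (JSF, SNB, IVT, HSX, BDX) each in back-to-back (B2B), primary-side
 * (PS), and secondary-side (SS) configurations, and the Skylake Xeon part
 * (SKX) in back-to-back mode only.
 */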
static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);

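/*
 * Illustrative sketch only: a minimal NTB client as it would bind to the
 * devices this driver registers.  Everything named "example_*" is
 * hypothetical; real consumers such as ntb_transport implement these
 * callbacks against the ntb_dev_ops tables above.  Kept under #if 0 so it
 * serves as documentation rather than compiled code.
 */
#if 0
static int example_ntb_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	/* From here a client negotiates with the peer via spads/doorbells. */
	dev_info(&ntb->dev, "example client bound, %d memory windows\n",
		 ntb_mw_count(ntb));
	return 0;
}

static void example_ntb_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	dev_info(&ntb->dev, "example client unbound\n");
}

static struct ntb_client example_ntb_client = {
	.ops = {
		.probe = example_ntb_probe,
		.remove = example_ntb_remove,
	},
};

/* A client module would call ntb_register_client(&example_ntb_client). */
#endif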