1 /*
2  * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
3  *
4  * Copyright 2011 Integrated Device Technology, Inc.
5  * Alexandre Bounine <alexandre.bounine@idt.com>
6  * Chul Kim <chul.kim@idt.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License as published by the Free
10  * Software Foundation; either version 2 of the License, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along with
19  * this program; if not, write to the Free Software Foundation, Inc., 59
20  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
21  */
22
23 #include <linux/io.h>
24 #include <linux/errno.h>
25 #include <linux/init.h>
26 #include <linux/ioport.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/rio.h>
31 #include <linux/rio_drv.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/interrupt.h>
34 #include <linux/kfifo.h>
35 #include <linux/delay.h>
36
37 #include "tsi721.h"
38
39 #ifdef DEBUG
40 u32 tsi_dbg_level;
41 module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
42 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
43 #endif
44
45 static int pcie_mrrs = -1;
46 module_param(pcie_mrrs, int, S_IRUGO);
47 MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");
48
49 static u8 mbox_sel = 0x0f;
50 module_param(mbox_sel, byte, S_IRUGO);
51 MODULE_PARM_DESC(mbox_sel,
52                  "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
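/*
 * Example (sketch, module name assumed to be tsi721_mport): restrict RIO
 * messaging to mailboxes 0 and 1 and request a 512-byte PCIe maximum read
 * request size (MRRS encoding 0..5 selects 128..4096 bytes):
 *
 *   modprobe tsi721_mport mbox_sel=0x03 pcie_mrrs=2
 */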
53
54 static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
55 static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
56
57 /**
58  * tsi721_lcread - read from local SREP config space
59  * @mport: RapidIO master port info
60  * @index: ID of RapidIO interface
61  * @offset: Offset into configuration space
62  * @len: Length (in bytes) of the maintenance transaction
63  * @data: Value to be read into
64  *
65  * Generates a local SREP space read. Returns %0 on
66  * success or %-EINVAL on failure.
67  */
68 static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
69                          int len, u32 *data)
70 {
71         struct tsi721_device *priv = mport->priv;
72
73         if (len != sizeof(u32))
74                 return -EINVAL; /* only 32-bit access is supported */
75
76         *data = ioread32(priv->regs + offset);
77
78         return 0;
79 }
80
81 /**
82  * tsi721_lcwrite - write into local SREP config space
83  * @mport: RapidIO master port info
84  * @index: ID of RapidIO interface
85  * @offset: Offset into configuration space
86  * @len: Length (in bytes) of the maintenance transaction
87  * @data: Value to be written
88  *
89  * Generates a local write into SREP configuration space. Returns %0 on
90  * success or %-EINVAL on failure.
91  */
92 static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
93                           int len, u32 data)
94 {
95         struct tsi721_device *priv = mport->priv;
96
97         if (len != sizeof(u32))
98                 return -EINVAL; /* only 32-bit access is supported */
99
100         iowrite32(data, priv->regs + offset);
101
102         return 0;
103 }
104
105 /**
106  * tsi721_maint_dma - Helper function to generate RapidIO maintenance
107  *                    transactions using designated Tsi721 DMA channel.
108  * @priv: pointer to tsi721 private data
109  * @sys_size: RapidIO transport system size
110  * @destid: Destination ID of transaction
111  * @hopcount: Number of hops to target device
112  * @offset: Offset into configuration space
113  * @len: Length (in bytes) of the maintenance transaction
114  * @data: Location to be read from or written into
115  * @do_wr: Operation flag (1 == MAINT_WR)
116  *
117  * Generates a RapidIO maintenance transaction (Read or Write).
118  * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
119  */
120 static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
121                         u16 destid, u8 hopcount, u32 offset, int len,
122                         u32 *data, int do_wr)
123 {
124         void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
125         struct tsi721_dma_desc *bd_ptr;
126         u32 rd_count, swr_ptr, ch_stat;
127         int i, err = 0;
128         u32 op = do_wr ? MAINT_WR : MAINT_RD;
129
130         if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
131                 return -EINVAL;
132
133         bd_ptr = priv->mdma.bd_base;
134
135         rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
136
137         /* Initialize DMA descriptor */
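        /*
         * A single Type 2 (maintenance) descriptor is built here: type_id
         * carries the descriptor type, the MAINT_RD/MAINT_WR opcode and the
         * destination ID; bcount carries the transport size and a 4-byte
         * count; raddr_lo carries the hopcount and config space offset.
         */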
138         bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
139         bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
140         bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
141         bd_ptr[0].raddr_hi = 0;
142         if (do_wr)
143                 bd_ptr[0].data[0] = cpu_to_be32p(data);
144         else
145                 bd_ptr[0].data[0] = 0xffffffff;
146
147         mb();
148
149         /* Start DMA operation */
150         iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
151         ioread32(regs + TSI721_DMAC_DWRCNT);
152         i = 0;
153
154         /* Wait until DMA transfer is finished */
155         while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
156                                                         & TSI721_DMAC_STS_RUN) {
157                 udelay(1);
158                 if (++i >= 5000000) {
159                         tsi_debug(MAINT, &priv->pdev->dev,
160                                 "DMA[%d] read timeout ch_status=%x",
161                                 priv->mdma.ch_id, ch_stat);
162                         if (!do_wr)
163                                 *data = 0xffffffff;
164                         err = -EIO;
165                         goto err_out;
166                 }
167         }
168
169         if (ch_stat & TSI721_DMAC_STS_ABORT) {
170                 /* If DMA operation aborted due to error,
171                  * reinitialize DMA channel
172                  */
173                 tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x",
174                           ch_stat);
175                 tsi_debug(MAINT, &priv->pdev->dev,
176                           "OP=%d : destid=%x hc=%x off=%x",
177                           do_wr ? MAINT_WR : MAINT_RD,
178                           destid, hopcount, offset);
179                 iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
180                 iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
181                 udelay(10);
182                 iowrite32(0, regs + TSI721_DMAC_DWRCNT);
183                 udelay(1);
184                 if (!do_wr)
185                         *data = 0xffffffff;
186                 err = -EIO;
187                 goto err_out;
188         }
189
190         if (!do_wr)
191                 *data = be32_to_cpu(bd_ptr[0].data[0]);
192
193         /*
194          * Update descriptor status FIFO RD pointer.
195          * NOTE: Skipping the check and clear of FIFO entries because we are
196          * waiting for the transfer to complete.
197          */
198         swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
199         iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
200 err_out:
201
202         return err;
203 }
204
205 /**
206  * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
207  *                    using Tsi721 BDMA engine.
208  * @mport: RapidIO master port control structure
209  * @index: ID of RapidIO interface
210  * @destid: Destination ID of transaction
211  * @hopcount: Number of hops to target device
212  * @offset: Offset into configuration space
213  * @len: Length (in bytes) of the maintenance transaction
214  * @data: Location to be read into
215  *
216  * Generates a RapidIO maintenance read transaction.
217  * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
218  */
219 static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
220                         u8 hopcount, u32 offset, int len, u32 *data)
221 {
222         struct tsi721_device *priv = mport->priv;
223
224         return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
225                                 offset, len, data, 0);
226 }
227
228 /**
229  * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
230  *                     using Tsi721 BDMA engine
231  * @mport: RapidIO master port control structure
232  * @index: ID of RapidIO interface
233  * @destid: Destination ID of transaction
234  * @hopcount: Number of hops to target device
235  * @offset: Offset into configuration space
236  * @len: Length (in bytes) of the maintenance transaction
237  * @data: Value to be written
238  *
239  * Generates a RapidIO maintenance write transaction.
240  * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
241  */
242 static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
243                          u8 hopcount, u32 offset, int len, u32 data)
244 {
245         struct tsi721_device *priv = mport->priv;
246         u32 temp = data;
247
248         return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
249                                 offset, len, &temp, 1);
250 }
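/*
 * The maintenance helpers above (tsi721_lcread/tsi721_lcwrite and
 * tsi721_cread_dma/tsi721_cwrite_dma) are intended to be installed as this
 * mport's rio_ops callbacks when the mport is registered. The RIO core then
 * issues config space accesses through them, e.g. (illustrative only):
 *
 *   u32 did;
 *   rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_ID_CAR, &did);
 */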
251
252 /**
253  * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
254  * @priv:  tsi721 device private structure
255  *
256  * Handles inbound port-write interrupts. Copies PW message from an internal
257  * buffer into PW message FIFO and schedules a deferred routine to process
258  * queued messages.
259  */
260 static int
261 tsi721_pw_handler(struct tsi721_device *priv)
262 {
263         u32 pw_stat;
264         u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
265
266
267         pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);
268
269         if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
270                 pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
271                 pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
272                 pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
273                 pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));
274
275                 /* Queue PW message (if there is room in FIFO),
276                  * otherwise discard it.
277                  */
278                 spin_lock(&priv->pw_fifo_lock);
279                 if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
280                         kfifo_in(&priv->pw_fifo, pw_buf,
281                                                 TSI721_RIO_PW_MSG_SIZE);
282                 else
283                         priv->pw_discard_count++;
284                 spin_unlock(&priv->pw_fifo_lock);
285         }
286
287         /* Clear pending PW interrupts */
288         iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
289                   priv->regs + TSI721_RIO_PW_RX_STAT);
290
291         schedule_work(&priv->pw_work);
292
293         return 0;
294 }
295
296 static void tsi721_pw_dpc(struct work_struct *work)
297 {
298         struct tsi721_device *priv = container_of(work, struct tsi721_device,
299                                                     pw_work);
300         union rio_pw_msg pwmsg;
301
302         /*
303          * Process port-write messages
304          */
305         while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg,
306                          TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
307                 /* Pass the port-write message to RIO core for processing */
308                 rio_inb_pwrite_handler(&priv->mport, &pwmsg);
309         }
310 }
311
312 /**
313  * tsi721_pw_enable - enable/disable port-write message handling
314  * @mport: Master port implementing the port write unit
315  * @enable:    1=enable; 0=disable port-write message handling
316  */
317 static int tsi721_pw_enable(struct rio_mport *mport, int enable)
318 {
319         struct tsi721_device *priv = mport->priv;
320         u32 rval;
321
322         rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);
323
324         if (enable)
325                 rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
326         else
327                 rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;
328
329         /* Clear pending PW interrupts */
330         iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
331                   priv->regs + TSI721_RIO_PW_RX_STAT);
332         /* Update enable bits */
333         iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);
334
335         return 0;
336 }
337
338 /**
339  * tsi721_dsend - Send a RapidIO doorbell
340  * @mport: RapidIO master port info
341  * @index: ID of RapidIO interface
342  * @destid: Destination ID of target device
343  * @data: 16-bit info field of RapidIO doorbell
344  *
345  * Sends a RapidIO doorbell message. Always returns %0.
346  */
347 static int tsi721_dsend(struct rio_mport *mport, int index,
348                         u16 destid, u16 data)
349 {
350         struct tsi721_device *priv = mport->priv;
351         u32 offset;
352
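        /*
         * Outbound doorbells are generated by a big-endian 16-bit write into
         * the doorbell BAR; the offset computed below encodes the transport
         * type (8- or 16-bit destID) and the destination ID.
         */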
353         offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
354                  (destid << 2);
355
356         tsi_debug(DBELL, &priv->pdev->dev,
357                   "Send Doorbell 0x%04x to destID 0x%x", data, destid);
358         iowrite16be(data, priv->odb_base + offset);
359
360         return 0;
361 }
362
363 /**
364  * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
365  * @priv: tsi721 device-specific data structure
366  *
367  * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
368  * buffer into DB message FIFO and schedules a deferred routine to process
369  * queued DBs.
370  */
371 static int
372 tsi721_dbell_handler(struct tsi721_device *priv)
373 {
374         u32 regval;
375
376         /* Disable IDB interrupts */
377         regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
378         regval &= ~TSI721_SR_CHINT_IDBQRCV;
379         iowrite32(regval,
380                 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
381
382         schedule_work(&priv->idb_work);
383
384         return 0;
385 }
386
387 static void tsi721_db_dpc(struct work_struct *work)
388 {
389         struct tsi721_device *priv = container_of(work, struct tsi721_device,
390                                                     idb_work);
391         struct rio_mport *mport;
392         struct rio_dbell *dbell;
393         int found = 0;
394         u32 wr_ptr, rd_ptr;
395         u64 *idb_entry;
396         u32 regval;
397         union {
398                 u64 msg;
399                 u8  bytes[8];
400         } idb;
401
402         /*
403          * Process queued inbound doorbells
404          */
405         mport = &priv->mport;
406
407         wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
408         rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
409
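        /*
         * Each inbound doorbell queue entry is a single 64-bit word; the
         * source ID, target ID and info fields are extracted from its bytes
         * by the DBELL_SID/DBELL_TID/DBELL_INF macros below.
         */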
410         while (wr_ptr != rd_ptr) {
411                 idb_entry = (u64 *)(priv->idb_base +
412                                         (TSI721_IDB_ENTRY_SIZE * rd_ptr));
413                 rd_ptr++;
414                 rd_ptr %= IDB_QSIZE;
415                 idb.msg = *idb_entry;
416                 *idb_entry = 0;
417
418                 /* Process one doorbell */
419                 list_for_each_entry(dbell, &mport->dbells, node) {
420                         if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
421                             (dbell->res->end >= DBELL_INF(idb.bytes))) {
422                                 found = 1;
423                                 break;
424                         }
425                 }
426
427                 if (found) {
428                         dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
429                                     DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
430                 } else {
431                         tsi_debug(DBELL, &priv->pdev->dev,
432                                   "spurious IDB sid %2.2x tid %2.2x info %4.4x",
433                                   DBELL_SID(idb.bytes), DBELL_TID(idb.bytes),
434                                   DBELL_INF(idb.bytes));
435                 }
436
437                 wr_ptr = ioread32(priv->regs +
438                                   TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
439         }
440
441         iowrite32(rd_ptr & (IDB_QSIZE - 1),
442                 priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
443
444         /* Re-enable IDB interrupts */
445         regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
446         regval |= TSI721_SR_CHINT_IDBQRCV;
447         iowrite32(regval,
448                 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
449
450         wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
451         if (wr_ptr != rd_ptr)
452                 schedule_work(&priv->idb_work);
453 }
454
455 /**
456  * tsi721_irqhandler - Tsi721 interrupt handler
457  * @irq: Linux interrupt number
458  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
459  *
460  * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
461  * interrupt events and calls an event-specific handler(s).
462  */
463 static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
464 {
465         struct tsi721_device *priv = (struct tsi721_device *)ptr;
466         u32 dev_int;
467         u32 dev_ch_int;
468         u32 intval;
469         u32 ch_inte;
470
471         /* For MSI mode disable all device-level interrupts */
472         if (priv->flags & TSI721_USING_MSI)
473                 iowrite32(0, priv->regs + TSI721_DEV_INTE);
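        /*
         * Device-level interrupts are re-enabled at the end of this handler;
         * masking them here presumably guarantees a new MSI message is
         * generated for events that arrive while the handler is running.
         */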
474
475         dev_int = ioread32(priv->regs + TSI721_DEV_INT);
476         if (!dev_int)
477                 return IRQ_NONE;
478
479         dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);
480
481         if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
482                 /* Service SR2PC Channel interrupts */
483                 if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
484                         /* Service Inbound Doorbell interrupt */
485                         intval = ioread32(priv->regs +
486                                                 TSI721_SR_CHINT(IDB_QUEUE));
487                         if (intval & TSI721_SR_CHINT_IDBQRCV)
488                                 tsi721_dbell_handler(priv);
489                         else
490                                 tsi_info(&priv->pdev->dev,
491                                         "Unsupported SR_CH_INT %x", intval);
492
493                         /* Clear interrupts */
494                         iowrite32(intval,
495                                 priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
496                         ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
497                 }
498         }
499
500         if (dev_int & TSI721_DEV_INT_SMSG_CH) {
501                 int ch;
502
503                 /*
504                  * Service channel interrupts from Messaging Engine
505                  */
506
507                 if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
508                         /* Disable signaled IB MSG Channel interrupts */
509                         ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
510                         ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
511                         iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
512
513                         /*
514                          * Process Inbound Message interrupt for each MBOX
515                          */
516                         for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
517                                 if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
518                                         continue;
519                                 tsi721_imsg_handler(priv, ch);
520                         }
521                 }
522
523                 if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
524                         /* Disable signaled OB MSG Channel interrupts */
525                         ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
526                         ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
527                         iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
528
529                         /*
530                          * Process Outbound Message interrupts for each MBOX
531                          */
532
533                         for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
534                                 if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
535                                         continue;
536                                 tsi721_omsg_handler(priv, ch);
537                         }
538                 }
539         }
540
541         if (dev_int & TSI721_DEV_INT_SRIO) {
542                 /* Service SRIO MAC interrupts */
543                 intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
544                 if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
545                         tsi721_pw_handler(priv);
546         }
547
548 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
549         if (dev_int & TSI721_DEV_INT_BDMA_CH) {
550                 int ch;
551
552                 if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
553                         tsi_debug(DMA, &priv->pdev->dev,
554                                   "IRQ from DMA channel 0x%08x", dev_ch_int);
555
556                         for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
557                                 if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
558                                         continue;
559                                 tsi721_bdma_handler(&priv->bdma[ch]);
560                         }
561                 }
562         }
563 #endif
564
565         /* For MSI mode re-enable device-level interrupts */
566         if (priv->flags & TSI721_USING_MSI) {
567                 dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
568                         TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
569                 iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
570         }
571
572         return IRQ_HANDLED;
573 }
574
575 static void tsi721_interrupts_init(struct tsi721_device *priv)
576 {
577         u32 intr;
578
579         /* Enable IDB interrupts */
580         iowrite32(TSI721_SR_CHINT_ALL,
581                 priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
582         iowrite32(TSI721_SR_CHINT_IDBQRCV,
583                 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
584
585         /* Enable SRIO MAC interrupts */
586         iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
587                 priv->regs + TSI721_RIO_EM_DEV_INT_EN);
588
589         /* Enable interrupts from channels in use */
590 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
591         intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
592                 (TSI721_INT_BDMA_CHAN_M &
593                  ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
594 #else
595         intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
596 #endif
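        /*
         * The BDMA channel reserved for maintenance transactions is excluded
         * above because tsi721_maint_dma() polls the channel status for
         * completion instead of relying on its interrupt.
         */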
597         iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
598
599         if (priv->flags & TSI721_USING_MSIX)
600                 intr = TSI721_DEV_INT_SRIO;
601         else
602                 intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
603                         TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
604
605         iowrite32(intr, priv->regs + TSI721_DEV_INTE);
606         ioread32(priv->regs + TSI721_DEV_INTE);
607 }
608
609 #ifdef CONFIG_PCI_MSI
610 /**
611  * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
612  * @irq: Linux interrupt number
613  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
614  *
615  * Handles outbound messaging interrupts signaled using MSI-X.
616  */
617 static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
618 {
619         struct tsi721_device *priv = (struct tsi721_device *)ptr;
620         int mbox;
621
622         mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
623         tsi721_omsg_handler(priv, mbox);
624         return IRQ_HANDLED;
625 }
626
627 /**
628  * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
629  * @irq: Linux interrupt number
630  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
631  *
632  * Handles inbound messaging interrupts signaled using MSI-X.
633  */
634 static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
635 {
636         struct tsi721_device *priv = (struct tsi721_device *)ptr;
637         int mbox;
638
639         mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
640         tsi721_imsg_handler(priv, mbox + 4);
641         return IRQ_HANDLED;
642 }
643
644 /**
645  * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
646  * @irq: Linux interrupt number
647  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
648  *
649  * Handles Tsi721 interrupts from SRIO MAC.
650  */
651 static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
652 {
653         struct tsi721_device *priv = (struct tsi721_device *)ptr;
654         u32 srio_int;
655
656         /* Service SRIO MAC interrupts */
657         srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
658         if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
659                 tsi721_pw_handler(priv);
660
661         return IRQ_HANDLED;
662 }
663
664 /**
665  * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
666  * @irq: Linux interrupt number
667  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
668  *
669  * Handles Tsi721 interrupts from SR2PC Channel.
670  * NOTE: At this moment services only one SR2PC channel associated with inbound
671  * doorbells.
672  */
673 static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
674 {
675         struct tsi721_device *priv = (struct tsi721_device *)ptr;
676         u32 sr_ch_int;
677
678         /* Service Inbound DB interrupt from SR2PC channel */
679         sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
680         if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
681                 tsi721_dbell_handler(priv);
682
683         /* Clear interrupts */
684         iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
685         /* Read back to ensure that interrupt was cleared */
686         sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
687
688         return IRQ_HANDLED;
689 }
690
691 /**
692  * tsi721_request_msix - register interrupt service for MSI-X mode.
693  * @priv: tsi721 device-specific data structure
694  *
695  * Registers MSI-X interrupt service routines for interrupts that are active
696  * immediately after mport initialization. Messaging interrupt service routines
697  * should be registered during corresponding open requests.
698  */
699 static int tsi721_request_msix(struct tsi721_device *priv)
700 {
701         int err = 0;
702
703         err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
704                         tsi721_sr2pc_ch_msix, 0,
705                         priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv);
706         if (err)
707                 return err;
708
709         err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
710                         tsi721_srio_msix, 0,
711                         priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv);
712         if (err) {
713                 free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
714                 return err;
715         }
716
717         return 0;
718 }
719
720 /**
721  * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
722  * @priv: pointer to tsi721 private data
723  *
724  * Configures MSI-X support for Tsi721. Supports only an exact number
725  * of requested vectors.
726  */
727 static int tsi721_enable_msix(struct tsi721_device *priv)
728 {
729         struct msix_entry entries[TSI721_VECT_MAX];
730         int err;
731         int i;
732
733         entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
734         entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
735
736         /*
737          * Initialize MSI-X entries for Messaging Engine:
738          * this driver supports four RIO mailboxes (inbound and outbound)
739          * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
740          * offset +4 is added to IB MBOX number.
741          */
742         for (i = 0; i < RIO_MAX_MBOX; i++) {
743                 entries[TSI721_VECT_IMB0_RCV + i].entry =
744                                         TSI721_MSIX_IMSG_DQ_RCV(i + 4);
745                 entries[TSI721_VECT_IMB0_INT + i].entry =
746                                         TSI721_MSIX_IMSG_INT(i + 4);
747                 entries[TSI721_VECT_OMB0_DONE + i].entry =
748                                         TSI721_MSIX_OMSG_DONE(i);
749                 entries[TSI721_VECT_OMB0_INT + i].entry =
750                                         TSI721_MSIX_OMSG_INT(i);
751         }
752
753 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
754         /*
755          * Initialize MSI-X entries for Block DMA Engine:
756          * this driver supports XXX DMA channels
757          * (one is reserved for SRIO maintenance transactions)
758          */
759         for (i = 0; i < TSI721_DMA_CHNUM; i++) {
760                 entries[TSI721_VECT_DMA0_DONE + i].entry =
761                                         TSI721_MSIX_DMACH_DONE(i);
762                 entries[TSI721_VECT_DMA0_INT + i].entry =
763                                         TSI721_MSIX_DMACH_INT(i);
764         }
765 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
766
767         err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries));
768         if (err) {
769                 tsi_err(&priv->pdev->dev,
770                         "Failed to enable MSI-X (err=%d)", err);
771                 return err;
772         }
773
774         /*
775          * Copy MSI-X vector information into tsi721 private structure
776          */
777         priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
778         snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
779                  DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
780         priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
781         snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
782                  DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
783
784         for (i = 0; i < RIO_MAX_MBOX; i++) {
785                 priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
786                                 entries[TSI721_VECT_IMB0_RCV + i].vector;
787                 snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
788                          IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
789                          i, pci_name(priv->pdev));
790
791                 priv->msix[TSI721_VECT_IMB0_INT + i].vector =
792                                 entries[TSI721_VECT_IMB0_INT + i].vector;
793                 snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
794                          IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
795                          i, pci_name(priv->pdev));
796
797                 priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
798                                 entries[TSI721_VECT_OMB0_DONE + i].vector;
799                 snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
800                          IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
801                          i, pci_name(priv->pdev));
802
803                 priv->msix[TSI721_VECT_OMB0_INT + i].vector =
804                                 entries[TSI721_VECT_OMB0_INT + i].vector;
805                 snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
806                          IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
807                          i, pci_name(priv->pdev));
808         }
809
810 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
811         for (i = 0; i < TSI721_DMA_CHNUM; i++) {
812                 priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
813                                 entries[TSI721_VECT_DMA0_DONE + i].vector;
814                 snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
815                          IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
816                          i, pci_name(priv->pdev));
817
818                 priv->msix[TSI721_VECT_DMA0_INT + i].vector =
819                                 entries[TSI721_VECT_DMA0_INT + i].vector;
820                 snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
821                          IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
822                          i, pci_name(priv->pdev));
823         }
824 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
825
826         return 0;
827 }
828 #endif /* CONFIG_PCI_MSI */
829
830 static int tsi721_request_irq(struct tsi721_device *priv)
831 {
832         int err;
833
834 #ifdef CONFIG_PCI_MSI
835         if (priv->flags & TSI721_USING_MSIX)
836                 err = tsi721_request_msix(priv);
837         else
838 #endif
839                 err = request_irq(priv->pdev->irq, tsi721_irqhandler,
840                           (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
841                           DRV_NAME, (void *)priv);
842
843         if (err)
844                 tsi_err(&priv->pdev->dev,
845                         "Unable to allocate interrupt, err=%d", err);
846
847         return err;
848 }
849
850 static void tsi721_free_irq(struct tsi721_device *priv)
851 {
852 #ifdef CONFIG_PCI_MSI
853         if (priv->flags & TSI721_USING_MSIX) {
854                 free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
855                 free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv);
856         } else
857 #endif
858         free_irq(priv->pdev->irq, (void *)priv);
859 }
860
861 static int
862 tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar,
863                  u32 size, int *win_id)
864 {
865         u64 win_base;
866         u64 bar_base;
867         u64 bar_end;
868         u32 align;
869         struct tsi721_ob_win *win;
870         struct tsi721_ob_win *new_win = NULL;
871         int new_win_idx = -1;
872         int i = 0;
873
874         bar_base = pbar->base;
875         bar_end =  bar_base + pbar->size;
876         win_base = bar_base;
877         align = size/TSI721_PC2SR_ZONES;
878
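        /*
         * Scan the existing outbound windows for overlap with the candidate
         * [win_base, win_base + size) range. On overlap the candidate base is
         * moved past the conflicting window (aligned to the zone size) and
         * the scan restarts; the first inactive window slot found is kept for
         * the new mapping.
         */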
879         while (i < TSI721_IBWIN_NUM) {
880                 for (i = 0; i < TSI721_IBWIN_NUM; i++) {
881                         if (!priv->ob_win[i].active) {
882                                 if (new_win == NULL) {
883                                         new_win = &priv->ob_win[i];
884                                         new_win_idx = i;
885                                 }
886                                 continue;
887                         }
888
889                         /*
890                          * If this window belongs to the current BAR check it
891                          * for overlap
892                          */
893                         win = &priv->ob_win[i];
894
895                         if (win->base >= bar_base && win->base < bar_end) {
896                                 if (win_base < (win->base + win->size) &&
897                                                 (win_base + size) > win->base) {
898                                         /* Overlap detected */
899                                         win_base = win->base + win->size;
900                                         win_base = ALIGN(win_base, align);
901                                         break;
902                                 }
903                         }
904                 }
905         }
906
907         if (win_base + size > bar_end)
908                 return -ENOMEM;
909
910         if (!new_win) {
911                 tsi_err(&priv->pdev->dev, "OBW count tracking failed");
912                 return -EIO;
913         }
914
915         new_win->active = true;
916         new_win->base = win_base;
917         new_win->size = size;
918         new_win->pbar = pbar;
919         priv->obwin_cnt--;
920         pbar->free -= size;
921         *win_id = new_win_idx;
922         return 0;
923 }
924
925 static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart,
926                         u32 size, u32 flags, dma_addr_t *laddr)
927 {
928         struct tsi721_device *priv = mport->priv;
929         int i;
930         struct tsi721_obw_bar *pbar;
931         struct tsi721_ob_win *ob_win;
932         int obw = -1;
933         u32 rval;
934         u64 rio_addr;
935         u32 zsize;
936         int ret = -ENOMEM;
937
938         tsi_debug(OBW, &priv->pdev->dev,
939                   "did=%d ra=0x%llx sz=0x%x", destid, rstart, size);
940
941         if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1)))
942                 return -EINVAL;
943
944         if (priv->obwin_cnt == 0)
945                 return -EBUSY;
946
947         for (i = 0; i < 2; i++) {
948                 if (priv->p2r_bar[i].free >= size) {
949                         pbar = &priv->p2r_bar[i];
950                         ret = tsi721_obw_alloc(priv, pbar, size, &obw);
951                         if (!ret)
952                                 break;
953                 }
954         }
955
956         if (ret)
957                 return ret;
958
959         WARN_ON(obw == -1);
960         ob_win = &priv->ob_win[obw];
961         ob_win->destid = destid;
962         ob_win->rstart = rstart;
963         tsi_debug(OBW, &priv->pdev->dev,
964                   "allocated OBW%d @%llx", obw, ob_win->base);
965
966         /*
967          * Configure Outbound Window
968          */
969
970         zsize = size/TSI721_PC2SR_ZONES;
971         rio_addr = rstart;
972
973         /*
974          * Program Address Translation Zones:
975          *  This implementation uses all 8 zones associated with the window.
976          */
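        /*
         * Each zone translates size/TSI721_PC2SR_ZONES bytes of the window:
         * the RapidIO address, access type and destination ID are written to
         * the LUT data registers and then committed by setting the GO bit in
         * TSI721_ZONE_SEL for the selected window/zone pair.
         */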
977         for (i = 0; i < TSI721_PC2SR_ZONES; i++) {
978
979                 while (ioread32(priv->regs + TSI721_ZONE_SEL) &
980                         TSI721_ZONE_SEL_GO) {
981                         udelay(1);
982                 }
983
984                 rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) |
985                         TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR;
986                 iowrite32(rval, priv->regs + TSI721_LUT_DATA0);
987                 rval = (u32)(rio_addr >> 32);
988                 iowrite32(rval, priv->regs + TSI721_LUT_DATA1);
989                 rval = destid;
990                 iowrite32(rval, priv->regs + TSI721_LUT_DATA2);
991
992                 rval = TSI721_ZONE_SEL_GO | (obw << 3) | i;
993                 iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
994
995                 rio_addr += zsize;
996         }
997
998         iowrite32(TSI721_OBWIN_SIZE(size) << 8,
999                   priv->regs + TSI721_OBWINSZ(obw));
1000         iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw));
1001         iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN,
1002                   priv->regs + TSI721_OBWINLB(obw));
1003
1004         *laddr = ob_win->base;
1005         return 0;
1006 }
1007
1008 static void tsi721_unmap_outb_win(struct rio_mport *mport,
1009                                   u16 destid, u64 rstart)
1010 {
1011         struct tsi721_device *priv = mport->priv;
1012         struct tsi721_ob_win *ob_win;
1013         int i;
1014
1015         tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart);
1016
1017         for (i = 0; i < TSI721_OBWIN_NUM; i++) {
1018                 ob_win = &priv->ob_win[i];
1019
1020                 if (ob_win->active &&
1021                     ob_win->destid == destid && ob_win->rstart == rstart) {
1022                         tsi_debug(OBW, &priv->pdev->dev,
1023                                   "free OBW%d @%llx", i, ob_win->base);
1024                         ob_win->active = false;
1025                         iowrite32(0, priv->regs + TSI721_OBWINLB(i));
1026                         ob_win->pbar->free += ob_win->size;
1027                         priv->obwin_cnt++;
1028                         break;
1029                 }
1030         }
1031 }
1032
1033 /**
1034  * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
1035  * translation regions.
1036  * @priv: pointer to tsi721 private data
1037  *
1038  * Disables SREP translation regions.
1039  */
1040 static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
1041 {
1042         int i, z;
1043         u32 rval;
1044
1045         /* Disable all PC2SR translation windows */
1046         for (i = 0; i < TSI721_OBWIN_NUM; i++)
1047                 iowrite32(0, priv->regs + TSI721_OBWINLB(i));
1048
1049         /* Initialize zone lookup tables to avoid ECC errors on reads */
1050         iowrite32(0, priv->regs + TSI721_LUT_DATA0);
1051         iowrite32(0, priv->regs + TSI721_LUT_DATA1);
1052         iowrite32(0, priv->regs + TSI721_LUT_DATA2);
1053
1054         for (i = 0; i < TSI721_OBWIN_NUM; i++) {
1055                 for (z = 0; z < TSI721_PC2SR_ZONES; z++) {
1056                         while (ioread32(priv->regs + TSI721_ZONE_SEL) &
1057                                 TSI721_ZONE_SEL_GO) {
1058                                 udelay(1);
1059                         }
1060                         rval = TSI721_ZONE_SEL_GO | (i << 3) | z;
1061                         iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
1062                 }
1063         }
1064
1065         if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) {
1066                 priv->obwin_cnt = 0;
1067                 return;
1068         }
1069
1070         priv->p2r_bar[0].free = priv->p2r_bar[0].size;
1071         priv->p2r_bar[1].free = priv->p2r_bar[1].size;
1072
1073         for (i = 0; i < TSI721_OBWIN_NUM; i++)
1074                 priv->ob_win[i].active = false;
1075
1076         priv->obwin_cnt = TSI721_OBWIN_NUM;
1077 }
1078
1079 /**
1080  * tsi721_rio_map_inb_mem -- Mapping inbound memory region.
1081  * @mport: RapidIO master port
1082  * @lstart: Local memory space start address.
1083  * @rstart: RapidIO space start address.
1084  * @size: The mapping region size.
1085  * @flags: Flags for mapping. 0 for using default flags.
1086  *
1087  * Return: 0 -- Success.
1088  *
1089  * This function will create the inbound mapping
1090  * from rstart to lstart.
1091  */
1092 static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
1093                 u64 rstart, u64 size, u32 flags)
1094 {
1095         struct tsi721_device *priv = mport->priv;
1096         int i, avail = -1;
1097         u32 regval;
1098         struct tsi721_ib_win *ib_win;
1099         bool direct = (lstart == rstart);
1100         u64 ibw_size;
1101         dma_addr_t loc_start;
1102         u64 ibw_start;
1103         struct tsi721_ib_win_mapping *map = NULL;
1104         int ret = -EBUSY;
1105
1106         /* Max IBW size supported by HW is 16GB */
1107         if (size > 0x400000000UL)
1108                 return -EINVAL;
1109
1110         if (direct) {
1111                 /* Calculate minimal acceptable window size and base address */
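                /*
                 * Hardware inbound windows are power-of-two sized and
                 * naturally aligned; the loop below grows the window until
                 * the whole [lstart, lstart + size) range fits inside it.
                 */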
1112
1113                 ibw_size = roundup_pow_of_two(size);
1114                 ibw_start = lstart & ~(ibw_size - 1);
1115
1116                 tsi_debug(IBW, &priv->pdev->dev,
1117                         "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
1118                         rstart, &lstart, size, ibw_start);
1119
1120                 while ((lstart + size) > (ibw_start + ibw_size)) {
1121                         ibw_size *= 2;
1122                         ibw_start = lstart & ~(ibw_size - 1);
1123                         /* Check for crossing IBW max size 16GB */
1124                         if (ibw_size > 0x400000000UL)
1125                                 return -EBUSY;
1126                 }
1127
1128                 loc_start = ibw_start;
1129
1130                 map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC);
1131                 if (map == NULL)
1132                         return -ENOMEM;
1133
1134         } else {
1135                 tsi_debug(IBW, &priv->pdev->dev,
1136                         "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
1137                         rstart, &lstart, size);
1138
1139                 if (!is_power_of_2(size) || size < 0x1000 ||
1140                     ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
1141                         return -EINVAL;
1142                 if (priv->ibwin_cnt == 0)
1143                         return -EBUSY;
1144                 ibw_start = rstart;
1145                 ibw_size = size;
1146                 loc_start = lstart;
1147         }
1148
1149         /*
1150          * Scan for overlap with active regions and mark the first available
1151          * IB window at the same time.
1152          */
1153         for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1154                 ib_win = &priv->ib_win[i];
1155
1156                 if (!ib_win->active) {
1157                         if (avail == -1) {
1158                                 avail = i;
1159                                 ret = 0;
1160                         }
1161                 } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
1162                            (ibw_start + ibw_size) > ib_win->rstart) {
1163                         /* Return error if address translation involved */
1164                         if (!direct || ib_win->xlat) {
1165                                 ret = -EFAULT;
1166                                 break;
1167                         }
1168
1169                         /*
1170                          * Direct mappings are usually larger than the originally
1171                          * requested fragment - check if this new request fits
1172                          * into the existing window.
1173                          */
1174                         if (rstart >= ib_win->rstart &&
1175                             (rstart + size) <= (ib_win->rstart +
1176                                                         ib_win->size)) {
1177                                 /* We are in - no further mapping required */
1178                                 map->lstart = lstart;
1179                                 list_add_tail(&map->node, &ib_win->mappings);
1180                                 return 0;
1181                         }
1182
1183                         ret = -EFAULT;
1184                         break;
1185                 }
1186         }
1187
1188         if (ret)
1189                 goto out;
1190         i = avail;
1191
1192         /* Sanity check: available IB window must be disabled at this point */
1193         regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
1194         if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) {
1195                 ret = -EIO;
1196                 goto out;
1197         }
1198
1199         ib_win = &priv->ib_win[i];
1200         ib_win->active = true;
1201         ib_win->rstart = ibw_start;
1202         ib_win->lstart = loc_start;
1203         ib_win->size = ibw_size;
1204         ib_win->xlat = (lstart != rstart);
1205         INIT_LIST_HEAD(&ib_win->mappings);
1206
1207         /*
1208          * When using direct IBW mapping with an IBW size larger than requested,
1209          * multiple local memory blocks can be mapped through the same IBW.
1210          * To handle this situation we maintain a list of "clients" for such IBWs.
1211          */
1212         if (direct) {
1213                 map->lstart = lstart;
1214                 list_add_tail(&map->node, &ib_win->mappings);
1215         }
1216
1217         iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8,
1218                         priv->regs + TSI721_IBWIN_SZ(i));
1219
1220         iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i));
1221         iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD),
1222                   priv->regs + TSI721_IBWIN_TLA(i));
1223
1224         iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i));
1225         iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
1226                 priv->regs + TSI721_IBWIN_LB(i));
1227
1228         priv->ibwin_cnt--;
1229
1230         tsi_debug(IBW, &priv->pdev->dev,
1231                 "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
1232                 i, ibw_start, &loc_start, ibw_size);
1233
1234         return 0;
1235 out:
1236         kfree(map);
1237         return ret;
1238 }
1239
1240 /**
1241  * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
1242  * @mport: RapidIO master port
1243  * @lstart: Local memory space start address.
1244  */
1245 static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
1246                                 dma_addr_t lstart)
1247 {
1248         struct tsi721_device *priv = mport->priv;
1249         struct tsi721_ib_win *ib_win;
1250         int i;
1251
1252         tsi_debug(IBW, &priv->pdev->dev,
1253                 "Unmap IBW mapped to PCIe_%pad", &lstart);
1254
1255         /* Search for matching active inbound translation window */
1256         for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1257                 ib_win = &priv->ib_win[i];
1258
1259                 /* Address translating IBWs must be an exact match */
1260                 if (!ib_win->active ||
1261                     (ib_win->xlat && lstart != ib_win->lstart))
1262                         continue;
1263
1264                 if (lstart >= ib_win->lstart &&
1265                     lstart < (ib_win->lstart + ib_win->size)) {
1266
1267                         if (!ib_win->xlat) {
1268                                 struct tsi721_ib_win_mapping *map;
1269                                 int found = 0;
1270
1271                                 list_for_each_entry(map,
1272                                                     &ib_win->mappings, node) {
1273                                         if (map->lstart == lstart) {
1274                                                 list_del(&map->node);
1275                                                 kfree(map);
1276                                                 found = 1;
1277                                                 break;
1278                                         }
1279                                 }
1280
1281                                 if (!found)
1282                                         continue;
1283
1284                                 if (!list_empty(&ib_win->mappings))
1285                                         break;
1286                         }
1287
1288                         tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i);
1289                         iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1290                         ib_win->active = false;
1291                         priv->ibwin_cnt++;
1292                         break;
1293                 }
1294         }
1295
1296         if (i == TSI721_IBWIN_NUM)
1297                 tsi_debug(IBW, &priv->pdev->dev,
1298                         "IB window mapped to %pad not found", &lstart);
1299 }
1300
1301 /**
1302  * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
1303  * translation regions.
1304  * @priv: pointer to tsi721 private data
1305  *
1306  * Disables inbound windows.
1307  */
1308 static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
1309 {
1310         int i;
1311
1312         /* Disable all SR2PC inbound windows */
1313         for (i = 0; i < TSI721_IBWIN_NUM; i++)
1314                 iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1315         priv->ibwin_cnt = TSI721_IBWIN_NUM;
1316 }
1317
1318 /*
1319  * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe)
1320  * translation regions.
1321  * @priv: pointer to tsi721 device private data
1322  */
1323 static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
1324 {
1325         struct tsi721_ib_win *ib_win;
1326         int i;
1327
1328         /* Disable all active SR2PC inbound windows */
1329         for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1330                 ib_win = &priv->ib_win[i];
1331                 if (ib_win->active) {
1332                         iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1333                         ib_win->active = false;
1334                 }
1335         }
1336 }
1337
1338 /**
1339  * tsi721_port_write_init - Inbound port write interface init
1340  * @priv: pointer to tsi721 private data
1341  *
1342  * Initializes inbound port write handler.
1343  * Returns %0 on success or %-ENOMEM on failure.
1344  */
1345 static int tsi721_port_write_init(struct tsi721_device *priv)
1346 {
1347         priv->pw_discard_count = 0;
1348         INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
1349         spin_lock_init(&priv->pw_fifo_lock);
1350         if (kfifo_alloc(&priv->pw_fifo,
1351                         TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
1352                 tsi_err(&priv->pdev->dev, "PW FIFO allocation failed");
1353                 return -ENOMEM;
1354         }
1355
1356         /* Use reliable port-write capture mode */
1357         iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
1358         return 0;
1359 }
1360
1361 static void tsi721_port_write_free(struct tsi721_device *priv)
1362 {
1363         kfifo_free(&priv->pw_fifo);
1364 }
1365
1366 static int tsi721_doorbell_init(struct tsi721_device *priv)
1367 {
1368         /* Outbound Doorbells do not require any setup.
1369          * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
1370          * That BAR1 was mapped during the probe routine.
1371          */
1372
1373         /* Initialize Inbound Doorbell processing DPC and queue */
1374         priv->db_discard_count = 0;
1375         INIT_WORK(&priv->idb_work, tsi721_db_dpc);
1376
1377         /* Allocate buffer for inbound doorbells queue */
1378         priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
1379                                 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1380                                 &priv->idb_dma, GFP_KERNEL);
1381         if (!priv->idb_base)
1382                 return -ENOMEM;
1383
1384         tsi_debug(DBELL, &priv->pdev->dev,
1385                   "Allocated IDB buffer @ %p (phys = %pad)",
1386                   priv->idb_base, &priv->idb_dma);
1387
1388         iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
1389                 priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
1390         iowrite32(((u64)priv->idb_dma >> 32),
1391                 priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
1392         iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
1393                 priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
1394         /* Enable accepting all inbound doorbells */
1395         iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
1396
1397         iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
1398
1399         iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
1400
1401         return 0;
1402 }
1403
1404 static void tsi721_doorbell_free(struct tsi721_device *priv)
1405 {
1406         if (priv->idb_base == NULL)
1407                 return;
1408
1409         /* Free buffer allocated for inbound doorbell queue */
1410         dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1411                           priv->idb_base, priv->idb_dma);
1412         priv->idb_base = NULL;
1413 }
1414
1415 /**
1416  * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
1417  * @priv: pointer to tsi721 private data
1418  *
1419  * Initialize BDMA channel allocated for RapidIO maintenance read/write
1420  * request generation
1421  * Returns %0 on success or %-ENOMEM on failure.
1422  */
1423 static int tsi721_bdma_maint_init(struct tsi721_device *priv)
1424 {
1425         struct tsi721_dma_desc *bd_ptr;
1426         u64             *sts_ptr;
1427         dma_addr_t      bd_phys, sts_phys;
1428         int             sts_size;
1429         int             bd_num = 2;
1430         void __iomem    *regs;
1431
1432         tsi_debug(MAINT, &priv->pdev->dev,
1433                   "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT);
1434
1435         /*
1436          * Initialize DMA channel for maintenance requests
1437          */
1438
1439         priv->mdma.ch_id = TSI721_DMACH_MAINT;
1440         regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
1441
1442         /* Allocate space for DMA descriptors */
1443         bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
1444                                         bd_num * sizeof(struct tsi721_dma_desc),
1445                                         &bd_phys, GFP_KERNEL);
1446         if (!bd_ptr)
1447                 return -ENOMEM;
1448
1449         priv->mdma.bd_num = bd_num;
1450         priv->mdma.bd_phys = bd_phys;
1451         priv->mdma.bd_base = bd_ptr;
1452
1453         tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)",
1454                   bd_ptr, &bd_phys);
1455
1456         /* Allocate space for descriptor status FIFO */
1457         sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
1458                                         bd_num : TSI721_DMA_MINSTSSZ;
1459         sts_size = roundup_pow_of_two(sts_size);
1460         sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
1461                                      sts_size * sizeof(struct tsi721_dma_sts),
1462                                      &sts_phys, GFP_KERNEL);
1463         if (!sts_ptr) {
1464                 /* Free space allocated for DMA descriptors */
1465                 dma_free_coherent(&priv->pdev->dev,
1466                                   bd_num * sizeof(struct tsi721_dma_desc),
1467                                   bd_ptr, bd_phys);
1468                 priv->mdma.bd_base = NULL;
1469                 return -ENOMEM;
1470         }
1471
1472         priv->mdma.sts_phys = sts_phys;
1473         priv->mdma.sts_base = sts_ptr;
1474         priv->mdma.sts_size = sts_size;
1475
1476         tsi_debug(MAINT, &priv->pdev->dev,
1477                 "desc status FIFO @ %p (phys = %pad) size=0x%x",
1478                 sts_ptr, &sts_phys, sts_size);
1479
1480         /* Initialize DMA descriptors ring */
1481         bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
1482         bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
1483                                                  TSI721_DMAC_DPTRL_MASK);
1484         bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
1485
1486         /* Setup DMA descriptor pointers */
1487         iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
1488         iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
1489                 regs + TSI721_DMAC_DPTRL);
1490
1491         /* Setup descriptor status FIFO */
1492         iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
1493         iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
1494                 regs + TSI721_DMAC_DSBL);
1495         iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
1496                 regs + TSI721_DMAC_DSSZ);
1497
1498         /* Clear interrupt bits */
1499         iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
1500
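        /* Read back to flush the posted PCIe write before continuing */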
1501         ioread32(regs + TSI721_DMAC_INT);
1502
1503         /* Toggle DMA channel initialization */
1504         iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
1505         ioread32(regs + TSI721_DMAC_CTL);
1506         udelay(10);
1507
1508         return 0;
1509 }
1510
1511 static int tsi721_bdma_maint_free(struct tsi721_device *priv)
1512 {
1513         u32 ch_stat;
1514         struct tsi721_bdma_maint *mdma = &priv->mdma;
1515         void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
1516
1517         if (mdma->bd_base == NULL)
1518                 return 0;
1519
1520         /* Check if DMA channel still running */
1521         ch_stat = ioread32(regs + TSI721_DMAC_STS);
1522         if (ch_stat & TSI721_DMAC_STS_RUN)
1523                 return -EFAULT;
1524
1525         /* Put DMA channel into init state */
1526         iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
1527
1528         /* Free space allocated for DMA descriptors */
1529         dma_free_coherent(&priv->pdev->dev,
1530                 mdma->bd_num * sizeof(struct tsi721_dma_desc),
1531                 mdma->bd_base, mdma->bd_phys);
1532         mdma->bd_base = NULL;
1533
1534         /* Free space allocated for status FIFO */
1535         dma_free_coherent(&priv->pdev->dev,
1536                 mdma->sts_size * sizeof(struct tsi721_dma_sts),
1537                 mdma->sts_base, mdma->sts_phys);
1538         mdma->sts_base = NULL;
1539         return 0;
1540 }
1541
1542 /* Enable Inbound Messaging Interrupts */
1543 static void
1544 tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
1545                                   u32 inte_mask)
1546 {
1547         u32 rval;
1548
1549         if (!inte_mask)
1550                 return;
1551
1552         /* Clear pending Inbound Messaging interrupts */
1553         iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1554
1555         /* Enable Inbound Messaging interrupts */
1556         rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1557         iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
1558
1559         if (priv->flags & TSI721_USING_MSIX)
1560                 return; /* Finished if we are in MSI-X mode */
1561
1562         /*
1563          * For MSI and INTA interrupt signalling we need to enable next levels
1564          */
1565
1566         /* Enable Device Channel Interrupt */
1567         rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1568         iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
1569                   priv->regs + TSI721_DEV_CHAN_INTE);
1570 }
1571
1572 /* Disable Inbound Messaging Interrupts */
1573 static void
1574 tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
1575                                    u32 inte_mask)
1576 {
1577         u32 rval;
1578
1579         if (!inte_mask)
1580                 return;
1581
1582         /* Clear pending Inbound Messaging interrupts */
1583         iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1584
1585         /* Disable Inbound Messaging interrupts */
1586         rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1587         rval &= ~inte_mask;
1588         iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
1589
1590         if (priv->flags & TSI721_USING_MSIX)
1591                 return; /* Finished if we are in MSI-X mode */
1592
1593         /*
1594          * For MSI and INTA interrupt signalling we need to disable next levels
1595          */
1596
1597         /* Disable Device Channel Interrupt */
1598         rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1599         rval &= ~TSI721_INT_IMSG_CHAN(ch);
1600         iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1601 }
1602
1603 /* Enable Outbound Messaging interrupts */
1604 static void
1605 tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
1606                                   u32 inte_mask)
1607 {
1608         u32 rval;
1609
1610         if (!inte_mask)
1611                 return;
1612
1613         /* Clear pending Outbound Messaging interrupts */
1614         iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1615
1616         /* Enable Outbound Messaging channel interrupts */
1617         rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1618         iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
1619
1620         if (priv->flags & TSI721_USING_MSIX)
1621                 return; /* Finished if we are in MSI-X mode */
1622
1623         /*
1624          * For MSI and INTA interrupt signalling we need to enable next levels
1625          */
1626
1627         /* Enable Device Channel Interrupt */
1628         rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1629         iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
1630                   priv->regs + TSI721_DEV_CHAN_INTE);
1631 }
1632
1633 /* Disable Outbound Messaging interrupts */
1634 static void
1635 tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
1636                                    u32 inte_mask)
1637 {
1638         u32 rval;
1639
1640         if (!inte_mask)
1641                 return;
1642
1643         /* Clear pending Outbound Messaging interrupts */
1644         iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1645
1646         /* Disable Outbound Messaging interrupts */
1647         rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1648         rval &= ~inte_mask;
1649         iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
1650
1651         if (priv->flags & TSI721_USING_MSIX)
1652                 return; /* Finished if we are in MSI-X mode */
1653
1654         /*
1655          * For MSI and INTA interrupt signalling we need to disable next levels
1656          */
1657
1658         /* Disable Device Channel Interrupt */
1659         rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1660         rval &= ~TSI721_INT_OMSG_CHAN(ch);
1661         iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1662 }
1663
1664 /**
1665  * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
1666  * @mport: Master port with outbound message queue
1667  * @rdev: Target of outbound message
1668  * @mbox: Outbound mailbox
1669  * @buffer: Message to add to outbound queue
1670  * @len: Length of message
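 *
 * Returns %0 on success or %-EINVAL if the mailbox is not initialized or
 * the message length is outside the supported range.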
1671  */
1672 static int
1673 tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
1674                         void *buffer, size_t len)
1675 {
1676         struct tsi721_device *priv = mport->priv;
1677         struct tsi721_omsg_desc *desc;
1678         u32 tx_slot;
1679         unsigned long flags;
1680
1681         if (!priv->omsg_init[mbox] ||
1682             len > TSI721_MSG_MAX_SIZE || len < 8)
1683                 return -EINVAL;
1684
1685         spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags);
1686
1687         tx_slot = priv->omsg_ring[mbox].tx_slot;
1688
1689         /* Copy message into transfer buffer */
1690         memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
1691
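        /* Round the length up to the next 8-byte boundary; the low three
         * bits are masked off when the descriptor size field is built below.
         */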
1692         if (len & 0x7)
1693                 len += 8;
1694
1695         /* Build descriptor associated with buffer */
1696         desc = priv->omsg_ring[mbox].omd_base;
1697         desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
1698 #ifdef TSI721_OMSG_DESC_INT
1699         /* Request IOF_DONE interrupt generation for every fourth frame in the queue */
1700         if (tx_slot % 4 == 0)
1701                 desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
1702 #endif
1703         desc[tx_slot].msg_info =
1704                 cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
1705                             (0xe << 12) | (len & 0xff8));
1706         desc[tx_slot].bufptr_lo =
1707                 cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
1708                             0xffffffff);
1709         desc[tx_slot].bufptr_hi =
1710                 cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
1711
1712         priv->omsg_ring[mbox].wr_count++;
1713
1714         /* Go to next descriptor */
1715         if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
1716                 priv->omsg_ring[mbox].tx_slot = 0;
1717                 /* Move through the ring link descriptor at the end */
1718                 priv->omsg_ring[mbox].wr_count++;
1719         }
1720
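        /* Make sure the descriptor writes are globally visible before the
         * write-count doorbell below hands them to the hardware.
         */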
1721         mb();
1722
1723         /* Set new write count value */
1724         iowrite32(priv->omsg_ring[mbox].wr_count,
1725                 priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1726         ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1727
1728         spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags);
1729
1730         return 0;
1731 }
1732
1733 /**
1734  * tsi721_omsg_handler - Outbound Message Interrupt Handler
1735  * @priv: pointer to tsi721 private data
1736  * @ch:   number of OB MSG channel to service
1737  *
1738  * Services channel interrupts from outbound messaging engine.
1739  */
1740 static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
1741 {
1742         u32 omsg_int;
1743         struct rio_mport *mport = &priv->mport;
1744         void *dev_id = NULL;
1745         u32 tx_slot = 0xffffffff;
1746         int do_callback = 0;
1747
1748         spin_lock(&priv->omsg_ring[ch].lock);
1749
1750         omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
1751
1752         if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
1753                 tsi_info(&priv->pdev->dev,
1754                         "OB MBOX%d: Status FIFO is full", ch);
1755
1756         if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
1757                 u32 srd_ptr;
1758                 u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
1759                 int i, j;
1760
1761                 /*
1762                  * Find last successfully processed descriptor
1763                  */
1764
1765                 /* Check and clear descriptor status FIFO entries */
1766                 srd_ptr = priv->omsg_ring[ch].sts_rdptr;
1767                 sts_ptr = priv->omsg_ring[ch].sts_base;
1768                 j = srd_ptr * 8;
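                /*
                 * Each status FIFO line holds eight 64-bit completion
                 * pointers; scan lines until an empty (zero) entry is found,
                 * tracking the last and previous non-zero pointers.
                 */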
1769                 while (sts_ptr[j]) {
1770                         for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
1771                                 prev_ptr = last_ptr;
1772                                 last_ptr = le64_to_cpu(sts_ptr[j]);
1773                                 sts_ptr[j] = 0;
1774                         }
1775
1776                         ++srd_ptr;
1777                         srd_ptr %= priv->omsg_ring[ch].sts_size;
1778                         j = srd_ptr * 8;
1779                 }
1780
1781                 if (last_ptr == 0)
1782                         goto no_sts_update;
1783
1784                 priv->omsg_ring[ch].sts_rdptr = srd_ptr;
1785                 iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
1786
1787                 if (!mport->outb_msg[ch].mcback)
1788                         goto no_sts_update;
1789
1790                 /* Inform upper layer about transfer completion */
1791
1792                 tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
1793                                                 sizeof(struct tsi721_omsg_desc);
1794
1795                 /*
1796                  * Check if this is a Link Descriptor (LD).
1797                  * If yes, ignore LD and use descriptor processed
1798                  * before LD.
1799                  */
1800                 if (tx_slot == priv->omsg_ring[ch].size) {
1801                         if (prev_ptr)
1802                                 tx_slot = (prev_ptr -
1803                                         (u64)priv->omsg_ring[ch].omd_phys)/
1804                                                 sizeof(struct tsi721_omsg_desc);
1805                         else
1806                                 goto no_sts_update;
1807                 }
1808
1809                 if (tx_slot >= priv->omsg_ring[ch].size)
1810                         tsi_debug(OMSG, &priv->pdev->dev,
1811                                   "OB_MSG tx_slot=%x >= size=%x",
1812                                   tx_slot, priv->omsg_ring[ch].size);
1813                 WARN_ON(tx_slot >= priv->omsg_ring[ch].size);
1814
1815                 /* Move slot index to the next message to be sent */
1816                 ++tx_slot;
1817                 if (tx_slot == priv->omsg_ring[ch].size)
1818                         tx_slot = 0;
1819
1820                 dev_id = priv->omsg_ring[ch].dev_id;
1821                 do_callback = 1;
1822         }
1823
1824 no_sts_update:
1825
1826         if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
1827                 /*
1828                  * Outbound message operation aborted due to error,
1829                  * reinitialize OB MSG channel
1830                  */
1831
1832                 tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x",
1833                           ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
1834
1835                 iowrite32(TSI721_OBDMAC_INT_ERROR,
1836                                 priv->regs + TSI721_OBDMAC_INT(ch));
1837                 iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
1838                                 priv->regs + TSI721_OBDMAC_CTL(ch));
1839                 ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
1840
1841                 /* Inform upper level to clear all pending tx slots */
1842                 dev_id = priv->omsg_ring[ch].dev_id;
1843                 tx_slot = priv->omsg_ring[ch].tx_slot;
1844                 do_callback = 1;
1845
1846                 /* Synch tx_slot tracking */
1847                 iowrite32(priv->omsg_ring[ch].tx_slot,
1848                         priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1849                 ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1850                 priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
1851                 priv->omsg_ring[ch].sts_rdptr = 0;
1852         }
1853
1854         /* Clear channel interrupts */
1855         iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
1856
1857         if (!(priv->flags & TSI721_USING_MSIX)) {
1858                 u32 ch_inte;
1859
1860                 /* Re-enable channel interrupts */
1861                 ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1862                 ch_inte |= TSI721_INT_OMSG_CHAN(ch);
1863                 iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
1864         }
1865
1866         spin_unlock(&priv->omsg_ring[ch].lock);
1867
1868         if (mport->outb_msg[ch].mcback && do_callback)
1869                 mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot);
1870 }
1871
1872 /**
1873  * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
1874  * @mport: Master port implementing Outbound Messaging Engine
1875  * @dev_id: Device specific pointer to pass on event
1876  * @mbox: Mailbox to open
1877  * @entries: Number of entries in the outbound mailbox ring
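 *
 * Returns %0 on success or a negative error code on failure.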
1878  */
1879 static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1880                                  int mbox, int entries)
1881 {
1882         struct tsi721_device *priv = mport->priv;
1883         struct tsi721_omsg_desc *bd_ptr;
1884         int i, rc = 0;
1885
1886         if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
1887             (entries > (TSI721_OMSGD_RING_SIZE)) ||
1888             (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
1889                 rc = -EINVAL;
1890                 goto out;
1891         }
1892
1893         if ((mbox_sel & (1 << mbox)) == 0) {
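        /* Skip mailboxes that are masked out by the mbox_sel module parameter */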
1894                 rc = -ENODEV;
1895                 goto out;
1896         }
1897
1898         priv->omsg_ring[mbox].dev_id = dev_id;
1899         priv->omsg_ring[mbox].size = entries;
1900         priv->omsg_ring[mbox].sts_rdptr = 0;
1901         spin_lock_init(&priv->omsg_ring[mbox].lock);
1902
1903         /* Outbound Msg Buffer allocation based on
1904            the maximum number of descriptor entries */
1905         for (i = 0; i < entries; i++) {
1906                 priv->omsg_ring[mbox].omq_base[i] =
1907                         dma_alloc_coherent(
1908                                 &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
1909                                 &priv->omsg_ring[mbox].omq_phys[i],
1910                                 GFP_KERNEL);
1911                 if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
1912                         tsi_debug(OMSG, &priv->pdev->dev,
1913                                   "ENOMEM for OB_MSG_%d data buffer", mbox);
1914                         rc = -ENOMEM;
1915                         goto out_buf;
1916                 }
1917         }
1918
1919         /* Outbound message descriptor allocation */
1920         priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
1921                                 &priv->pdev->dev,
1922                                 (entries + 1) * sizeof(struct tsi721_omsg_desc),
1923                                 &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
1924         if (priv->omsg_ring[mbox].omd_base == NULL) {
1925                 tsi_debug(OMSG, &priv->pdev->dev,
1926                         "ENOMEM for OB_MSG_%d descriptor memory", mbox);
1927                 rc = -ENOMEM;
1928                 goto out_buf;
1929         }
1930
1931         priv->omsg_ring[mbox].tx_slot = 0;
1932
1933         /* Outbound message descriptor status FIFO allocation */
1934         priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1935         priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
1936                         priv->omsg_ring[mbox].sts_size *
1937                                                 sizeof(struct tsi721_dma_sts),
1938                         &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
1939         if (priv->omsg_ring[mbox].sts_base == NULL) {
1940                 tsi_debug(OMSG, &priv->pdev->dev,
1941                         "ENOMEM for OB_MSG_%d status FIFO", mbox);
1942                 rc = -ENOMEM;
1943                 goto out_desc;
1944         }
1945
1946         /*
1947          * Configure Outbound Messaging Engine
1948          */
1949
1950         /* Setup Outbound Message descriptor pointer */
1951         iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
1952                         priv->regs + TSI721_OBDMAC_DPTRH(mbox));
1953         iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
1954                                         TSI721_OBDMAC_DPTRL_MASK),
1955                         priv->regs + TSI721_OBDMAC_DPTRL(mbox));
1956
1957         /* Setup Outbound Message descriptor status FIFO */
1958         iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
1959                         priv->regs + TSI721_OBDMAC_DSBH(mbox));
1960         iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
1961                                         TSI721_OBDMAC_DSBL_MASK),
1962                         priv->regs + TSI721_OBDMAC_DSBL(mbox));
1963         iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
1964                 priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
1965
1966         /* Enable interrupts */
1967
1968 #ifdef CONFIG_PCI_MSI
1969         if (priv->flags & TSI721_USING_MSIX) {
1970                 int idx = TSI721_VECT_OMB0_DONE + mbox;
1971
1972                 /* Request interrupt service if we are in MSI-X mode */
1973                 rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
1974                                  priv->msix[idx].irq_name, (void *)priv);
1975
1976                 if (rc) {
1977                         tsi_debug(OMSG, &priv->pdev->dev,
1978                                 "Unable to get MSI-X IRQ for OBOX%d-DONE",
1979                                 mbox);
1980                         goto out_stat;
1981                 }
1982
1983                 idx = TSI721_VECT_OMB0_INT + mbox;
1984                 rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
1985                                  priv->msix[idx].irq_name, (void *)priv);
1986
1987                 if (rc) {
1988                         tsi_debug(OMSG, &priv->pdev->dev,
1989                                 "Unable to get MSI-X IRQ for MBOX%d-INT", mbox);
1990                         idx = TSI721_VECT_OMB0_DONE + mbox;
1991                         free_irq(priv->msix[idx].vector, (void *)priv);
1992                         goto out_stat;
1993                 }
1994         }
1995 #endif /* CONFIG_PCI_MSI */
1996
1997         tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
1998
1999         /* Initialize Outbound Message descriptors ring */
2000         bd_ptr = priv->omsg_ring[mbox].omd_base;
2001         bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
2002         bd_ptr[entries].msg_info = 0;
2003         bd_ptr[entries].next_lo =
2004                 cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
2005                 TSI721_OBDMAC_DPTRL_MASK);
2006         bd_ptr[entries].next_hi =
2007                 cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
2008         priv->omsg_ring[mbox].wr_count = 0;
2009         mb();
2010
2011         /* Initialize Outbound Message engine */
2012         iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
2013                   priv->regs + TSI721_OBDMAC_CTL(mbox));
2014         ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
2015         udelay(10);
2016
2017         priv->omsg_init[mbox] = 1;
2018
2019         return 0;
2020
2021 #ifdef CONFIG_PCI_MSI
2022 out_stat:
2023         dma_free_coherent(&priv->pdev->dev,
2024                 priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
2025                 priv->omsg_ring[mbox].sts_base,
2026                 priv->omsg_ring[mbox].sts_phys);
2027
2028         priv->omsg_ring[mbox].sts_base = NULL;
2029 #endif /* CONFIG_PCI_MSI */
2030
2031 out_desc:
2032         dma_free_coherent(&priv->pdev->dev,
2033                 (entries + 1) * sizeof(struct tsi721_omsg_desc),
2034                 priv->omsg_ring[mbox].omd_base,
2035                 priv->omsg_ring[mbox].omd_phys);
2036
2037         priv->omsg_ring[mbox].omd_base = NULL;
2038
2039 out_buf:
2040         for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
2041                 if (priv->omsg_ring[mbox].omq_base[i]) {
2042                         dma_free_coherent(&priv->pdev->dev,
2043                                 TSI721_MSG_BUFFER_SIZE,
2044                                 priv->omsg_ring[mbox].omq_base[i],
2045                                 priv->omsg_ring[mbox].omq_phys[i]);
2046
2047                         priv->omsg_ring[mbox].omq_base[i] = NULL;
2048                 }
2049         }
2050
2051 out:
2052         return rc;
2053 }
2054
2055 /**
2056  * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
2057  * @mport: Master port implementing the outbound message unit
2058  * @mbox: Mailbox to close
2059  */
2060 static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
2061 {
2062         struct tsi721_device *priv = mport->priv;
2063         u32 i;
2064
2065         if (!priv->omsg_init[mbox])
2066                 return;
2067         priv->omsg_init[mbox] = 0;
2068
2069         /* Disable Interrupts */
2070
2071         tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
2072
2073 #ifdef CONFIG_PCI_MSI
2074         if (priv->flags & TSI721_USING_MSIX) {
2075                 free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
2076                          (void *)priv);
2077                 free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
2078                          (void *)priv);
2079         }
2080 #endif /* CONFIG_PCI_MSI */
2081
2082         /* Free OMSG Descriptor Status FIFO */
2083         dma_free_coherent(&priv->pdev->dev,
2084                 priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
2085                 priv->omsg_ring[mbox].sts_base,
2086                 priv->omsg_ring[mbox].sts_phys);
2087
2088         priv->omsg_ring[mbox].sts_base = NULL;
2089
2090         /* Free OMSG descriptors */
2091         dma_free_coherent(&priv->pdev->dev,
2092                 (priv->omsg_ring[mbox].size + 1) *
2093                         sizeof(struct tsi721_omsg_desc),
2094                 priv->omsg_ring[mbox].omd_base,
2095                 priv->omsg_ring[mbox].omd_phys);
2096
2097         priv->omsg_ring[mbox].omd_base = NULL;
2098
2099         /* Free message buffers */
2100         for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
2101                 if (priv->omsg_ring[mbox].omq_base[i]) {
2102                         dma_free_coherent(&priv->pdev->dev,
2103                                 TSI721_MSG_BUFFER_SIZE,
2104                                 priv->omsg_ring[mbox].omq_base[i],
2105                                 priv->omsg_ring[mbox].omq_phys[i]);
2106
2107                         priv->omsg_ring[mbox].omq_base[i] = NULL;
2108                 }
2109         }
2110 }
2111
2112 /**
2113  * tsi721_imsg_handler - Inbound Message Interrupt Handler
2114  * @priv: pointer to tsi721 private data
2115  * @ch: inbound message channel number to service
2116  *
2117  * Services channel interrupts from inbound messaging engine.
2118  */
2119 static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
2120 {
2121         u32 mbox = ch - 4;
2122         u32 imsg_int;
2123         struct rio_mport *mport = &priv->mport;
2124
2125         spin_lock(&priv->imsg_ring[mbox].lock);
2126
2127         imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
2128
2129         if (imsg_int & TSI721_IBDMAC_INT_SRTO)
2130                 tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox);
2131
2132         if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
2133                 tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox);
2134
2135         if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
2136                 tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox);
2137
2138         /* Clear IB channel interrupts */
2139         iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
2140
2141         /* If an IB Msg is received, notify the upper layer */
2142         if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
2143                 mport->inb_msg[mbox].mcback)
2144                 mport->inb_msg[mbox].mcback(mport,
2145                                 priv->imsg_ring[mbox].dev_id, mbox, -1);
2146
2147         if (!(priv->flags & TSI721_USING_MSIX)) {
2148                 u32 ch_inte;
2149
2150                 /* Re-enable channel interrupts */
2151                 ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
2152                 ch_inte |= TSI721_INT_IMSG_CHAN(ch);
2153                 iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
2154         }
2155
2156         spin_unlock(&priv->imsg_ring[mbox].lock);
2157 }
2158
2159 /**
2160  * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
2161  * @mport: Master port implementing the Inbound Messaging Engine
2162  * @dev_id: Device specific pointer to pass on event
2163  * @mbox: Mailbox to open
2164  * @entries: Number of entries in the inbound mailbox ring
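 *
 * Returns %0 on success or a negative error code on failure.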
2165  */
2166 static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
2167                                 int mbox, int entries)
2168 {
2169         struct tsi721_device *priv = mport->priv;
2170         int ch = mbox + 4;
2171         int i;
2172         u64 *free_ptr;
2173         int rc = 0;
2174
2175         if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
2176             (entries > TSI721_IMSGD_RING_SIZE) ||
2177             (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
2178                 rc = -EINVAL;
2179                 goto out;
2180         }
2181
2182         if ((mbox_sel & (1 << mbox)) == 0) {
2183                 rc = -ENODEV;
2184                 goto out;
2185         }
2186
2187         /* Initialize IB Messaging Ring */
2188         priv->imsg_ring[mbox].dev_id = dev_id;
2189         priv->imsg_ring[mbox].size = entries;
2190         priv->imsg_ring[mbox].rx_slot = 0;
2191         priv->imsg_ring[mbox].desc_rdptr = 0;
2192         priv->imsg_ring[mbox].fq_wrptr = 0;
2193         for (i = 0; i < priv->imsg_ring[mbox].size; i++)
2194                 priv->imsg_ring[mbox].imq_base[i] = NULL;
2195         spin_lock_init(&priv->imsg_ring[mbox].lock);
2196
2197         /* Allocate buffers for incoming messages */
2198         priv->imsg_ring[mbox].buf_base =
2199                 dma_alloc_coherent(&priv->pdev->dev,
2200                                    entries * TSI721_MSG_BUFFER_SIZE,
2201                                    &priv->imsg_ring[mbox].buf_phys,
2202                                    GFP_KERNEL);
2203
2204         if (priv->imsg_ring[mbox].buf_base == NULL) {
2205                 tsi_err(&priv->pdev->dev,
2206                         "Failed to allocate buffers for IB MBOX%d", mbox);
2207                 rc = -ENOMEM;
2208                 goto out;
2209         }
2210
2211         /* Allocate memory for circular free list */
2212         priv->imsg_ring[mbox].imfq_base =
2213                 dma_alloc_coherent(&priv->pdev->dev,
2214                                    entries * 8,
2215                                    &priv->imsg_ring[mbox].imfq_phys,
2216                                    GFP_KERNEL);
2217
2218         if (priv->imsg_ring[mbox].imfq_base == NULL) {
2219                 tsi_err(&priv->pdev->dev,
2220                         "Failed to allocate free queue for IB MBOX%d", mbox);
2221                 rc = -ENOMEM;
2222                 goto out_buf;
2223         }
2224
2225         /* Allocate memory for Inbound message descriptors */
2226         priv->imsg_ring[mbox].imd_base =
2227                 dma_alloc_coherent(&priv->pdev->dev,
2228                                    entries * sizeof(struct tsi721_imsg_desc),
2229                                    &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);
2230
2231         if (priv->imsg_ring[mbox].imd_base == NULL) {
2232                 tsi_err(&priv->pdev->dev,
2233                         "Failed to allocate descriptor memory for IB MBOX%d",
2234                         mbox);
2235                 rc = -ENOMEM;
2236                 goto out_dma;
2237         }
2238
2239         /* Fill free buffer pointer list */
2240         free_ptr = priv->imsg_ring[mbox].imfq_base;
2241         for (i = 0; i < entries; i++)
2242                 free_ptr[i] = cpu_to_le64(
2243                                 (u64)(priv->imsg_ring[mbox].buf_phys) +
2244                                 i * 0x1000);
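        /* (each free-list entry points at one 4KB slot inside buf_base) */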
2245
2246         mb();
2247
2248         /*
2249          * For mapping of inbound SRIO Messages into appropriate queues we need
2250          * to set Inbound Device ID register in the messaging engine. We do it
2251          * once when first inbound mailbox is requested.
2252          */
2253         if (!(priv->flags & TSI721_IMSGID_SET)) {
2254                 iowrite32((u32)priv->mport.host_deviceid,
2255                         priv->regs + TSI721_IB_DEVID);
2256                 priv->flags |= TSI721_IMSGID_SET;
2257         }
2258
2259         /*
2260          * Configure Inbound Messaging channel (ch = mbox + 4)
2261          */
2262
2263         /* Setup Inbound Message free queue */
2264         iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
2265                 priv->regs + TSI721_IBDMAC_FQBH(ch));
2266         iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
2267                         TSI721_IBDMAC_FQBL_MASK),
2268                 priv->regs+TSI721_IBDMAC_FQBL(ch));
2269         iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
2270                 priv->regs + TSI721_IBDMAC_FQSZ(ch));
2271
2272         /* Setup Inbound Message descriptor queue */
2273         iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
2274                 priv->regs + TSI721_IBDMAC_DQBH(ch));
2275         iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
2276                    (u32)TSI721_IBDMAC_DQBL_MASK),
2277                 priv->regs+TSI721_IBDMAC_DQBL(ch));
2278         iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
2279                 priv->regs + TSI721_IBDMAC_DQSZ(ch));
2280
2281         /* Enable interrupts */
2282
2283 #ifdef CONFIG_PCI_MSI
2284         if (priv->flags & TSI721_USING_MSIX) {
2285                 int idx = TSI721_VECT_IMB0_RCV + mbox;
2286
2287                 /* Request interrupt service if we are in MSI-X mode */
2288                 rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
2289                                  priv->msix[idx].irq_name, (void *)priv);
2290
2291                 if (rc) {
2292                         tsi_debug(IMSG, &priv->pdev->dev,
2293                                 "Unable to get MSI-X IRQ for IBOX%d-DONE",
2294                                 mbox);
2295                         goto out_desc;
2296                 }
2297
2298                 idx = TSI721_VECT_IMB0_INT + mbox;
2299                 rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
2300                                  priv->msix[idx].irq_name, (void *)priv);
2301
2302                 if (rc) {
2303                         tsi_debug(IMSG, &priv->pdev->dev,
2304                                 "Unable to get MSI-X IRQ for IBOX%d-INT", mbox);
2305                         free_irq(
2306                                 priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
2307                                 (void *)priv);
2308                         goto out_desc;
2309                 }
2310         }
2311 #endif /* CONFIG_PCI_MSI */
2312
2313         tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);
2314
2315         /* Initialize Inbound Message Engine */
2316         iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
2317         ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
2318         udelay(10);
2319         priv->imsg_ring[mbox].fq_wrptr = entries - 1;
2320         iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));
2321
2322         priv->imsg_init[mbox] = 1;
2323         return 0;
2324
2325 #ifdef CONFIG_PCI_MSI
2326 out_desc:
2327         dma_free_coherent(&priv->pdev->dev,
2328                 priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
2329                 priv->imsg_ring[mbox].imd_base,
2330                 priv->imsg_ring[mbox].imd_phys);
2331
2332         priv->imsg_ring[mbox].imd_base = NULL;
2333 #endif /* CONFIG_PCI_MSI */
2334
2335 out_dma:
2336         dma_free_coherent(&priv->pdev->dev,
2337                 priv->imsg_ring[mbox].size * 8,
2338                 priv->imsg_ring[mbox].imfq_base,
2339                 priv->imsg_ring[mbox].imfq_phys);
2340
2341         priv->imsg_ring[mbox].imfq_base = NULL;
2342
2343 out_buf:
2344         dma_free_coherent(&priv->pdev->dev,
2345                 priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
2346                 priv->imsg_ring[mbox].buf_base,
2347                 priv->imsg_ring[mbox].buf_phys);
2348
2349         priv->imsg_ring[mbox].buf_base = NULL;
2350
2351 out:
2352         return rc;
2353 }
2354
2355 /**
2356  * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
2357  * @mport: Master port implementing the Inbound Messaging Engine
2358  * @mbox: Mailbox to close
2359  */
2360 static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
2361 {
2362         struct tsi721_device *priv = mport->priv;
2363         u32 rx_slot;
2364         int ch = mbox + 4;
2365
2366         if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
2367                 return;
2368         priv->imsg_init[mbox] = 0;
2369
2370         /* Disable Inbound Messaging Engine */
2371
2372         /* Disable Interrupts */
2373         tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_ALL);
2374
2375 #ifdef CONFIG_PCI_MSI
2376         if (priv->flags & TSI721_USING_MSIX) {
2377                 free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
2378                                 (void *)priv);
2379                 free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
2380                                 (void *)priv);
2381         }
2382 #endif /* CONFIG_PCI_MSI */
2383
2384         /* Clear Inbound Buffer Queue */
2385         for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
2386                 priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
2387
2388         /* Free memory allocated for message buffers */
2389         dma_free_coherent(&priv->pdev->dev,
2390                 priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
2391                 priv->imsg_ring[mbox].buf_base,
2392                 priv->imsg_ring[mbox].buf_phys);
2393
2394         priv->imsg_ring[mbox].buf_base = NULL;
2395
2396         /* Free memory allocated for free pointer list */
2397         dma_free_coherent(&priv->pdev->dev,
2398                 priv->imsg_ring[mbox].size * 8,
2399                 priv->imsg_ring[mbox].imfq_base,
2400                 priv->imsg_ring[mbox].imfq_phys);
2401
2402         priv->imsg_ring[mbox].imfq_base = NULL;
2403
2404         /* Free memory allocated for RX descriptors */
2405         dma_free_coherent(&priv->pdev->dev,
2406                 priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
2407                 priv->imsg_ring[mbox].imd_base,
2408                 priv->imsg_ring[mbox].imd_phys);
2409
2410         priv->imsg_ring[mbox].imd_base = NULL;
2411 }
2412
2413 /**
2414  * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
2415  * @mport: Master port implementing the Inbound Messaging Engine
2416  * @mbox: Inbound mailbox number
2417  * @buf: Buffer to add to inbound queue
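 *
 * Returns %0 on success or %-EINVAL if the target slot is already occupied.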
2418  */
2419 static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
2420 {
2421         struct tsi721_device *priv = mport->priv;
2422         u32 rx_slot;
2423         int rc = 0;
2424
2425         rx_slot = priv->imsg_ring[mbox].rx_slot;
2426         if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
2427                 tsi_err(&priv->pdev->dev,
2428                         "Error adding inbound buffer %d, buffer exists",
2429                         rx_slot);
2430                 rc = -EINVAL;
2431                 goto out;
2432         }
2433
2434         priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
2435
2436         if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
2437                 priv->imsg_ring[mbox].rx_slot = 0;
2438
2439 out:
2440         return rc;
2441 }
2442
2443 /**
2444  * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
2445  * @mport: Master port implementing the Inbound Messaging Engine
2446  * @mbox: Inbound mailbox number
2447  *
2448  * Returns pointer to the message on success or NULL on failure.
2449  */
2450 static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
2451 {
2452         struct tsi721_device *priv = mport->priv;
2453         struct tsi721_imsg_desc *desc;
2454         u32 rx_slot;
2455         void *rx_virt = NULL;
2456         u64 rx_phys;
2457         void *buf = NULL;
2458         u64 *free_ptr;
2459         int ch = mbox + 4;
2460         int msg_size;
2461
2462         if (!priv->imsg_init[mbox])
2463                 return NULL;
2464
2465         desc = priv->imsg_ring[mbox].imd_base;
2466         desc += priv->imsg_ring[mbox].desc_rdptr;
2467
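        /* TSI721_IMD_HO is set by hardware when this descriptor holds a
         * received message; there is nothing to fetch while it is clear.
         */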
2468         if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
2469                 goto out;
2470
2471         rx_slot = priv->imsg_ring[mbox].rx_slot;
2472         while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
2473                 if (++rx_slot == priv->imsg_ring[mbox].size)
2474                         rx_slot = 0;
2475         }
2476
2477         rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
2478                         le32_to_cpu(desc->bufptr_lo);
2479
2480         rx_virt = priv->imsg_ring[mbox].buf_base +
2481                   (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);
2482
2483         buf = priv->imsg_ring[mbox].imq_base[rx_slot];
2484         msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
2485         if (msg_size == 0)
2486                 msg_size = RIO_MAX_MSG_SIZE;
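        /* (a zero byte count in the descriptor encodes a maximum-size message) */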
2487
2488         memcpy(buf, rx_virt, msg_size);
2489         priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
2490
2491         desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
2492         if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
2493                 priv->imsg_ring[mbox].desc_rdptr = 0;
2494
2495         iowrite32(priv->imsg_ring[mbox].desc_rdptr,
2496                 priv->regs + TSI721_IBDMAC_DQRP(ch));
2497
2498         /* Return free buffer into the pointer list */
2499         free_ptr = priv->imsg_ring[mbox].imfq_base;
2500         free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);
2501
2502         if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
2503                 priv->imsg_ring[mbox].fq_wrptr = 0;
2504
2505         iowrite32(priv->imsg_ring[mbox].fq_wrptr,
2506                 priv->regs + TSI721_IBDMAC_FQWP(ch));
2507 out:
2508         return buf;
2509 }
2510
2511 /**
2512  * tsi721_messages_init - Initialization of Messaging Engine
2513  * @priv: pointer to tsi721 private data
2514  *
2515  * Configures Tsi721 messaging engine.
2516  */
2517 static int tsi721_messages_init(struct tsi721_device *priv)
2518 {
2519         int     ch;
2520
2521         iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
2522         iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
2523         iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);
2524
2525         /* Set SRIO Message Request/Response Timeout */
2526         iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);
2527
2528         /* Initialize Inbound Messaging Engine Registers */
2529         for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
2530                 /* Clear interrupt bits */
2531                 iowrite32(TSI721_IBDMAC_INT_MASK,
2532                         priv->regs + TSI721_IBDMAC_INT(ch));
2533                 /* Clear Status */
2534                 iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));
2535
2536                 iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
2537                                 priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
2538                 iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
2539                                 priv->regs + TSI721_SMSG_ECC_NCOR(ch));
2540         }
2541
2542         return 0;
2543 }
2544
2545 /**
2546  * tsi721_query_mport - Query Tsi721-specific mport attributes
2547  * @mport: Master port to query
2548  * @attr: mport attributes structure to be filled in
2549  *
2550  * Reports SRIO link state, speed, width and DMA capabilities. Returns %0.
2551  */
2552 static int tsi721_query_mport(struct rio_mport *mport,
2553                               struct rio_mport_attr *attr)
2554 {
2555         struct tsi721_device *priv = mport->priv;
2556         u32 rval;
2557
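        /* The SRIO physical-layer extended features block sits at offset
         * 0x100 (see phys_efptr); read its CSRs to report link state,
         * baud selection and initialized port width.
         */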
2558         rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
2559         if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
2560                 rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
2561                 attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
2562                 rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
2563                 attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
2564         } else
2565                 attr->link_speed = RIO_LINK_DOWN;
2566
2567 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
2568         attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG;
2569         attr->dma_max_sge = 0;
2570         attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT;
2571         attr->dma_align = 0;
2572 #else
2573         attr->flags = 0;
2574 #endif
2575         return 0;
2576 }
2577
2578 /**
2579  * tsi721_disable_ints - disables all device interrupts
2580  * @priv: pointer to tsi721 private data
2581  */
2582 static void tsi721_disable_ints(struct tsi721_device *priv)
2583 {
2584         int ch;
2585
2586         /* Disable all device level interrupts */
2587         iowrite32(0, priv->regs + TSI721_DEV_INTE);
2588
2589         /* Disable all Device Channel interrupts */
2590         iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);
2591
2592         /* Disable all Inbound Msg Channel interrupts */
2593         for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
2594                 iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));
2595
2596         /* Disable all Outbound Msg Channel interrupts */
2597         for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
2598                 iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));
2599
2600         /* Disable all general messaging interrupts */
2601         iowrite32(0, priv->regs + TSI721_SMSG_INTE);
2602
2603         /* Disable all BDMA Channel interrupts */
2604         for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
2605                 iowrite32(0,
2606                         priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
2607
2608         /* Disable all general BDMA interrupts */
2609         iowrite32(0, priv->regs + TSI721_BDMA_INTE);
2610
2611         /* Disable all SRIO Channel interrupts */
2612         for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
2613                 iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));
2614
2615         /* Disable all general SR2PC interrupts */
2616         iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);
2617
2618         /* Disable all PC2SR interrupts */
2619         iowrite32(0, priv->regs + TSI721_PC2SR_INTE);
2620
2621         /* Disable all I2C interrupts */
2622         iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);
2623
2624         /* Disable SRIO MAC interrupts */
2625         iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
2626         iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
2627 }
2628
2629 static struct rio_ops tsi721_rio_ops = {
2630         .lcread                 = tsi721_lcread,
2631         .lcwrite                = tsi721_lcwrite,
2632         .cread                  = tsi721_cread_dma,
2633         .cwrite                 = tsi721_cwrite_dma,
2634         .dsend                  = tsi721_dsend,
2635         .open_inb_mbox          = tsi721_open_inb_mbox,
2636         .close_inb_mbox         = tsi721_close_inb_mbox,
2637         .open_outb_mbox         = tsi721_open_outb_mbox,
2638         .close_outb_mbox        = tsi721_close_outb_mbox,
2639         .add_outb_message       = tsi721_add_outb_message,
2640         .add_inb_buffer         = tsi721_add_inb_buffer,
2641         .get_inb_message        = tsi721_get_inb_message,
2642         .map_inb                = tsi721_rio_map_inb_mem,
2643         .unmap_inb              = tsi721_rio_unmap_inb_mem,
2644         .pwenable               = tsi721_pw_enable,
2645         .query_mport            = tsi721_query_mport,
2646         .map_outb               = tsi721_map_outb_win,
2647         .unmap_outb             = tsi721_unmap_outb_win,
2648 };
2649
2650 static void tsi721_mport_release(struct device *dev)
2651 {
2652         struct rio_mport *mport = to_rio_mport(dev);
2653
2654         tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id);
2655 }
2656
2657 /**
2658  * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
2659  * @priv: pointer to tsi721 private data
2660  *
2661  * Configures Tsi721 as RapidIO master port.
2662  */
2663 static int tsi721_setup_mport(struct tsi721_device *priv)
2664 {
2665         struct pci_dev *pdev = priv->pdev;
2666         int err = 0;
2667         struct rio_mport *mport = &priv->mport;
2668
2669         err = rio_mport_initialize(mport);
2670         if (err)
2671                 return err;
2672
2673         mport->ops = &tsi721_rio_ops;
2674         mport->index = 0;
2675         mport->sys_size = 0; /* small system */
2676         mport->priv = (void *)priv;
2677         mport->phys_efptr = 0x100;
2678         mport->phys_rmap = 1;
2679         mport->dev.parent = &pdev->dev;
2680         mport->dev.release = tsi721_mport_release;
2681
2682         INIT_LIST_HEAD(&mport->dbells);
2683
2684         rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
2685         rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
2686         rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
2687         snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)",
2688                  dev_driver_string(&pdev->dev), dev_name(&pdev->dev));
2689
2690         /* Hook up interrupt handler */
2691
2692 #ifdef CONFIG_PCI_MSI
2693         if (!tsi721_enable_msix(priv))
2694                 priv->flags |= TSI721_USING_MSIX;
2695         else if (!pci_enable_msi(pdev))
2696                 priv->flags |= TSI721_USING_MSI;
2697         else
2698                 tsi_debug(MPORT, &pdev->dev,
2699                          "MSI/MSI-X is not available. Using legacy INTx.");
2700 #endif /* CONFIG_PCI_MSI */
2701
2702         err = tsi721_request_irq(priv);
2703
2704         if (err) {
2705                 tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)",
2706                         pdev->irq, err);
2707                 return err;
2708         }
2709
2710 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
2711         err = tsi721_register_dma(priv);
2712         if (err)
2713                 goto err_exit;
2714 #endif
2715         /* Enable SRIO link */
2716         iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
2717                   TSI721_DEVCTL_SRBOOT_CMPL,
2718                   priv->regs + TSI721_DEVCTL);
2719
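        /* With a valid host device ID this port acts as the enumerating
         * host: advertise Host/Master/Discovered in the Port General
         * Control CSR. Otherwise leave it clear and wait to be discovered.
         */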
2720         if (mport->host_deviceid >= 0)
2721                 iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
2722                           RIO_PORT_GEN_DISCOVERED,
2723                           priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2724         else
2725                 iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2726
2727         err = rio_register_mport(mport);
2728         if (err) {
2729                 tsi721_unregister_dma(priv);
2730                 goto err_exit;
2731         }
2732
2733         return 0;
2734
2735 err_exit:
2736         tsi721_free_irq(priv);
2737         return err;
2738 }
2739
2740 static int tsi721_probe(struct pci_dev *pdev,
2741                                   const struct pci_device_id *id)
2742 {
2743         struct tsi721_device *priv;
2744         int err;
2745
2746         priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
2747         if (!priv) {
2748                 err = -ENOMEM;
2749                 goto err_exit;
2750         }
2751
2752         err = pci_enable_device(pdev);
2753         if (err) {
2754                 tsi_err(&pdev->dev, "Failed to enable PCI device");
2755                 goto err_clean;
2756         }
2757
2758         priv->pdev = pdev;
2759
2760 #ifdef DEBUG
2761         {
2762                 int i;
2763
2764                 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2765                         tsi_debug(INIT, &pdev->dev, "res%d %pR",
2766                                   i, &pdev->resource[i]);
2767                 }
2768         }
2769 #endif
2770         /*
2771          * Verify BAR configuration
2772          */
2773
2774         /* BAR_0 (registers) must be 512KB+ in 32-bit address space */
2775         if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
2776             pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
2777             pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
2778                 tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0");
2779                 err = -ENODEV;
2780                 goto err_disable_pdev;
2781         }
2782
2783         /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
2784         if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
2785             pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
2786             pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
2787                 tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1");
2788                 err = -ENODEV;
2789                 goto err_disable_pdev;
2790         }
2791
2792         /*
2793          * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
2794          * space.
2795          * NOTE: BAR_2 and BAR_4 are not used by this version of driver.
2796          * It may be a good idea to keep them disabled using HW configuration
2797          * to save PCI memory space.
2798          */
2799
2800         priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0;
2801
2802         if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) {
2803                 if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH)
2804                         tsi_debug(INIT, &pdev->dev,
2805                                  "Prefetchable OBW BAR2 will not be used");
2806                 else {
2807                         priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2);
2808                         priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2);
2809                 }
2810         }
2811
2812         if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) {
2813                 if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH)
2814                         tsi_debug(INIT, &pdev->dev,
2815                                  "Prefetchable OBW BAR4 will not be used");
2816                 else {
2817                         priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4);
2818                         priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4);
2819                 }
2820         }
2821
2822         err = pci_request_regions(pdev, DRV_NAME);
2823         if (err) {
2824                 tsi_err(&pdev->dev, "Unable to obtain PCI resources");
2825                 goto err_disable_pdev;
2826         }
2827
2828         pci_set_master(pdev);
2829
2830         priv->regs = pci_ioremap_bar(pdev, BAR_0);
2831         if (!priv->regs) {
2832                 tsi_err(&pdev->dev, "Unable to map device register space");
2833                 err = -ENOMEM;
2834                 goto err_free_res;
2835         }
2836
2837         priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
2838         if (!priv->odb_base) {
2839                 tsi_err(&pdev->dev, "Unable to map outbound doorbell space");
2840                 err = -ENOMEM;
2841                 goto err_unmap_bars;
2842         }
2843
2844         /* Configure DMA attributes. */
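        /*
         * Prefer 64-bit DMA addressing and fall back to a 32-bit mask if the
         * platform cannot support it; failure to set the matching consistent
         * (coherent) mask is reported but is not fatal.
         */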
2845         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2846                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2847                 if (err) {
2848                         tsi_err(&pdev->dev, "Unable to set DMA mask");
2849                         goto err_unmap_bars;
2850                 }
2851
2852                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2853                         tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
2854         } else {
2855                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2856                 if (err)
2857                         tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
2858         }
2859
2860         BUG_ON(!pci_is_pcie(pdev));
2861
2862         /* Clear "no snoop" and "relaxed ordering" bits to keep PCIe transactions strongly ordered and snooped (coherent). */
2863         pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
2864                 PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
2865
2866         /* Override PCIe Maximum Read Request Size setting if requested */
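        /*
         * Encodings 0..5 select a maximum read request size of 128..4096
         * bytes; the value is shifted into DEVCTL bits 14:12 (READRQ) below.
         */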
2867         if (pcie_mrrs >= 0) {
2868                 if (pcie_mrrs <= 5)
2869                         pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
2870                                         PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
2871                 else
2872                         tsi_info(&pdev->dev,
2873                                  "Invalid MRRS override value %d", pcie_mrrs);
2874         }
2875
2876         /* Adjust PCIe completion timeout (Completion Timeout Value, DEVCTL2 bits 3:0, set to 0x2). */
2877         pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
2878
2879         /*
2880          * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
2881          */
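        /*
         * The bracketing TSI721_PCIECFG_EPCTL writes (0x01 ... 0) appear to
         * unlock and re-lock the endpoint configuration registers around the
         * MSI-X table/PBA offset updates; consult the Tsi721 user manual for
         * the exact semantics (assumption, not confirmed by this driver).
         */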
2882         pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
2883         pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
2884                                                 TSI721_MSIXTBL_OFFSET);
2885         pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
2886                                                 TSI721_MSIXPBA_OFFSET);
2887         pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
2888         /* End of FIXUP */
2889
2890         tsi721_disable_ints(priv);
2891
2892         tsi721_init_pc2sr_mapping(priv);
2893         tsi721_init_sr2pc_mapping(priv);
2894
2895         if (tsi721_bdma_maint_init(priv)) {
2896                 tsi_err(&pdev->dev, "BDMA initialization failed");
2897                 err = -ENOMEM;
2898                 goto err_unmap_bars;
2899         }
2900
2901         err = tsi721_doorbell_init(priv);
2902         if (err)
2903                 goto err_free_bdma;
2904
2905         tsi721_port_write_init(priv);
2906
2907         err = tsi721_messages_init(priv);
2908         if (err)
2909                 goto err_free_consistent;
2910
2911         err = tsi721_setup_mport(priv);
2912         if (err)
2913                 goto err_free_consistent;
2914
2915         pci_set_drvdata(pdev, priv);
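        /*
         * Enable device interrupts only now that the mport, messaging and
         * doorbell resources are fully registered, so that no events should
         * be delivered before their handlers are in place.
         */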
2916         tsi721_interrupts_init(priv);
2917
2918         return 0;
2919
2920 err_free_consistent:
2921         tsi721_port_write_free(priv);
2922         tsi721_doorbell_free(priv);
2923 err_free_bdma:
2924         tsi721_bdma_maint_free(priv);
2925 err_unmap_bars:
2926         if (priv->regs)
2927                 iounmap(priv->regs);
2928         if (priv->odb_base)
2929                 iounmap(priv->odb_base);
2930 err_free_res:
2931         pci_release_regions(pdev);
2932         pci_clear_master(pdev);
2933 err_disable_pdev:
2934         pci_disable_device(pdev);
2935 err_clean:
2936         kfree(priv);
2937 err_exit:
2938         return err;
2939 }
2940
2941 static void tsi721_remove(struct pci_dev *pdev)
2942 {
2943         struct tsi721_device *priv = pci_get_drvdata(pdev);
2944
2945         tsi_debug(EXIT, &pdev->dev, "enter");
2946
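        /*
         * Quiesce the device first: mask interrupts, release IRQs and flush
         * pending work, then unregister the mport and free the DMA, doorbell
         * and port-write resources.
         */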
2947         tsi721_disable_ints(priv);
2948         tsi721_free_irq(priv);
2949         flush_scheduled_work();
2950         rio_unregister_mport(&priv->mport);
2951
2952         tsi721_unregister_dma(priv);
2953         tsi721_bdma_maint_free(priv);
2954         tsi721_doorbell_free(priv);
2955         tsi721_port_write_free(priv);
2956         tsi721_close_sr2pc_mapping(priv);
2957
2958         if (priv->regs)
2959                 iounmap(priv->regs);
2960         if (priv->odb_base)
2961                 iounmap(priv->odb_base);
2962 #ifdef CONFIG_PCI_MSI
2963         if (priv->flags & TSI721_USING_MSIX)
2964                 pci_disable_msix(priv->pdev);
2965         else if (priv->flags & TSI721_USING_MSI)
2966                 pci_disable_msi(priv->pdev);
2967 #endif
2968         pci_release_regions(pdev);
2969         pci_clear_master(pdev);
2970         pci_disable_device(pdev);
2971         pci_set_drvdata(pdev, NULL);
2972         kfree(priv);
2973         tsi_debug(EXIT, &pdev->dev, "exit");
2974 }
2975
2976 static void tsi721_shutdown(struct pci_dev *pdev)
2977 {
2978         struct tsi721_device *priv = pci_get_drvdata(pdev);
2979
2980         tsi_debug(EXIT, &pdev->dev, "enter");
2981
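        /*
         * On shutdown only quiesce the hardware: mask interrupts, stop all
         * BDMA channels and disable bus mastering. No resources are freed
         * because the system is going down.
         */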
2982         tsi721_disable_ints(priv);
2983         tsi721_dma_stop_all(priv);
2984         pci_clear_master(pdev);
2985         pci_disable_device(pdev);
2986 }
2987
2988 static const struct pci_device_id tsi721_pci_tbl[] = {
2989         { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
2990         { 0, }  /* terminate list */
2991 };
2992
2993 MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);
2994
2995 static struct pci_driver tsi721_driver = {
2996         .name           = "tsi721",
2997         .id_table       = tsi721_pci_tbl,
2998         .probe          = tsi721_probe,
2999         .remove         = tsi721_remove,
3000         .shutdown       = tsi721_shutdown,
3001 };
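/*
 * module_pci_driver() expands to the module init/exit boilerplate that
 * registers and unregisters tsi721_driver with the PCI core.
 */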
3002
3003 module_pci_driver(tsi721_driver);
3004
3005 MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver");
3006 MODULE_AUTHOR("Integrated Device Technology, Inc.");
3007 MODULE_LICENSE("GPL");