/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <jyli@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

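/*
 * Simple LIFO tag allocator: tags live in a fixed-size stack; tag_init()
 * refills it with the values size-1..0, tag_get_one() pops and
 * tag_release_one() pushes.  The allocator itself does no locking, so
 * callers apparently serialize access (e.g. via the host lock).
 */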
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;
	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
							unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}

static bool tag_is_empty(struct mvumi_tag *st)
{
	return st->top == 0;
}

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}

static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}

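/*
 * Every allocation made through mvumi_alloc_mem_resource() is recorded on
 * mhba->res_list together with its type (cached kmalloc memory vs.
 * DMA-coherent memory), so that mvumi_release_mem_resource() can later
 * walk the list and free each entry with the matching release routine.
 */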
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory, size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
						    &res->bus_addr,
						    GFP_KERNEL);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}

static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			dma_free_coherent(&mhba->pdev->dev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl -	Prepares SGL
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command from the mid-layer
 * @sgl_p:		SGL to be filled in
 * @sg_count:		returns the number of SG elements
 *
 * If successful, this function returns 0; otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	sg = scsi_sglist(scmd);
	*sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
			       scmd->sc_data_direction);
	if (*sg_count > mhba->max_sge) {
		dev_err(&mhba->pdev->dev,
			"sg count[0x%x] is bigger than max sg[0x%x].\n",
			*sg_count, mhba->max_sge);
		dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
			     scmd->sc_data_direction);
		return -1;
	}
	for (i = 0; i < *sg_count; i++) {
		busaddr = sg_dma_address(&sg[i]);
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 0;
		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
		if ((i + 1) == *sg_count)
			m_sg->flags |= 1U << mhba->eot_flag;

		sgd_inc(mhba, m_sg);
	}

	return 0;
}

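/*
 * Internal (driver-generated) commands use a single-entry SGL backed by a
 * DMA-coherent buffer; the end-of-table bit is set on that one entry,
 * mirroring what mvumi_make_sgl() does for the last scatter-list element.
 */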
static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
							unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
				       GFP_KERNEL);
	if (!virt_addr)
		return -1;

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}

static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
				unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
			&cmd->frame_phys, GFP_KERNEL);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
			" frame, size = %d.\n", mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev, "failed to allocate memory"
						" for internal frame\n");
			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}

static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
								phy_addr);
		}
		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
				cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}

/**
 * mvumi_get_cmd -	Get a command from the free pool
 * @mhba:		Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}

/**
 * mvumi_return_cmd -	Return a cmd to free command pool
 * @mhba:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 * @mhba:		Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}

/**
 * mvumi_alloc_cmds -	Allocates the command packets
 * @mhba:		Adapter soft state
 *
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}

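/*
 * The inbound/outbound lists are circular; the hardware read/write
 * pointers carry a toggle bit (regs->cl_pointer_toggle) on top of the
 * slot number so a completely full list can be told apart from an empty
 * one: equal slot numbers with different toggle bits mean no free slot.
 */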
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}

static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}

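/*
 * Claim the next inbound slot: advance ib_cur_slot, flipping the toggle
 * bit on wrap-around.  With dynamic source support the inbound list holds
 * small mvumi_dyn_list_entry descriptors rather than full-size frames.
 */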
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}

static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}

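/*
 * Re-read and sanity-check an outbound frame whose first snapshot looked
 * inconsistent.  The udelay(1) presumably gives the controller time to
 * finish writing the entry before it is read again.
 */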
static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x, "
					"cmd request ID:0x%x\n", request_id,
					mhba->tag_cmd[tag]->request_id);
			return -1;
	}

	return 0;
}

static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}

static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}

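/*
 * Drain completed entries from the outbound list into preallocated
 * mvumi_ob_data buffers on free_ob_list, then publish the new read
 * pointer back to the hardware.  If the ob_data pool runs dry, draining
 * stops and the current slot is rewound so the entry is seen again on
 * the next pass.
 */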
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
			p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}

static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba);

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}

static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;
	before = jiffies;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		msleep(500);
		rmb();
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}

static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}

static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}

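/*
 * Enable bus mastering and set the DMA mask: try 64-bit when the build
 * supports it (IS_DMA64) and fall back to a 32-bit mask if that fails.
 */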
static int mvumi_pci_set_master(struct pci_dev *pdev)
{
	int ret = 0;

	pci_set_master(pdev);

	if (IS_DMA64) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}

static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
			scmd->serial_number, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}

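/*
 * Fire an internal command and sleep until the completion path updates
 * cmd_status, or until MVUMI_INTERNAL_CMD_WAIT_TIME seconds pass.  On
 * timeout the tag is reclaimed and, if the command never left the
 * waiting queue, it is unlinked instead of decrementing fw_outstanding.
 */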
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = 0;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
							cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: an internal command was never sent!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}

static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	kfree(mhba->regs);
	pci_release_regions(mhba->pdev);
}

static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
			continue;
get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
		if (!cmd) {
			if (retry++ >= 5) {
				dev_err(&mhba->pdev->dev, "failed to get memory"
					" for internal flush cache cmd for "
					"device %d", device_id);
				retry = 0;
				continue;
			} else
				goto get_cmd;
		}
		cmd->scmd = NULL;
		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame = cmd->frame;
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[1] = CDB_CORE_MODULE;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);
		}

		mvumi_delete_internal_cmd(mhba, cmd);
	}
	return 0;
}

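/*
 * Handshake pages carry a simple XOR checksum over their frame_content
 * bytes; the driver stamps it on pages it builds (mvumi_hs_build_page())
 * and verifies it on pages received from the firmware
 * (mvumi_hs_process_page()).
 */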
static unsigned char
mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
							unsigned short len)
{
	unsigned char *ptr;
	unsigned char ret = 0, i;

	ptr = (unsigned char *) p_header->frame_content;
	for (i = 0; i < len; i++) {
		ret ^= *ptr;
		ptr++;
	}

	return ret;
}

static void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;
	u64 time;
	u64 local_time;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3; /* 3 means Linux */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
			hs_page2->host_cap = 0x08;/* host dynamic source mode */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		time = ktime_get_real_seconds();
		local_time = (time - (sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			hs_page4->ob_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
			hs_page4->ib_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
		} else {
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
		}
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}

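/*
 * The single uncached allocation below is carved up, with alignment
 * padding, into: the inbound list (followed, in dynamic-source mode, by
 * a pool of inbound frames), the inbound counter shadow, the outbound
 * shadow, and the outbound list.  The tag stack, tag-to-command table
 * and target bitmap come from separate cached allocations.
 */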
/**
 * mvumi_init_data -	Initialize requested data for FW
 * @mhba:			Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32)*2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	/* ib_list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame = v;
		mhba->ib_frame_phys = p;
	}
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	/* ib shadow */
	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32)*2;
	v += sizeof(u32)*2;
	/* ob shadow */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 8;
		v += 8;
	} else {
		offset = round_up(p, 4) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 4;
		v += 4;
	}

	/* ob list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;

	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	/* ob data pool */
	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	tmp_size = sizeof(unsigned short) * mhba->max_io +
				sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -1;
}

static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
						hs_page1->fw_ver.ver_build);

		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
		else
			mhba->eot_flag = 27;
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}

/**
 * mvumi_handshake -	Move the FW to READY state
 * @mhba:				Adapter soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it has to wait for the ready state.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_RESET:
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io-1) |
							regs->cl_pointer_toggle,
							mhba->ob_shadow);
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_baseh);
		}

		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}

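/*
 * Drive one handshake step, then poll the doorbell status until the
 * firmware responds (DRBL_HANDSHAKE_ISR) or FW_MAX_DELAY seconds elapse.
 */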
1227 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1228 {
1229         unsigned int isr_status;
1230         unsigned long before;
1231
1232         before = jiffies;
1233         mvumi_handshake(mhba);
1234         do {
1235                 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1236
1237                 if (mhba->fw_state == FW_STATE_STARTED)
1238                         return 0;
1239                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1240                         dev_err(&mhba->pdev->dev,
1241                                 "no handshake response at state 0x%x.\n",
1242                                   mhba->fw_state);
1243                         dev_err(&mhba->pdev->dev,
1244                                 "isr : global=0x%x,status=0x%x.\n",
1245                                         mhba->global_isr, isr_status);
1246                         return -1;
1247                 }
1248                 rmb();
1249                 usleep_range(1000, 2000);
1250         } while (!(isr_status & DRBL_HANDSHAKE_ISR));
1251
1252         return 0;
1253 }
1254
1255 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1256 {
1257         unsigned int tmp;
1258         unsigned long before;
1259
1260         before = jiffies;
1261         tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1262         while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1263                 if (tmp != HANDSHAKE_READYSTATE)
1264                         iowrite32(DRBL_MU_RESET,
1265                                         mhba->regs->pciea_to_arm_drbl_reg);
1266                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1267                         dev_err(&mhba->pdev->dev,
1268                                 "invalid signature [0x%x].\n", tmp);
1269                         return -1;
1270                 }
1271                 usleep_range(1000, 2000);
1272                 rmb();
1273                 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1274         }
1275
1276         mhba->fw_state = FW_STATE_STARTING;
1277         dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1278         do {
1279                 if (mvumi_handshake_event(mhba)) {
1280                         dev_err(&mhba->pdev->dev,
1281                                         "handshake failed at state 0x%x.\n",
1282                                                 mhba->fw_state);
1283                         return -1;
1284                 }
1285         } while (mhba->fw_state != FW_STATE_STARTED);
1286
1287         dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1288
1289         return 0;
1290 }
1291
1292 static unsigned char mvumi_start(struct mvumi_hba *mhba)
1293 {
1294         unsigned int tmp;
1295         struct mvumi_hw_regs *regs = mhba->regs;
1296
1297         /* clear Door bell */
1298         tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1299         iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1300
1301         iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1302         tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1303         iowrite32(tmp, regs->enpointa_mask_reg);
1304         msleep(100);
1305         if (mvumi_check_handshake(mhba))
1306                 return -1;
1307
1308         return 0;
1309 }
1310
1311 /**
1312  * mvumi_complete_cmd - Completes a command
1313  * @mhba:                       Adapter soft state
1314  * @cmd:                        Command to be completed
1315  */
1316 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1317                                         struct mvumi_rsp_frame *ob_frame)
1318 {
1319         struct scsi_cmnd *scmd = cmd->scmd;
1320
1321         cmd->scmd->SCp.ptr = NULL;
1322         scmd->result = ob_frame->req_status;
1323
1324         switch (ob_frame->req_status) {
1325         case SAM_STAT_GOOD:
1326                 scmd->result |= DID_OK << 16;
1327                 break;
1328         case SAM_STAT_BUSY:
1329                 scmd->result |= DID_BUS_BUSY << 16;
1330                 break;
1331         case SAM_STAT_CHECK_CONDITION:
1332                 scmd->result |= (DID_OK << 16);
1333                 if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1334                         memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1335                                 sizeof(struct mvumi_sense_data));
1336                         scmd->result |=  (DRIVER_SENSE << 24);
1337                 }
1338                 break;
1339         default:
1340                 scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1341                 break;
1342         }
1343
1344         if (scsi_bufflen(scmd))
1345                 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
1346                              scsi_sg_count(scmd),
1347                              scmd->sc_data_direction);
1348         cmd->scmd->scsi_done(scmd);
1349         mvumi_return_cmd(mhba, cmd);
1350 }
1351
1352 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1353                                                 struct mvumi_cmd *cmd,
1354                                         struct mvumi_rsp_frame *ob_frame)
1355 {
1356         if (atomic_read(&cmd->sync_cmd)) {
1357                 cmd->cmd_status = ob_frame->req_status;
1358
1359                 if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1360                                 (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1361                                 cmd->data_buf) {
1362                         memcpy(cmd->data_buf, ob_frame->payload,
1363                                         sizeof(struct mvumi_sense_data));
1364                 }
1365                 atomic_dec(&cmd->sync_cmd);
1366                 wake_up(&mhba->int_cmd_wait_q);
1367         }
1368 }
1369
1370 static void mvumi_show_event(struct mvumi_hba *mhba,
1371                         struct mvumi_driver_event *ptr)
1372 {
1373         unsigned int i;
1374
1375         dev_warn(&mhba->pdev->dev,
1376                 "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1377                 ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
        if (ptr->param_count) {
                printk(KERN_WARNING "Event param(len 0x%x): ",
                                                ptr->param_count);
                for (i = 0; i < ptr->param_count; i++)
                        printk(KERN_CONT "0x%x ", ptr->params[i]);

                printk(KERN_CONT "\n");
        }

        if (ptr->sense_data_length) {
                printk(KERN_WARNING "Event sense data(len 0x%x): ",
                                                ptr->sense_data_length);
                for (i = 0; i < ptr->sense_data_length; i++)
                        printk(KERN_CONT "0x%x ", ptr->sense_data[i]);
                printk(KERN_CONT "\n");
        }
1394 }
1395
1396 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1397 {
1398         struct scsi_device *sdev;
1399         int ret = -1;
1400
1401         if (status == DEVICE_OFFLINE) {
1402                 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1403                 if (sdev) {
1404                         dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1405                                                                 sdev->id, 0);
1406                         scsi_remove_device(sdev);
1407                         scsi_device_put(sdev);
1408                         ret = 0;
                } else
                        dev_err(&mhba->pdev->dev, "no disk[%d] to remove\n",
                                                                        devid);
        } else if (status == DEVICE_ONLINE) {
                sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
                if (!sdev) {
                        scsi_add_device(mhba->shost, 0, devid, 0);
                        dev_dbg(&mhba->pdev->dev, "add disk %d-%d-%d.\n", 0,
                                                                devid, 0);
                        ret = 0;
                } else {
                        dev_err(&mhba->pdev->dev,
                                "disk %d-%d-%d already exists, skip adding\n",
                                0, devid, 0);
                        scsi_device_put(sdev);
                }
        }
1424         }
1425         return ret;
1426 }
1427
1428 static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1429         unsigned int id, struct mvumi_cmd *cmd)
1430 {
1431         struct mvumi_msg_frame *frame;
1432         u64 wwid = 0;
1433         int cmd_alloc = 0;
1434         int data_buf_len = 64;
1435
1436         if (!cmd) {
1437                 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1438                 if (cmd)
1439                         cmd_alloc = 1;
1440                 else
1441                         return 0;
1442         } else {
1443                 memset(cmd->data_buf, 0, data_buf_len);
1444         }
1445         cmd->scmd = NULL;
1446         cmd->cmd_status = REQ_STATUS_PENDING;
1447         atomic_set(&cmd->sync_cmd, 0);
1448         frame = cmd->frame;
1449         frame->device_id = (u16) id;
1450         frame->cmd_flag = CMD_FLAG_DATA_IN;
1451         frame->req_function = CL_FUN_SCSI_CMD;
1452         frame->cdb_length = 6;
1453         frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1454         memset(frame->cdb, 0, frame->cdb_length);
1455         frame->cdb[0] = INQUIRY;
1456         frame->cdb[4] = frame->data_transfer_length;
1457
1458         mvumi_issue_blocked_cmd(mhba, cmd);
1459
1460         if (cmd->cmd_status == SAM_STAT_GOOD) {
1461                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1462                         wwid = id + 1;
1463                 else
1464                         memcpy((void *)&wwid,
1465                                (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1466                                MVUMI_INQUIRY_UUID_LEN);
1467                 dev_dbg(&mhba->pdev->dev,
1468                         "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1469         } else {
1470                 wwid = 0;
1471         }
1472         if (cmd_alloc)
1473                 mvumi_delete_internal_cmd(mhba, cmd);
1474
1475         return wwid;
1476 }
1477
1478 static void mvumi_detach_devices(struct mvumi_hba *mhba)
1479 {
        struct mvumi_device *mv_dev = NULL, *dev_next;
1481         struct scsi_device *sdev = NULL;
1482
1483         mutex_lock(&mhba->device_lock);
1484
1485         /* detach Hard Disk */
1486         list_for_each_entry_safe(mv_dev, dev_next,
1487                 &mhba->shost_dev_list, list) {
1488                 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1489                 list_del_init(&mv_dev->list);
1490                 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1491                         mv_dev->id, mv_dev->wwid);
1492                 kfree(mv_dev);
1493         }
1494         list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1495                 list_del_init(&mv_dev->list);
1496                 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1497                         mv_dev->id, mv_dev->wwid);
1498                 kfree(mv_dev);
1499         }
1500
1501         /* detach virtual device */
1502         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1503                 sdev = scsi_device_lookup(mhba->shost, 0,
1504                                                 mhba->max_target_id - 1, 0);
1505
1506         if (sdev) {
1507                 scsi_remove_device(sdev);
1508                 scsi_device_put(sdev);
1509         }
1510
1511         mutex_unlock(&mhba->device_lock);
1512 }
1513
1514 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1515 {
1516         struct scsi_device *sdev;
1517
1518         sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1519         if (sdev) {
1520                 scsi_rescan_device(&sdev->sdev_gendev);
1521                 scsi_device_put(sdev);
1522         }
1523 }
1524
1525 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1526 {
1527         struct mvumi_device *mv_dev = NULL;
1528
1529         list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1530                 if (mv_dev->wwid == wwid) {
1531                         if (mv_dev->id != id) {
                                dev_err(&mhba->pdev->dev,
                                        "%s: same wwid[%llx] but"
                                        " different id[%d %d]\n",
                                        __func__, mv_dev->wwid, mv_dev->id, id);
1536                                 return -1;
1537                         } else {
1538                                 if (mhba->pdev->device ==
1539                                                 PCI_DEVICE_ID_MARVELL_MV9143)
1540                                         mvumi_rescan_devices(mhba, id);
1541                                 return 1;
1542                         }
1543                 }
1544         }
1545         return 0;
1546 }
1547
1548 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1549 {
1550         struct mvumi_device *mv_dev = NULL, *dev_next;
1551
1552         list_for_each_entry_safe(mv_dev, dev_next,
1553                                 &mhba->shost_dev_list, list) {
1554                 if (mv_dev->id == id) {
1555                         dev_dbg(&mhba->pdev->dev,
1556                                 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1557                                 mv_dev->id, mv_dev->wwid);
1558                         mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1559                         list_del_init(&mv_dev->list);
1560                         kfree(mv_dev);
1561                 }
1562         }
1563 }
1564
1565 static int mvumi_probe_devices(struct mvumi_hba *mhba)
1566 {
1567         int id, maxid;
1568         u64 wwid = 0;
1569         struct mvumi_device *mv_dev = NULL;
1570         struct mvumi_cmd *cmd = NULL;
1571         int found = 0;
1572
1573         cmd = mvumi_create_internal_cmd(mhba, 64);
1574         if (!cmd)
1575                 return -1;
1576
1577         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1578                 maxid = mhba->max_target_id;
1579         else
1580                 maxid = mhba->max_target_id - 1;
1581
1582         for (id = 0; id < maxid; id++) {
1583                 wwid = mvumi_inquiry(mhba, id, cmd);
1584                 if (!wwid) {
1585                         /* device no response, remove it */
1586                         mvumi_remove_devices(mhba, id);
1587                 } else {
1588                         /* device response, add it */
1589                         found = mvumi_match_devices(mhba, id, wwid);
1590                         if (!found) {
1591                                 mvumi_remove_devices(mhba, id);
1592                                 mv_dev = kzalloc(sizeof(struct mvumi_device),
1593                                                                 GFP_KERNEL);
1594                                 if (!mv_dev) {
1595                                         dev_err(&mhba->pdev->dev,
1596                                                 "%s alloc mv_dev failed\n",
1597                                                 __func__);
1598                                         continue;
1599                                 }
1600                                 mv_dev->id = id;
1601                                 mv_dev->wwid = wwid;
1602                                 mv_dev->sdev = NULL;
1603                                 INIT_LIST_HEAD(&mv_dev->list);
1604                                 list_add_tail(&mv_dev->list,
1605                                               &mhba->mhba_dev_list);
1606                                 dev_dbg(&mhba->pdev->dev,
1607                                         "probe a new device(0:%d:0)"
1608                                         " wwid(%llx)\n", id, mv_dev->wwid);
                        } else if (found == -1) {
                                /* duplicate wwid, free the internal cmd
                                 * before bailing out
                                 */
                                mvumi_delete_internal_cmd(mhba, cmd);
                                return -1;
                        }
1613                 }
1614         }
1615
        mvumi_delete_internal_cmd(mhba, cmd);
1618
1619         return 0;
1620 }
1621
1622 static int mvumi_rescan_bus(void *data)
1623 {
1624         int ret = 0;
1625         struct mvumi_hba *mhba = (struct mvumi_hba *) data;
        struct mvumi_device *mv_dev = NULL, *dev_next;
1627
1628         while (!kthread_should_stop()) {
1629
1630                 set_current_state(TASK_INTERRUPTIBLE);
1631                 if (!atomic_read(&mhba->pnp_count))
1632                         schedule();
1633                 msleep(1000);
1634                 atomic_set(&mhba->pnp_count, 0);
1635                 __set_current_state(TASK_RUNNING);
1636
1637                 mutex_lock(&mhba->device_lock);
1638                 ret = mvumi_probe_devices(mhba);
1639                 if (!ret) {
1640                         list_for_each_entry_safe(mv_dev, dev_next,
1641                                                  &mhba->mhba_dev_list, list) {
1642                                 if (mvumi_handle_hotplug(mhba, mv_dev->id,
1643                                                          DEVICE_ONLINE)) {
                                        dev_err(&mhba->pdev->dev,
                                                "%s: add device(0:%d:0) failed,"
                                                " wwid(%llx) already exists\n",
                                                __func__,
                                                mv_dev->id, mv_dev->wwid);
1649                                         list_del_init(&mv_dev->list);
1650                                         kfree(mv_dev);
1651                                 } else {
1652                                         list_move_tail(&mv_dev->list,
1653                                                        &mhba->shost_dev_list);
1654                                 }
1655                         }
1656                 }
1657                 mutex_unlock(&mhba->device_lock);
1658         }
1659         return 0;
1660 }
1661
1662 static void mvumi_proc_msg(struct mvumi_hba *mhba,
1663                                         struct mvumi_hotplug_event *param)
1664 {
1665         u16 size = param->size;
1666         const unsigned long *ar_bitmap;
1667         const unsigned long *re_bitmap;
1668         int index;
1669
1670         if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1671                 index = -1;
1672                 ar_bitmap = (const unsigned long *) param->bitmap;
1673                 re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1674
1675                 mutex_lock(&mhba->sas_discovery_mutex);
1676                 do {
1677                         index = find_next_zero_bit(ar_bitmap, size, index + 1);
1678                         if (index >= size)
1679                                 break;
1680                         mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1681                 } while (1);
1682
1683                 index = -1;
1684                 do {
1685                         index = find_next_zero_bit(re_bitmap, size, index + 1);
1686                         if (index >= size)
1687                                 break;
1688                         mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1689                 } while (1);
1690                 mutex_unlock(&mhba->sas_discovery_mutex);
1691         }
1692 }
1693
1694 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1695 {
1696         if (msg == APICDB1_EVENT_GETEVENT) {
1697                 int i, count;
1698                 struct mvumi_driver_event *param = NULL;
1699                 struct mvumi_event_req *er = buffer;
1700                 count = er->count;
                if (count > MAX_EVENTS_RETURNED) {
                        dev_err(&mhba->pdev->dev, "event count[0x%x] exceeds"
                                        " the max event count[0x%x].\n",
                                        count, MAX_EVENTS_RETURNED);
1705                         return;
1706                 }
1707                 for (i = 0; i < count; i++) {
1708                         param = &er->events[i];
1709                         mvumi_show_event(mhba, param);
1710                 }
1711         } else if (msg == APICDB1_HOST_GETEVENT) {
1712                 mvumi_proc_msg(mhba, buffer);
1713         }
1714 }
1715
1716 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1717 {
1718         struct mvumi_cmd *cmd;
1719         struct mvumi_msg_frame *frame;
1720
1721         cmd = mvumi_create_internal_cmd(mhba, 512);
1722         if (!cmd)
1723                 return -1;
1724         cmd->scmd = NULL;
1725         cmd->cmd_status = REQ_STATUS_PENDING;
1726         atomic_set(&cmd->sync_cmd, 0);
1727         frame = cmd->frame;
1728         frame->device_id = 0;
1729         frame->cmd_flag = CMD_FLAG_DATA_IN;
1730         frame->req_function = CL_FUN_SCSI_CMD;
1731         frame->cdb_length = MAX_COMMAND_SIZE;
1732         frame->data_transfer_length = sizeof(struct mvumi_event_req);
1733         memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1734         frame->cdb[0] = APICDB0_EVENT;
1735         frame->cdb[1] = msg;
1736         mvumi_issue_blocked_cmd(mhba, cmd);
1737
1738         if (cmd->cmd_status != SAM_STAT_GOOD)
1739                 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1740                                                         cmd->cmd_status);
1741         else
1742                 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1743
1744         mvumi_delete_internal_cmd(mhba, cmd);
1745         return 0;
1746 }
1747
1748 static void mvumi_scan_events(struct work_struct *work)
1749 {
1750         struct mvumi_events_wq *mu_ev =
1751                 container_of(work, struct mvumi_events_wq, work_q);
1752
1753         mvumi_get_event(mu_ev->mhba, mu_ev->event);
1754         kfree(mu_ev);
1755 }
1756
1757 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1758 {
1759         struct mvumi_events_wq *mu_ev;
1760
1761         while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1762                 if (isr_status & DRBL_BUS_CHANGE) {
1763                         atomic_inc(&mhba->pnp_count);
1764                         wake_up_process(mhba->dm_thread);
1765                         isr_status &= ~(DRBL_BUS_CHANGE);
1766                         continue;
1767                 }
1768
1769                 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1770                 if (mu_ev) {
1771                         INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1772                         mu_ev->mhba = mhba;
1773                         mu_ev->event = APICDB1_EVENT_GETEVENT;
1774                         isr_status &= ~(DRBL_EVENT_NOTIFY);
1775                         mu_ev->param = NULL;
1776                         schedule_work(&mu_ev->work_q);
1777                 }
1778         }
1779 }
1780
1781 static void mvumi_handle_clob(struct mvumi_hba *mhba)
1782 {
1783         struct mvumi_rsp_frame *ob_frame;
1784         struct mvumi_cmd *cmd;
1785         struct mvumi_ob_data *pool;
1786
1787         while (!list_empty(&mhba->free_ob_list)) {
1788                 pool = list_first_entry(&mhba->free_ob_list,
1789                                                 struct mvumi_ob_data, list);
1790                 list_del_init(&pool->list);
1791                 list_add_tail(&pool->list, &mhba->ob_data_list);
1792
1793                 ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1794                 cmd = mhba->tag_cmd[ob_frame->tag];
1795
1796                 atomic_dec(&mhba->fw_outstanding);
                mhba->tag_cmd[ob_frame->tag] = NULL;
1798                 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1799                 if (cmd->scmd)
1800                         mvumi_complete_cmd(mhba, cmd, ob_frame);
1801                 else
1802                         mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1803         }
1804         mhba->instancet->fire_cmd(mhba, NULL);
1805 }
1806
1807 static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1808 {
1809         struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1810         unsigned long flags;
1811
1812         spin_lock_irqsave(mhba->shost->host_lock, flags);
1813         if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1814                 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1815                 return IRQ_NONE;
1816         }
1817
1818         if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1819                 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1820                         mvumi_launch_events(mhba, mhba->isr_status);
1821                 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1822                         dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1823                         mvumi_handshake(mhba);
1824                 }
1825
1826         }
1827
1828         if (mhba->global_isr & mhba->regs->int_comaout)
1829                 mvumi_receive_ob_list_entry(mhba);
1830
1831         mhba->global_isr = 0;
1832         mhba->isr_status = 0;
1833         if (mhba->fw_state == FW_STATE_STARTED)
1834                 mvumi_handle_clob(mhba);
1835         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1836         return IRQ_HANDLED;
1837 }
1838
1839 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1840                                                 struct mvumi_cmd *cmd)
1841 {
1842         void *ib_entry;
1843         struct mvumi_msg_frame *ib_frame;
1844         unsigned int frame_len;
1845
1846         ib_frame = cmd->frame;
1847         if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1848                 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1849                 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1850         }
1851         if (tag_is_empty(&mhba->tag_pool)) {
1852                 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1853                 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1854         }
1855         mvumi_get_ib_list_entry(mhba, &ib_entry);
1856
1857         cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1858         cmd->frame->request_id = mhba->io_seq++;
1859         cmd->request_id = cmd->frame->request_id;
1860         mhba->tag_cmd[cmd->frame->tag] = cmd;
1861         frame_len = sizeof(*ib_frame) - 4 +
1862                                 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1863         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1864                 struct mvumi_dyn_list_entry *dle;
1865                 dle = ib_entry;
1866                 dle->src_low_addr =
1867                         cpu_to_le32(lower_32_bits(cmd->frame_phys));
1868                 dle->src_high_addr =
1869                         cpu_to_le32(upper_32_bits(cmd->frame_phys));
1870                 dle->if_length = (frame_len >> 2) & 0xFFF;
1871         } else {
1872                 memcpy(ib_entry, ib_frame, frame_len);
1873         }
1874         return MV_QUEUE_COMMAND_RESULT_SENT;
1875 }
1876
1877 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1878 {
1879         unsigned short num_of_cl_sent = 0;
1880         unsigned int count;
1881         enum mvumi_qc_result result;
1882
1883         if (cmd)
1884                 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1885         count = mhba->instancet->check_ib_list(mhba);
1886         if (list_empty(&mhba->waiting_req_list) || !count)
1887                 return;
1888
1889         do {
1890                 cmd = list_first_entry(&mhba->waiting_req_list,
1891                                        struct mvumi_cmd, queue_pointer);
1892                 list_del_init(&cmd->queue_pointer);
1893                 result = mvumi_send_command(mhba, cmd);
1894                 switch (result) {
1895                 case MV_QUEUE_COMMAND_RESULT_SENT:
1896                         num_of_cl_sent++;
1897                         break;
1898                 case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1899                         list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1900                         if (num_of_cl_sent > 0)
1901                                 mvumi_send_ib_list_entry(mhba);
1902
1903                         return;
1904                 }
1905         } while (!list_empty(&mhba->waiting_req_list) && count--);
1906
1907         if (num_of_cl_sent > 0)
1908                 mvumi_send_ib_list_entry(mhba);
1909 }
1910
1911 /**
1912  * mvumi_enable_intr -  Enables interrupts
1913  * @mhba:               Adapter soft state
1914  */
1915 static void mvumi_enable_intr(struct mvumi_hba *mhba)
1916 {
1917         unsigned int mask;
1918         struct mvumi_hw_regs *regs = mhba->regs;
1919
1920         iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1921         mask = ioread32(regs->enpointa_mask_reg);
1922         mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1923         iowrite32(mask, regs->enpointa_mask_reg);
1924 }
1925
1926 /**
 * mvumi_disable_intr -	Disables interrupts
1928  * @mhba:               Adapter soft state
1929  */
1930 static void mvumi_disable_intr(struct mvumi_hba *mhba)
1931 {
1932         unsigned int mask;
1933         struct mvumi_hw_regs *regs = mhba->regs;
1934
1935         iowrite32(0, regs->arm_to_pciea_mask_reg);
1936         mask = ioread32(regs->enpointa_mask_reg);
1937         mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1938                                                         regs->int_comaerr);
1939         iowrite32(mask, regs->enpointa_mask_reg);
1940 }
1941
1942 static int mvumi_clear_intr(void *extend)
1943 {
1944         struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1945         unsigned int status, isr_status = 0, tmp = 0;
1946         struct mvumi_hw_regs *regs = mhba->regs;
1947
1948         status = ioread32(regs->main_int_cause_reg);
1949         if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1950                 return 1;
1951         if (unlikely(status & regs->int_comaerr)) {
1952                 tmp = ioread32(regs->outb_isr_cause);
1953                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1954                         if (tmp & regs->clic_out_err) {
1955                                 iowrite32(tmp & regs->clic_out_err,
1956                                                         regs->outb_isr_cause);
1957                         }
1958                 } else {
1959                         if (tmp & (regs->clic_in_err | regs->clic_out_err))
1960                                 iowrite32(tmp & (regs->clic_in_err |
1961                                                 regs->clic_out_err),
1962                                                 regs->outb_isr_cause);
1963                 }
1964                 status ^= mhba->regs->int_comaerr;
1965                 /* inbound or outbound parity error, command will timeout */
1966         }
1967         if (status & regs->int_comaout) {
1968                 tmp = ioread32(regs->outb_isr_cause);
1969                 if (tmp & regs->clic_irq)
1970                         iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1971         }
1972         if (status & regs->int_dl_cpu2pciea) {
1973                 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1974                 if (isr_status)
1975                         iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1976         }
1977
1978         mhba->global_isr = status;
1979         mhba->isr_status = isr_status;
1980
1981         return 0;
1982 }
1983
1984 /**
1985  * mvumi_read_fw_status_reg - returns the current FW status value
1986  * @mhba:               Adapter soft state
1987  */
1988 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1989 {
1990         unsigned int status;
1991
1992         status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1993         if (status)
1994                 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
1995         return status;
1996 }
1997
1998 static struct mvumi_instance_template mvumi_instance_9143 = {
1999         .fire_cmd = mvumi_fire_cmd,
2000         .enable_intr = mvumi_enable_intr,
2001         .disable_intr = mvumi_disable_intr,
2002         .clear_intr = mvumi_clear_intr,
2003         .read_fw_status_reg = mvumi_read_fw_status_reg,
2004         .check_ib_list = mvumi_check_ib_list_9143,
2005         .check_ob_list = mvumi_check_ob_list_9143,
2006         .reset_host = mvumi_reset_host_9143,
2007 };
2008
2009 static struct mvumi_instance_template mvumi_instance_9580 = {
2010         .fire_cmd = mvumi_fire_cmd,
2011         .enable_intr = mvumi_enable_intr,
2012         .disable_intr = mvumi_disable_intr,
2013         .clear_intr = mvumi_clear_intr,
2014         .read_fw_status_reg = mvumi_read_fw_status_reg,
2015         .check_ib_list = mvumi_check_ib_list_9580,
2016         .check_ob_list = mvumi_check_ob_list_9580,
2017         .reset_host = mvumi_reset_host_9580,
2018 };
2019
2020 static int mvumi_slave_configure(struct scsi_device *sdev)
2021 {
2022         struct mvumi_hba *mhba;
2023         unsigned char bitcount = sizeof(unsigned char) * 8;
2024
2025         mhba = (struct mvumi_hba *) sdev->host->hostdata;
2026         if (sdev->id >= mhba->max_target_id)
2027                 return -EINVAL;
2028
2029         mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2030         return 0;
2031 }
2032
2033 /**
2034  * mvumi_build_frame -  Prepares a direct cdb (DCDB) command
2035  * @mhba:               Adapter soft state
2036  * @scmd:               SCSI command
 * @cmd:                Command to be prepared
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
2041  */
static int mvumi_build_frame(struct mvumi_hba *mhba,
                                struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2044 {
2045         struct mvumi_msg_frame *pframe;
2046
2047         cmd->scmd = scmd;
2048         cmd->cmd_status = REQ_STATUS_PENDING;
2049         pframe = cmd->frame;
2050         pframe->device_id = ((unsigned short) scmd->device->id) |
2051                                 (((unsigned short) scmd->device->lun) << 8);
2052         pframe->cmd_flag = 0;
2053
2054         switch (scmd->sc_data_direction) {
2055         case DMA_NONE:
2056                 pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2057                 break;
2058         case DMA_FROM_DEVICE:
2059                 pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2060                 break;
2061         case DMA_TO_DEVICE:
2062                 pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2063                 break;
2064         case DMA_BIDIRECTIONAL:
2065         default:
2066                 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2067                         "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2068                 goto error;
2069         }
2070
2071         pframe->cdb_length = scmd->cmd_len;
2072         memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2073         pframe->req_function = CL_FUN_SCSI_CMD;
2074         if (scsi_bufflen(scmd)) {
2075                 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2076                         &pframe->sg_counts))
2077                         goto error;
2078
2079                 pframe->data_transfer_length = scsi_bufflen(scmd);
2080         } else {
2081                 pframe->sg_counts = 0;
2082                 pframe->data_transfer_length = 0;
2083         }
2084         return 0;
2085
2086 error:
2087         scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2088                 SAM_STAT_CHECK_CONDITION;
2089         scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2090                                                                         0);
2091         return -1;
2092 }
2093
/**
 * mvumi_queue_command -	Queue entry point
 * @shost:			SCSI host the command is queued on
 * @scmd:			SCSI command to be queued
 */
2099 static int mvumi_queue_command(struct Scsi_Host *shost,
2100                                         struct scsi_cmnd *scmd)
2101 {
2102         struct mvumi_cmd *cmd;
2103         struct mvumi_hba *mhba;
2104         unsigned long irq_flags;
2105
2106         spin_lock_irqsave(shost->host_lock, irq_flags);
2107         scsi_cmd_get_serial(shost, scmd);
2108
2109         mhba = (struct mvumi_hba *) shost->hostdata;
2110         scmd->result = 0;
2111         cmd = mvumi_get_cmd(mhba);
2112         if (unlikely(!cmd)) {
2113                 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2114                 return SCSI_MLQUEUE_HOST_BUSY;
2115         }
2116
2117         if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2118                 goto out_return_cmd;
2119
2120         cmd->scmd = scmd;
2121         scmd->SCp.ptr = (char *) cmd;
2122         mhba->instancet->fire_cmd(mhba, cmd);
2123         spin_unlock_irqrestore(shost->host_lock, irq_flags);
2124         return 0;
2125
2126 out_return_cmd:
2127         mvumi_return_cmd(mhba, cmd);
2128         scmd->scsi_done(scmd);
2129         spin_unlock_irqrestore(shost->host_lock, irq_flags);
2130         return 0;
2131 }
2132
2133 static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2134 {
2135         struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2136         struct Scsi_Host *host = scmd->device->host;
2137         struct mvumi_hba *mhba = shost_priv(host);
2138         unsigned long flags;
2139
2140         spin_lock_irqsave(mhba->shost->host_lock, flags);
2141
2142         if (mhba->tag_cmd[cmd->frame->tag]) {
                mhba->tag_cmd[cmd->frame->tag] = NULL;
2144                 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2145         }
2146         if (!list_empty(&cmd->queue_pointer))
2147                 list_del_init(&cmd->queue_pointer);
2148         else
2149                 atomic_dec(&mhba->fw_outstanding);
2150
2151         scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2152         scmd->SCp.ptr = NULL;
2153         if (scsi_bufflen(scmd)) {
2154                 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
2155                              scsi_sg_count(scmd),
2156                              scmd->sc_data_direction);
2157         }
2158         mvumi_return_cmd(mhba, cmd);
2159         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2160
2161         return BLK_EH_DONE;
2162 }
2163
2164 static int
2165 mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2166                         sector_t capacity, int geom[])
2167 {
2168         int heads, sectors;
2169         sector_t cylinders;
2170         unsigned long tmp;
2171
2172         heads = 64;
2173         sectors = 32;
2174         tmp = heads * sectors;
2175         cylinders = capacity;
2176         sector_div(cylinders, tmp);
2177
2178         if (capacity >= 0x200000) {
2179                 heads = 255;
2180                 sectors = 63;
2181                 tmp = heads * sectors;
2182                 cylinders = capacity;
2183                 sector_div(cylinders, tmp);
2184         }
2185         geom[0] = heads;
2186         geom[1] = sectors;
2187         geom[2] = cylinders;
2188
2189         return 0;
2190 }
2191
static struct scsi_host_template mvumi_template = {
        .module = THIS_MODULE,
2195         .name = "Marvell Storage Controller",
2196         .slave_configure = mvumi_slave_configure,
2197         .queuecommand = mvumi_queue_command,
2198         .eh_timed_out = mvumi_timed_out,
2199         .eh_host_reset_handler = mvumi_host_reset,
2200         .bios_param = mvumi_bios_param,
2201         .dma_boundary = PAGE_SIZE - 1,
2202         .this_id = -1,
2203 };
2204
2205 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2206 {
2207         void *base = NULL;
2208         struct mvumi_hw_regs *regs;
2209
2210         switch (mhba->pdev->device) {
2211         case PCI_DEVICE_ID_MARVELL_MV9143:
2212                 mhba->mmio = mhba->base_addr[0];
2213                 base = mhba->mmio;
2214                 if (!mhba->regs) {
2215                         mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2216                         if (mhba->regs == NULL)
2217                                 return -ENOMEM;
2218                 }
2219                 regs = mhba->regs;
2220
2221                 /* For Arm */
2222                 regs->ctrl_sts_reg          = base + 0x20104;
2223                 regs->rstoutn_mask_reg      = base + 0x20108;
2224                 regs->sys_soft_rst_reg      = base + 0x2010C;
2225                 regs->main_int_cause_reg    = base + 0x20200;
2226                 regs->enpointa_mask_reg     = base + 0x2020C;
2227                 regs->rstoutn_en_reg        = base + 0xF1400;
2228                 /* For Doorbell */
2229                 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2230                 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2231                 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2232                 regs->pciea_to_arm_msg0     = base + 0x20430;
2233                 regs->pciea_to_arm_msg1     = base + 0x20434;
2234                 regs->arm_to_pciea_msg0     = base + 0x20438;
2235                 regs->arm_to_pciea_msg1     = base + 0x2043C;
2236
2237                 /* For Message Unit */
2238
2239                 regs->inb_aval_count_basel  = base + 0x508;
2240                 regs->inb_aval_count_baseh  = base + 0x50C;
2241                 regs->inb_write_pointer     = base + 0x518;
2242                 regs->inb_read_pointer      = base + 0x51C;
2244                 regs->outb_copy_basel       = base + 0x5B0;
2245                 regs->outb_copy_baseh       = base + 0x5B4;
2246                 regs->outb_copy_pointer     = base + 0x544;
2247                 regs->outb_read_pointer     = base + 0x548;
2248                 regs->outb_isr_cause        = base + 0x560;
2249                 regs->outb_coal_cfg         = base + 0x568;
2250                 /* Bit setting for HW */
2251                 regs->int_comaout           = 1 << 8;
2252                 regs->int_comaerr           = 1 << 6;
2253                 regs->int_dl_cpu2pciea      = 1 << 1;
2254                 regs->cl_pointer_toggle     = 1 << 12;
2255                 regs->clic_irq              = 1 << 1;
2256                 regs->clic_in_err           = 1 << 8;
2257                 regs->clic_out_err          = 1 << 12;
2258                 regs->cl_slot_num_mask      = 0xFFF;
2259                 regs->int_drbl_int_mask     = 0x3FFFFFFF;
2260                 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2261                                                         regs->int_comaerr;
2262                 break;
2263         case PCI_DEVICE_ID_MARVELL_MV9580:
2264                 mhba->mmio = mhba->base_addr[2];
2265                 base = mhba->mmio;
2266                 if (!mhba->regs) {
2267                         mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2268                         if (mhba->regs == NULL)
2269                                 return -ENOMEM;
2270                 }
2271                 regs = mhba->regs;
2272                 /* For Arm */
2273                 regs->ctrl_sts_reg          = base + 0x20104;
2274                 regs->rstoutn_mask_reg      = base + 0x1010C;
2275                 regs->sys_soft_rst_reg      = base + 0x10108;
2276                 regs->main_int_cause_reg    = base + 0x10200;
2277                 regs->enpointa_mask_reg     = base + 0x1020C;
2278                 regs->rstoutn_en_reg        = base + 0xF1400;
2279
2280                 /* For Doorbell */
2281                 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2282                 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2283                 regs->arm_to_pciea_mask_reg = base + 0x10484;
2284                 regs->pciea_to_arm_msg0     = base + 0x10400;
2285                 regs->pciea_to_arm_msg1     = base + 0x10404;
2286                 regs->arm_to_pciea_msg0     = base + 0x10420;
2287                 regs->arm_to_pciea_msg1     = base + 0x10424;
2288
                /* For reset */
                regs->reset_request         = base + 0x10108;
                regs->reset_enable          = base + 0x1010C;
2292
2293                 /* For Message Unit */
2294                 regs->inb_aval_count_basel  = base + 0x4008;
2295                 regs->inb_aval_count_baseh  = base + 0x400C;
2296                 regs->inb_write_pointer     = base + 0x4018;
2297                 regs->inb_read_pointer      = base + 0x401C;
2298                 regs->outb_copy_basel       = base + 0x4058;
2299                 regs->outb_copy_baseh       = base + 0x405C;
2300                 regs->outb_copy_pointer     = base + 0x406C;
2301                 regs->outb_read_pointer     = base + 0x4070;
2302                 regs->outb_coal_cfg         = base + 0x4080;
2303                 regs->outb_isr_cause        = base + 0x4088;
2304                 /* Bit setting for HW */
2305                 regs->int_comaout           = 1 << 4;
2306                 regs->int_dl_cpu2pciea      = 1 << 12;
2307                 regs->int_comaerr           = 1 << 29;
2308                 regs->cl_pointer_toggle     = 1 << 14;
2309                 regs->cl_slot_num_mask      = 0x3FFF;
2310                 regs->clic_irq              = 1 << 0;
2311                 regs->clic_out_err          = 1 << 1;
2312                 regs->int_drbl_int_mask     = 0x3FFFFFFF;
2313                 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2314                 break;
        default:
                return -EINVAL;
2318         }
2319
2320         return 0;
2321 }
2322
2323 /**
2324  * mvumi_init_fw -      Initializes the FW
2325  * @mhba:               Adapter soft state
2326  *
2327  * This is the main function for initializing firmware.
2328  */
2329 static int mvumi_init_fw(struct mvumi_hba *mhba)
2330 {
2331         int ret = 0;
2332
2333         if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2334                 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2335                 return -EBUSY;
2336         }
2337         ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2338         if (ret)
2339                 goto fail_ioremap;
2340
2341         switch (mhba->pdev->device) {
2342         case PCI_DEVICE_ID_MARVELL_MV9143:
2343                 mhba->instancet = &mvumi_instance_9143;
2344                 mhba->io_seq = 0;
2345                 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2346                 mhba->request_id_enabled = 1;
2347                 break;
2348         case PCI_DEVICE_ID_MARVELL_MV9580:
2349                 mhba->instancet = &mvumi_instance_9580;
2350                 mhba->io_seq = 0;
2351                 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2352                 break;
2353         default:
2354                 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2355                                                         mhba->pdev->device);
2356                 mhba->instancet = NULL;
2357                 ret = -EINVAL;
2358                 goto fail_alloc_mem;
2359         }
        dev_dbg(&mhba->pdev->dev, "found device id 0x%04X.\n",
                                                        mhba->pdev->device);
2362         ret = mvumi_cfg_hw_reg(mhba);
2363         if (ret) {
2364                 dev_err(&mhba->pdev->dev,
2365                         "failed to allocate memory for reg\n");
2366                 ret = -ENOMEM;
2367                 goto fail_alloc_mem;
2368         }
2369         mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
2370                         HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
2371         if (!mhba->handshake_page) {
2372                 dev_err(&mhba->pdev->dev,
2373                         "failed to allocate memory for handshake\n");
2374                 ret = -ENOMEM;
2375                 goto fail_alloc_page;
2376         }
2377
2378         if (mvumi_start(mhba)) {
2379                 ret = -EINVAL;
2380                 goto fail_ready_state;
2381         }
2382         ret = mvumi_alloc_cmds(mhba);
2383         if (ret)
2384                 goto fail_ready_state;
2385
2386         return 0;
2387
2388 fail_ready_state:
2389         mvumi_release_mem_resource(mhba);
2390         dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
2391                 mhba->handshake_page, mhba->handshake_page_phys);
2392 fail_alloc_page:
2393         kfree(mhba->regs);
2394 fail_alloc_mem:
2395         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2396 fail_ioremap:
2397         pci_release_regions(mhba->pdev);
2398
2399         return ret;
2400 }
2401
2402 /**
2403  * mvumi_io_attach -    Attaches this driver to SCSI mid-layer
2404  * @mhba:               Adapter soft state
2405  */
2406 static int mvumi_io_attach(struct mvumi_hba *mhba)
2407 {
2408         struct Scsi_Host *host = mhba->shost;
2409         struct scsi_device *sdev = NULL;
2410         int ret;
2411         unsigned int max_sg = (mhba->ib_max_size + 4 -
2412                 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2413
2414         host->irq = mhba->pdev->irq;
2415         host->unique_id = mhba->unique_id;
2416         host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2417         host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2418         host->max_sectors = mhba->max_transfer_size / 512;
2419         host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2420         host->max_id = mhba->max_target_id;
2421         host->max_cmd_len = MAX_COMMAND_SIZE;
2422
2423         ret = scsi_add_host(host, &mhba->pdev->dev);
2424         if (ret) {
2425                 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2426                 return ret;
2427         }
2428         mhba->fw_flag |= MVUMI_FW_ATTACH;
2429
2430         mutex_lock(&mhba->sas_discovery_mutex);
2431         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2432                 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2433         else
2434                 ret = 0;
2435         if (ret) {
2436                 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2437                 mutex_unlock(&mhba->sas_discovery_mutex);
2438                 goto fail_add_device;
2439         }
2440
2441         mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2442                                                 mhba, "mvumi_scanthread");
2443         if (IS_ERR(mhba->dm_thread)) {
2444                 dev_err(&mhba->pdev->dev,
2445                         "failed to create device scan thread\n");
2446                 mutex_unlock(&mhba->sas_discovery_mutex);
2447                 goto fail_create_thread;
2448         }
2449         atomic_set(&mhba->pnp_count, 1);
2450         wake_up_process(mhba->dm_thread);
2451
2452         mutex_unlock(&mhba->sas_discovery_mutex);
2453         return 0;
2454
2455 fail_create_thread:
2456         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2457                 sdev = scsi_device_lookup(mhba->shost, 0,
2458                                                 mhba->max_target_id - 1, 0);
2459         if (sdev) {
2460                 scsi_remove_device(sdev);
2461                 scsi_device_put(sdev);
2462         }
2463 fail_add_device:
2464         scsi_remove_host(mhba->shost);
2465         return ret;
2466 }
2467
2468 /**
2469  * mvumi_probe_one -    PCI hotplug entry point
2470  * @pdev:               PCI device structure
2471  * @id:                 PCI ids of supported hotplugged adapter
2472  */
2473 static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2474 {
2475         struct Scsi_Host *host;
2476         struct mvumi_hba *mhba;
2477         int ret;
2478
2479         dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2480                         pdev->vendor, pdev->device, pdev->subsystem_vendor,
2481                         pdev->subsystem_device);
2482
2483         ret = pci_enable_device(pdev);
2484         if (ret)
2485                 return ret;
2486
2487         ret = mvumi_pci_set_master(pdev);
2488         if (ret)
2489                 goto fail_set_dma_mask;
2490
2491         host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2492         if (!host) {
2493                 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2494                 ret = -ENOMEM;
2495                 goto fail_alloc_instance;
2496         }
2497         mhba = shost_priv(host);
2498
2499         INIT_LIST_HEAD(&mhba->cmd_pool);
2500         INIT_LIST_HEAD(&mhba->ob_data_list);
2501         INIT_LIST_HEAD(&mhba->free_ob_list);
2502         INIT_LIST_HEAD(&mhba->res_list);
2503         INIT_LIST_HEAD(&mhba->waiting_req_list);
2504         mutex_init(&mhba->device_lock);
2505         INIT_LIST_HEAD(&mhba->mhba_dev_list);
2506         INIT_LIST_HEAD(&mhba->shost_dev_list);
2507         atomic_set(&mhba->fw_outstanding, 0);
2508         init_waitqueue_head(&mhba->int_cmd_wait_q);
2509         mutex_init(&mhba->sas_discovery_mutex);
2510
2511         mhba->pdev = pdev;
2512         mhba->shost = host;
2513         mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2514
2515         ret = mvumi_init_fw(mhba);
2516         if (ret)
2517                 goto fail_init_fw;
2518
2519         ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2520                                 "mvumi", mhba);
2521         if (ret) {
2522                 dev_err(&pdev->dev, "failed to register IRQ\n");
2523                 goto fail_init_irq;
2524         }
2525
2526         mhba->instancet->enable_intr(mhba);
2527         pci_set_drvdata(pdev, mhba);
2528
2529         ret = mvumi_io_attach(mhba);
2530         if (ret)
2531                 goto fail_io_attach;
2532
2533         mvumi_backup_bar_addr(mhba);
2534         dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2535
2536         return 0;
2537
2538 fail_io_attach:
2539         mhba->instancet->disable_intr(mhba);
2540         free_irq(mhba->pdev->irq, mhba);
2541 fail_init_irq:
2542         mvumi_release_fw(mhba);
2543 fail_init_fw:
2544         scsi_host_put(host);
2545
2546 fail_alloc_instance:
2547 fail_set_dma_mask:
2548         pci_disable_device(pdev);
2549
2550         return ret;
2551 }
2552
2553 static void mvumi_detach_one(struct pci_dev *pdev)
2554 {
2555         struct Scsi_Host *host;
2556         struct mvumi_hba *mhba;
2557
2558         mhba = pci_get_drvdata(pdev);
2559         if (mhba->dm_thread) {
2560                 kthread_stop(mhba->dm_thread);
2561                 mhba->dm_thread = NULL;
2562         }
2563
2564         mvumi_detach_devices(mhba);
2565         host = mhba->shost;
2566         scsi_remove_host(mhba->shost);
2567         mvumi_flush_cache(mhba);
2568
2569         mhba->instancet->disable_intr(mhba);
2570         free_irq(mhba->pdev->irq, mhba);
2571         mvumi_release_fw(mhba);
2572         scsi_host_put(host);
2573         pci_disable_device(pdev);
2574         dev_dbg(&pdev->dev, "driver is removed!\n");
2575 }
2576
/**
 * mvumi_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
 */
2581 static void mvumi_shutdown(struct pci_dev *pdev)
2582 {
2583         struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2584
2585         mvumi_flush_cache(mhba);
2586 }
2587
2588 static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2589 {
2590         struct mvumi_hba *mhba = NULL;
2591
2592         mhba = pci_get_drvdata(pdev);
2593         mvumi_flush_cache(mhba);
2594
2596         mhba->instancet->disable_intr(mhba);
2597         free_irq(mhba->pdev->irq, mhba);
2598         mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2599         pci_release_regions(pdev);
2600         pci_save_state(pdev);
2601         pci_disable_device(pdev);
2602         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2603
2604         return 0;
2605 }
2606
2607 static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2608 {
2609         int ret;
2610         struct mvumi_hba *mhba = NULL;
2611
2612         mhba = pci_get_drvdata(pdev);
2613
2614         pci_set_power_state(pdev, PCI_D0);
2615         pci_enable_wake(pdev, PCI_D0, 0);
2616         pci_restore_state(pdev);
2617
2618         ret = pci_enable_device(pdev);
2619         if (ret) {
2620                 dev_err(&pdev->dev, "enable device failed\n");
2621                 return ret;
2622         }
2623
        ret = mvumi_pci_set_master(pdev);
        if (ret)
                goto fail;
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2626         if (ret)
2627                 goto fail;
2628         ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2629         if (ret)
2630                 goto fail;
2631         ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2632         if (ret)
2633                 goto release_regions;
2634
2635         if (mvumi_cfg_hw_reg(mhba)) {
2636                 ret = -EINVAL;
2637                 goto unmap_pci_addr;
2638         }
2639
        /* mvumi_cfg_hw_reg() has already selected the proper BAR for mmio */
        mvumi_reset(mhba);
2642
2643         if (mvumi_start(mhba)) {
2644                 ret = -EINVAL;
2645                 goto unmap_pci_addr;
2646         }
2647
2648         ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2649                                 "mvumi", mhba);
2650         if (ret) {
2651                 dev_err(&pdev->dev, "failed to register IRQ\n");
2652                 goto unmap_pci_addr;
2653         }
2654         mhba->instancet->enable_intr(mhba);
2655
2656         return 0;
2657
2658 unmap_pci_addr:
2659         mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2660 release_regions:
2661         pci_release_regions(pdev);
2662 fail:
2663         pci_disable_device(pdev);
2664
2665         return ret;
2666 }
2667
static struct pci_driver mvumi_pci_driver = {
        .name = MV_DRIVER_NAME,
2671         .id_table = mvumi_pci_table,
2672         .probe = mvumi_probe_one,
2673         .remove = mvumi_detach_one,
2674         .shutdown = mvumi_shutdown,
2675 #ifdef CONFIG_PM
2676         .suspend = mvumi_suspend,
2677         .resume = mvumi_resume,
2678 #endif
2679 };
2680
2681 module_pci_driver(mvumi_pci_driver);