1 /* Copyright 2012 STEC, Inc.
2  *
3  * This file is licensed under the terms of the 3-clause
4  * BSD License (http://opensource.org/licenses/BSD-3-Clause)
5  * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
6  * at your option. Both licenses are also available in the LICENSE file
7  * distributed with this project. This file may not be copied, modified,
8  * or distributed except in accordance with those terms.
9  * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
10  * Initial Driver Design!
11  * Thomas Swann <tswann@stec-inc.com>
12  * Interrupt handling.
13  * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
14  * biomode implementation.
15  * Akhil Bhansali <abhansali@stec-inc.com>
16  * Added support for DISCARD / FLUSH and FUA.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/pci.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/blkdev.h>
26 #include <linux/sched.h>
27 #include <linux/interrupt.h>
28 #include <linux/compiler.h>
29 #include <linux/workqueue.h>
30 #include <linux/bitops.h>
31 #include <linux/delay.h>
32 #include <linux/time.h>
33 #include <linux/hdreg.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/completion.h>
36 #include <linux/scatterlist.h>
37 #include <linux/version.h>
38 #include <linux/err.h>
39 #include <linux/aer.h>
40 #include <linux/ctype.h>
41 #include <linux/wait.h>
42 #include <linux/uio.h>
43 #include <scsi/scsi.h>
44 #include <scsi/sg.h>
45 #include <linux/io.h>
46 #include <linux/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include "skd_s1120.h"
50
51 static int skd_dbg_level;
52 static int skd_isr_comp_limit = 4;
53
54 enum {
55         STEC_LINK_2_5GTS = 0,
56         STEC_LINK_5GTS = 1,
57         STEC_LINK_8GTS = 2,
58         STEC_LINK_UNKNOWN = 0xFF
59 };
60
61 enum {
62         SKD_FLUSH_INITIALIZER,
63         SKD_FLUSH_ZERO_SIZE_FIRST,
64         SKD_FLUSH_DATA_SECOND,
65 };
66
67 #define SKD_ASSERT(expr) \
68         do { \
69                 if (unlikely(!(expr))) { \
70                         pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
71                                # expr, __FILE__, __func__, __LINE__); \
72                 } \
73         } while (0)
74
75 #define DRV_NAME "skd"
76 #define DRV_VERSION "2.2.1"
77 #define DRV_BUILD_ID "0260"
78 #define PFX DRV_NAME ": "
79 #define DRV_BIN_VERSION 0x100
80 #define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
81
82 MODULE_AUTHOR("bug-reports: support@stec-inc.com");
83 MODULE_LICENSE("Dual BSD/GPL");
84
85 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
86 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
87
88 #define PCI_VENDOR_ID_STEC      0x1B39
89 #define PCI_DEVICE_ID_S1120     0x0001
90
91 #define SKD_FUA_NV              (1 << 1)
92 #define SKD_MINORS_PER_DEVICE   16
93
94 #define SKD_MAX_QUEUE_DEPTH     200u
95
96 #define SKD_PAUSE_TIMEOUT       (5 * 1000)
97
98 #define SKD_N_FITMSG_BYTES      (512u)
99
100 #define SKD_N_SPECIAL_CONTEXT   32u
101 #define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
102
103 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
104  * 128KB limit.  That allows 4096*4K = 16M xfer size
105  */
106 #define SKD_N_SG_PER_REQ_DEFAULT 256u
107 #define SKD_N_SG_PER_SPECIAL    256u
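/*
 * Worked numbers for the note above (illustrative): at 32 bytes per SG
 * descriptor, 4096 descriptors occupy 4096 * 32 = 128KB, and with 4KB
 * pages they map 4096 * 4KB = 16MB per request.  The defaults of 256
 * descriptors correspondingly cover 256 * 4KB = 1MB per request.
 */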
108
109 #define SKD_N_COMPLETION_ENTRY  256u
110 #define SKD_N_READ_CAP_BYTES    (8u)
111
112 #define SKD_N_INTERNAL_BYTES    (512u)
113
114 /* 5 bits of uniquifier, 0xF800 */
115 #define SKD_ID_INCR             (0x400)
116 #define SKD_ID_TABLE_MASK       (3u << 8u)
117 #define  SKD_ID_RW_REQUEST      (0u << 8u)
118 #define  SKD_ID_INTERNAL        (1u << 8u)
119 #define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
120 #define  SKD_ID_FIT_MSG         (3u << 8u)
121 #define SKD_ID_SLOT_MASK        0x00FFu
122 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
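/*
 * Rough sketch of the id layout implied by the masks above: bits 7:0
 * select the slot within a table, bits 9:8 select which table the id
 * belongs to (r/w request, internal, special request, or FIT msg), and
 * the remaining high bits act as a generation count that is bumped by
 * SKD_ID_INCR each time a context is reused.
 */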
123
124 #define SKD_N_TIMEOUT_SLOT      4u
125 #define SKD_TIMEOUT_SLOT_MASK   3u
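/*
 * Sketch of the timeout bookkeeping: skdev->timeout_stamp advances once
 * per timer tick, each issued request records the current stamp, and
 * timeout_slot[stamp & SKD_TIMEOUT_SLOT_MASK] counts the requests issued
 * in that tick.  When the timer comes back around to a slot that still
 * has a non-zero count, those requests are treated as overdue.
 */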
126
127 #define SKD_N_MAX_SECTORS 2048u
128
129 #define SKD_MAX_RETRIES 2u
130
131 #define SKD_TIMER_SECONDS(seconds) (seconds)
132 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
133
134 #define INQ_STD_NBYTES 36
135
136 enum skd_drvr_state {
137         SKD_DRVR_STATE_LOAD,
138         SKD_DRVR_STATE_IDLE,
139         SKD_DRVR_STATE_BUSY,
140         SKD_DRVR_STATE_STARTING,
141         SKD_DRVR_STATE_ONLINE,
142         SKD_DRVR_STATE_PAUSING,
143         SKD_DRVR_STATE_PAUSED,
144         SKD_DRVR_STATE_DRAINING_TIMEOUT,
145         SKD_DRVR_STATE_RESTARTING,
146         SKD_DRVR_STATE_RESUMING,
147         SKD_DRVR_STATE_STOPPING,
148         SKD_DRVR_STATE_FAULT,
149         SKD_DRVR_STATE_DISAPPEARED,
150         SKD_DRVR_STATE_PROTOCOL_MISMATCH,
151         SKD_DRVR_STATE_BUSY_ERASE,
152         SKD_DRVR_STATE_BUSY_SANITIZE,
153         SKD_DRVR_STATE_BUSY_IMMINENT,
154         SKD_DRVR_STATE_WAIT_BOOT,
155         SKD_DRVR_STATE_SYNCING,
156 };
157
158 #define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
159 #define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
160 #define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
161 #define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
162 #define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
163 #define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
164 #define SKD_START_WAIT_SECONDS  90u
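/*
 * The *_TIMO values above are counted in timer ticks; the timer re-arms
 * itself one second out (mod_timer(..., jiffies + HZ)), so for example
 * SKD_RESTARTING_TIMO is 4 * 60 = 240 ticks, i.e. roughly four minutes
 * of wall-clock time.
 */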
165
166 enum skd_req_state {
167         SKD_REQ_STATE_IDLE,
168         SKD_REQ_STATE_SETUP,
169         SKD_REQ_STATE_BUSY,
170         SKD_REQ_STATE_COMPLETED,
171         SKD_REQ_STATE_TIMEOUT,
172         SKD_REQ_STATE_ABORTED,
173 };
174
175 enum skd_fit_msg_state {
176         SKD_MSG_STATE_IDLE,
177         SKD_MSG_STATE_BUSY,
178 };
179
180 enum skd_check_status_action {
181         SKD_CHECK_STATUS_REPORT_GOOD,
182         SKD_CHECK_STATUS_REPORT_SMART_ALERT,
183         SKD_CHECK_STATUS_REQUEUE_REQUEST,
184         SKD_CHECK_STATUS_REPORT_ERROR,
185         SKD_CHECK_STATUS_BUSY_IMMINENT,
186 };
187
188 struct skd_fitmsg_context {
189         enum skd_fit_msg_state state;
190
191         struct skd_fitmsg_context *next;
192
193         u32 id;
194         u16 outstanding;
195
196         u32 length;
197         u32 offset;
198
199         u8 *msg_buf;
200         dma_addr_t mb_dma_address;
201 };
202
203 struct skd_request_context {
204         enum skd_req_state state;
205
206         struct skd_request_context *next;
207
208         u16 id;
209         u32 fitmsg_id;
210
211         struct request *req;
212         u8 flush_cmd;
213
214         u32 timeout_stamp;
215         u8 sg_data_dir;
216         struct scatterlist *sg;
217         u32 n_sg;
218         u32 sg_byte_count;
219
220         struct fit_sg_descriptor *sksg_list;
221         dma_addr_t sksg_dma_address;
222
223         struct fit_completion_entry_v1 completion;
224
225         struct fit_comp_error_info err_info;
226
227 };
228 #define SKD_DATA_DIR_HOST_TO_CARD       1
229 #define SKD_DATA_DIR_CARD_TO_HOST       2
230
231 struct skd_special_context {
232         struct skd_request_context req;
233
234         u8 orphaned;
235
236         void *data_buf;
237         dma_addr_t db_dma_address;
238
239         u8 *msg_buf;
240         dma_addr_t mb_dma_address;
241 };
242
243 struct skd_sg_io {
244         fmode_t mode;
245         void __user *argp;
246
247         struct sg_io_hdr sg;
248
249         u8 cdb[16];
250
251         u32 dxfer_len;
252         u32 iovcnt;
253         struct sg_iovec *iov;
254         struct sg_iovec no_iov_iov;
255
256         struct skd_special_context *skspcl;
257 };
258
259 typedef enum skd_irq_type {
260         SKD_IRQ_LEGACY,
261         SKD_IRQ_MSI,
262         SKD_IRQ_MSIX
263 } skd_irq_type_t;
264
265 #define SKD_MAX_BARS                    2
266
267 struct skd_device {
268         volatile void __iomem *mem_map[SKD_MAX_BARS];
269         resource_size_t mem_phys[SKD_MAX_BARS];
270         u32 mem_size[SKD_MAX_BARS];
271
272         struct skd_msix_entry *msix_entries;
273
274         struct pci_dev *pdev;
275         int pcie_error_reporting_is_enabled;
276
277         spinlock_t lock;
278         struct gendisk *disk;
279         struct request_queue *queue;
280         struct device *class_dev;
281         int gendisk_on;
282         int sync_done;
283
284         atomic_t device_count;
285         u32 devno;
286         u32 major;
287         char name[32];
288         char isr_name[30];
289
290         enum skd_drvr_state state;
291         u32 drive_state;
292
293         u32 in_flight;
294         u32 cur_max_queue_depth;
295         u32 queue_low_water_mark;
296         u32 dev_max_queue_depth;
297
298         u32 num_fitmsg_context;
299         u32 num_req_context;
300
301         u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
302         u32 timeout_stamp;
303         struct skd_fitmsg_context *skmsg_free_list;
304         struct skd_fitmsg_context *skmsg_table;
305
306         struct skd_request_context *skreq_free_list;
307         struct skd_request_context *skreq_table;
308
309         struct skd_special_context *skspcl_free_list;
310         struct skd_special_context *skspcl_table;
311
312         struct skd_special_context internal_skspcl;
313         u32 read_cap_blocksize;
314         u32 read_cap_last_lba;
315         int read_cap_is_valid;
316         int inquiry_is_valid;
317         u8 inq_serial_num[13];  /*12 chars plus null term */
318         u8 id_str[80];          /* holds a composite name (pci + sernum) */
319
320         u8 skcomp_cycle;
321         u32 skcomp_ix;
322         struct fit_completion_entry_v1 *skcomp_table;
323         struct fit_comp_error_info *skerr_table;
324         dma_addr_t cq_dma_address;
325
326         wait_queue_head_t waitq;
327
328         struct timer_list timer;
329         u32 timer_countdown;
330         u32 timer_substate;
331
332         int n_special;
333         int sgs_per_request;
334         u32 last_mtd;
335
336         u32 proto_ver;
337
338         int dbg_level;
339         u32 connect_time_stamp;
340         int connect_retries;
341 #define SKD_MAX_CONNECT_RETRIES 16
342         u32 drive_jiffies;
343
344         u32 timo_slot;
345
346
347         struct work_struct completion_worker;
348 };
349
350 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
351 #define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
352 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
353
354 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
355 {
356         u32 val;
357
358         if (likely(skdev->dbg_level < 2))
359                 return readl(skdev->mem_map[1] + offset);
360         else {
361                 barrier();
362                 val = readl(skdev->mem_map[1] + offset);
363                 barrier();
364                 pr_debug("%s:%s:%d offset %x = %x\n",
365                          skdev->name, __func__, __LINE__, offset, val);
366                 return val;
367         }
368
369 }
370
371 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
372                                    u32 offset)
373 {
374         if (likely(skdev->dbg_level < 2)) {
375                 writel(val, skdev->mem_map[1] + offset);
376                 barrier();
377         } else {
378                 barrier();
379                 writel(val, skdev->mem_map[1] + offset);
380                 barrier();
381                 pr_debug("%s:%s:%d offset %x = %x\n",
382                          skdev->name, __func__, __LINE__, offset, val);
383         }
384 }
385
386 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
387                                    u32 offset)
388 {
389         if (likely(skdev->dbg_level < 2)) {
390                 writeq(val, skdev->mem_map[1] + offset);
391                 barrier();
392         } else {
393                 barrier();
394                 writeq(val, skdev->mem_map[1] + offset);
395                 barrier();
396                 pr_debug("%s:%s:%d offset %x = %016llx\n",
397                          skdev->name, __func__, __LINE__, offset, val);
398         }
399 }
400
401
402 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
403 static int skd_isr_type = SKD_IRQ_DEFAULT;
404
405 module_param(skd_isr_type, int, 0444);
406 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
407                  " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
408
409 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
410 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
411
412 module_param(skd_max_req_per_msg, int, 0444);
413 MODULE_PARM_DESC(skd_max_req_per_msg,
414                  "Maximum SCSI requests packed in a single message."
415                  " (1-14, default==1)");
416
417 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
418 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
419 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
420
421 module_param(skd_max_queue_depth, int, 0444);
422 MODULE_PARM_DESC(skd_max_queue_depth,
423                  "Maximum SCSI requests issued to s1120."
424                  " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
425
426 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
427 module_param(skd_sgs_per_request, int, 0444);
428 MODULE_PARM_DESC(skd_sgs_per_request,
429                  "Maximum SG elements per block request."
430                  " (1-4096, default==256)");
431
432 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
433 module_param(skd_max_pass_thru, int, 0444);
434 MODULE_PARM_DESC(skd_max_pass_thru,
435                  "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
436
437 module_param(skd_dbg_level, int, 0444);
438 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
439
440 module_param(skd_isr_comp_limit, int, 0444);
441 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
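/*
 * Illustrative load-time usage of the parameters above (values chosen
 * only as examples within the documented ranges):
 *
 *   modprobe skd skd_isr_type=2 skd_max_queue_depth=120 \
 *            skd_sgs_per_request=1024 skd_dbg_level=1
 */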
442
443 /* Major device number dynamically assigned. */
444 static u32 skd_major;
445
446 static void skd_destruct(struct skd_device *skdev);
447 static const struct block_device_operations skd_blockdev_ops;
448 static void skd_send_fitmsg(struct skd_device *skdev,
449                             struct skd_fitmsg_context *skmsg);
450 static void skd_send_special_fitmsg(struct skd_device *skdev,
451                                     struct skd_special_context *skspcl);
452 static void skd_request_fn(struct request_queue *rq);
453 static void skd_end_request(struct skd_device *skdev,
454                             struct skd_request_context *skreq, int error);
455 static int skd_preop_sg_list(struct skd_device *skdev,
456                              struct skd_request_context *skreq);
457 static void skd_postop_sg_list(struct skd_device *skdev,
458                                struct skd_request_context *skreq);
459
460 static void skd_restart_device(struct skd_device *skdev);
461 static int skd_quiesce_dev(struct skd_device *skdev);
462 static int skd_unquiesce_dev(struct skd_device *skdev);
463 static void skd_release_special(struct skd_device *skdev,
464                                 struct skd_special_context *skspcl);
465 static void skd_disable_interrupts(struct skd_device *skdev);
466 static void skd_isr_fwstate(struct skd_device *skdev);
467 static void skd_recover_requests(struct skd_device *skdev, int requeue);
468 static void skd_soft_reset(struct skd_device *skdev);
469
470 static const char *skd_name(struct skd_device *skdev);
471 const char *skd_drive_state_to_str(int state);
472 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
473 static void skd_log_skdev(struct skd_device *skdev, const char *event);
474 static void skd_log_skmsg(struct skd_device *skdev,
475                           struct skd_fitmsg_context *skmsg, const char *event);
476 static void skd_log_skreq(struct skd_device *skdev,
477                           struct skd_request_context *skreq, const char *event);
478
479 /*
480  *****************************************************************************
481  * READ/WRITE REQUESTS
482  *****************************************************************************
483  */
484 static void skd_fail_all_pending(struct skd_device *skdev)
485 {
486         struct request_queue *q = skdev->queue;
487         struct request *req;
488
489         for (;; ) {
490                 req = blk_peek_request(q);
491                 if (req == NULL)
492                         break;
493                 blk_start_request(req);
494                 __blk_end_request_all(req, -EIO);
495         }
496 }
497
498 static void
499 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
500                 int data_dir, unsigned lba,
501                 unsigned count)
502 {
503         if (data_dir == READ)
504                 scsi_req->cdb[0] = 0x28;
505         else
506                 scsi_req->cdb[0] = 0x2a;
507
508         scsi_req->cdb[1] = 0;
509         scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
510         scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
511         scsi_req->cdb[4] = (lba & 0xff00) >> 8;
512         scsi_req->cdb[5] = (lba & 0xff);
513         scsi_req->cdb[6] = 0;
514         scsi_req->cdb[7] = (count & 0xff00) >> 8;
515         scsi_req->cdb[8] = count & 0xff;
516         scsi_req->cdb[9] = 0;
517 }
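/*
 * The CDB built above is a standard READ(10)/WRITE(10) with a big-endian
 * LBA in bytes 2-5 and a big-endian transfer length in bytes 7-8.  For
 * example (illustrative), a read of 8 sectors at lba 0x12345678 encodes
 * as: 28 00 12 34 56 78 00 00 08 00.
 */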
518
519 static void
520 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
521                             struct skd_request_context *skreq)
522 {
523         skreq->flush_cmd = 1;
524
525         scsi_req->cdb[0] = 0x35;
526         scsi_req->cdb[1] = 0;
527         scsi_req->cdb[2] = 0;
528         scsi_req->cdb[3] = 0;
529         scsi_req->cdb[4] = 0;
530         scsi_req->cdb[5] = 0;
531         scsi_req->cdb[6] = 0;
532         scsi_req->cdb[7] = 0;
533         scsi_req->cdb[8] = 0;
534         scsi_req->cdb[9] = 0;
535 }
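/*
 * Opcode 0x35 is SYNCHRONIZE CACHE(10); with the LBA and block-count
 * fields left zero it asks the device to flush its entire write cache.
 */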
536
537 static void skd_request_fn_not_online(struct request_queue *q);
538
539 static void skd_request_fn(struct request_queue *q)
540 {
541         struct skd_device *skdev = q->queuedata;
542         struct skd_fitmsg_context *skmsg = NULL;
543         struct fit_msg_hdr *fmh = NULL;
544         struct skd_request_context *skreq;
545         struct request *req = NULL;
546         struct skd_scsi_request *scsi_req;
547         unsigned long io_flags;
548         int error;
549         u32 lba;
550         u32 count;
551         int data_dir;
552         u32 be_lba;
553         u32 be_count;
554         u64 be_dmaa;
555         u64 cmdctxt;
556         u32 timo_slot;
557         void *cmd_ptr;
558         int flush, fua;
559
560         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
561                 skd_request_fn_not_online(q);
562                 return;
563         }
564
565         if (blk_queue_stopped(skdev->queue)) {
566                 if (skdev->skmsg_free_list == NULL ||
567                     skdev->skreq_free_list == NULL ||
568                     skdev->in_flight >= skdev->queue_low_water_mark)
569                         /* There is still some kind of shortage */
570                         return;
571
572                 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
573         }
574
575         /*
576          * Stop conditions:
577          *  - There are no more native requests
578          *  - There are already the maximum number of requests in progress
579          *  - There are no more skd_request_context entries
580          *  - There are no more FIT msg buffers
581          */
582         for (;; ) {
583
584                 flush = fua = 0;
585
586                 req = blk_peek_request(q);
587
588                 /* Are there any native requests to start? */
589                 if (req == NULL)
590                         break;
591
592                 lba = (u32)blk_rq_pos(req);
593                 count = blk_rq_sectors(req);
594                 data_dir = rq_data_dir(req);
595                 io_flags = req->cmd_flags;
596
597                 if (req_op(req) == REQ_OP_FLUSH)
598                         flush++;
599
600                 if (io_flags & REQ_FUA)
601                         fua++;
602
603                 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
604                          "count=%u(0x%x) dir=%d\n",
605                          skdev->name, __func__, __LINE__,
606                          req, lba, lba, count, count, data_dir);
607
608                 /* At this point we know there is a request */
609
610                 /* Are too many requests already in progress? */
611                 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
612                         pr_debug("%s:%s:%d qdepth %d, limit %d\n",
613                                  skdev->name, __func__, __LINE__,
614                                  skdev->in_flight, skdev->cur_max_queue_depth);
615                         break;
616                 }
617
618                 /* Is a skd_request_context available? */
619                 skreq = skdev->skreq_free_list;
620                 if (skreq == NULL) {
621                         pr_debug("%s:%s:%d Out of req=%p\n",
622                                  skdev->name, __func__, __LINE__, q);
623                         break;
624                 }
625                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
626                 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
627
628                 /* Now we check to see if we can get a fit msg */
629                 if (skmsg == NULL) {
630                         if (skdev->skmsg_free_list == NULL) {
631                                 pr_debug("%s:%s:%d Out of msg\n",
632                                          skdev->name, __func__, __LINE__);
633                                 break;
634                         }
635                 }
636
637                 skreq->flush_cmd = 0;
638                 skreq->n_sg = 0;
639                 skreq->sg_byte_count = 0;
640
641                 /*
642                  * OK to now dequeue request from q.
643                  *
644                  * At this point we are committed to either start or reject
645                  * the native request. Note that skd_request_context is
646                  * available but is still at the head of the free list.
647                  */
648                 blk_start_request(req);
649                 skreq->req = req;
650                 skreq->fitmsg_id = 0;
651
652                 /* Either a FIT msg is in progress or we have to start one. */
653                 if (skmsg == NULL) {
654                         /* Are there any FIT msg buffers available? */
655                         skmsg = skdev->skmsg_free_list;
656                         if (skmsg == NULL) {
657                                 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
658                                          skdev->name, __func__, __LINE__,
659                                          skdev);
660                                 break;
661                         }
662                         SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
663                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
664
665                         skdev->skmsg_free_list = skmsg->next;
666
667                         skmsg->state = SKD_MSG_STATE_BUSY;
668                         skmsg->id += SKD_ID_INCR;
669
670                         /* Initialize the FIT msg header */
671                         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
672                         memset(fmh, 0, sizeof(*fmh));
673                         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
674                         skmsg->length = sizeof(*fmh);
675                 }
676
677                 skreq->fitmsg_id = skmsg->id;
678
679                 /*
680                  * Note that a FIT msg may have just been started
681                  * but contains no SoFIT requests yet.
682                  */
683
684                 /*
685                  * Transcode the request, checking as we go. The outcome of
686                  * the transcoding is represented by the error variable.
687                  */
688                 cmd_ptr = &skmsg->msg_buf[skmsg->length];
689                 memset(cmd_ptr, 0, 32);
690
691                 be_lba = cpu_to_be32(lba);
692                 be_count = cpu_to_be32(count);
693                 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
694                 cmdctxt = skreq->id + SKD_ID_INCR;
695
696                 scsi_req = cmd_ptr;
697                 scsi_req->hdr.tag = cmdctxt;
698                 scsi_req->hdr.sg_list_dma_address = be_dmaa;
699
700                 if (data_dir == READ)
701                         skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
702                 else
703                         skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
704
705                 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
706                         skd_prep_zerosize_flush_cdb(scsi_req, skreq);
707                         SKD_ASSERT(skreq->flush_cmd == 1);
708
709                 } else {
710                         skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
711                 }
712
713                 if (fua)
714                         scsi_req->cdb[1] |= SKD_FUA_NV;
715
716                 if (!req->bio)
717                         goto skip_sg;
718
719                 error = skd_preop_sg_list(skdev, skreq);
720
721                 if (error != 0) {
722                         /*
723                          * Complete the native request with error.
724                          * Note that the request context is still at the
725                          * head of the free list, and that the SoFIT request
726                          * was encoded into the FIT msg buffer but the FIT
727                          * msg length has not been updated. In short, the
728                          * only resource that has been allocated but might
729                          * not be used is that the FIT msg could be empty.
730                          */
731                         pr_debug("%s:%s:%d error Out\n",
732                                  skdev->name, __func__, __LINE__);
733                         skd_end_request(skdev, skreq, error);
734                         continue;
735                 }
736
737 skip_sg:
738                 scsi_req->hdr.sg_list_len_bytes =
739                         cpu_to_be32(skreq->sg_byte_count);
740
741                 /* Complete resource allocations. */
742                 skdev->skreq_free_list = skreq->next;
743                 skreq->state = SKD_REQ_STATE_BUSY;
744                 skreq->id += SKD_ID_INCR;
745
746                 skmsg->length += sizeof(struct skd_scsi_request);
747                 fmh->num_protocol_cmds_coalesced++;
748
749                 /*
750                  * Update the active request counts.
751                  * Capture the timeout timestamp.
752                  */
753                 skreq->timeout_stamp = skdev->timeout_stamp;
754                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
755                 skdev->timeout_slot[timo_slot]++;
756                 skdev->in_flight++;
757                 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
758                          skdev->name, __func__, __LINE__,
759                          skreq->id, skdev->in_flight);
760
761                 /*
762                  * If the FIT msg buffer is full send it.
763                  */
764                 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
765                     fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
766                         skd_send_fitmsg(skdev, skmsg);
767                         skmsg = NULL;
768                         fmh = NULL;
769                 }
770         }
771
772         /*
773          * Is a FIT msg in progress? If it is empty put the buffer back
774          * on the free list. If it is non-empty send what we got.
775          * This minimizes latency when there are fewer requests than
776          * what fits in a FIT msg.
777          */
778         if (skmsg != NULL) {
779                 /* Bigger than just a FIT msg header? */
780                 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
781                         pr_debug("%s:%s:%d sending msg=%p, len %d\n",
782                                  skdev->name, __func__, __LINE__,
783                                  skmsg, skmsg->length);
784                         skd_send_fitmsg(skdev, skmsg);
785                 } else {
786                         /*
787                          * The FIT msg is empty. It means we got started
788                          * on the msg, but the requests were rejected.
789                          */
790                         skmsg->state = SKD_MSG_STATE_IDLE;
791                         skmsg->id += SKD_ID_INCR;
792                         skmsg->next = skdev->skmsg_free_list;
793                         skdev->skmsg_free_list = skmsg;
794                 }
795                 skmsg = NULL;
796                 fmh = NULL;
797         }
798
799         /*
800          * If req is non-NULL it means there is something to do but
801          * we are out of a resource.
802          */
803         if (req)
804                 blk_stop_queue(skdev->queue);
805 }
806
807 static void skd_end_request(struct skd_device *skdev,
808                             struct skd_request_context *skreq, int error)
809 {
810         if (unlikely(error)) {
811                 struct request *req = skreq->req;
812                 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
813                 u32 lba = (u32)blk_rq_pos(req);
814                 u32 count = blk_rq_sectors(req);
815
816                 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
817                        skd_name(skdev), cmd, lba, count, skreq->id);
818         } else
819                 pr_debug("%s:%s:%d id=0x%x error=%d\n",
820                          skdev->name, __func__, __LINE__, skreq->id, error);
821
822         __blk_end_request_all(skreq->req, error);
823 }
824
825 static int skd_preop_sg_list(struct skd_device *skdev,
826                              struct skd_request_context *skreq)
827 {
828         struct request *req = skreq->req;
829         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
830         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
831         struct scatterlist *sg = &skreq->sg[0];
832         int n_sg;
833         int i;
834
835         skreq->sg_byte_count = 0;
836
837         /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
838                    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
839
840         n_sg = blk_rq_map_sg(skdev->queue, req, sg);
841         if (n_sg <= 0)
842                 return -EINVAL;
843
844         /*
845          * Map scatterlist to PCI bus addresses.
846          * Note PCI might change the number of entries.
847          */
848         n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
849         if (n_sg <= 0)
850                 return -EINVAL;
851
852         SKD_ASSERT(n_sg <= skdev->sgs_per_request);
853
854         skreq->n_sg = n_sg;
855
856         for (i = 0; i < n_sg; i++) {
857                 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
858                 u32 cnt = sg_dma_len(&sg[i]);
859                 uint64_t dma_addr = sg_dma_address(&sg[i]);
860
861                 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
862                 sgd->byte_count = cnt;
863                 skreq->sg_byte_count += cnt;
864                 sgd->host_side_addr = dma_addr;
865                 sgd->dev_side_addr = 0;
866         }
867
868         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
869         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
870
871         if (unlikely(skdev->dbg_level > 1)) {
872                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
873                          skdev->name, __func__, __LINE__,
874                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
875                 for (i = 0; i < n_sg; i++) {
876                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
877                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
878                                  "addr=0x%llx next=0x%llx\n",
879                                  skdev->name, __func__, __LINE__,
880                                  i, sgd->byte_count, sgd->control,
881                                  sgd->host_side_addr, sgd->next_desc_ptr);
882                 }
883         }
884
885         return 0;
886 }
887
888 static void skd_postop_sg_list(struct skd_device *skdev,
889                                struct skd_request_context *skreq)
890 {
891         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
892         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
893
894         /*
895          * restore the next ptr for next IO request so we
896          * don't have to set it every time.
897          */
898         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
899                 skreq->sksg_dma_address +
900                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
901         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
902 }
903
904 static void skd_request_fn_not_online(struct request_queue *q)
905 {
906         struct skd_device *skdev = q->queuedata;
907         int error;
908
909         SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
910
911         skd_log_skdev(skdev, "req_not_online");
912         switch (skdev->state) {
913         case SKD_DRVR_STATE_PAUSING:
914         case SKD_DRVR_STATE_PAUSED:
915         case SKD_DRVR_STATE_STARTING:
916         case SKD_DRVR_STATE_RESTARTING:
917         case SKD_DRVR_STATE_WAIT_BOOT:
918         /* In case of starting, we haven't started the queue,
919          * so we can't get here... but requests are
920          * possibly hanging out waiting for us because we
921          * reported the dev/skd0 already.  They'll wait
922          * forever if connect doesn't complete.
923          * What to do??? delay dev/skd0 ??
924          */
925         case SKD_DRVR_STATE_BUSY:
926         case SKD_DRVR_STATE_BUSY_IMMINENT:
927         case SKD_DRVR_STATE_BUSY_ERASE:
928         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
929                 return;
930
931         case SKD_DRVR_STATE_BUSY_SANITIZE:
932         case SKD_DRVR_STATE_STOPPING:
933         case SKD_DRVR_STATE_SYNCING:
934         case SKD_DRVR_STATE_FAULT:
935         case SKD_DRVR_STATE_DISAPPEARED:
936         default:
937                 error = -EIO;
938                 break;
939         }
940
941         /* If we get here, terminate all pending block requests
942          * with EIO and any scsi pass thru with appropriate sense
943          */
944
945         skd_fail_all_pending(skdev);
946 }
947
948 /*
949  *****************************************************************************
950  * TIMER
951  *****************************************************************************
952  */
953
954 static void skd_timer_tick_not_online(struct skd_device *skdev);
955
956 static void skd_timer_tick(ulong arg)
957 {
958         struct skd_device *skdev = (struct skd_device *)arg;
959
960         u32 timo_slot;
961         u32 overdue_timestamp;
962         unsigned long reqflags;
963         u32 state;
964
965         if (skdev->state == SKD_DRVR_STATE_FAULT)
966                 /* The driver has declared fault, and we want it to
967                  * stay that way until driver is reloaded.
968                  */
969                 return;
970
971         spin_lock_irqsave(&skdev->lock, reqflags);
972
973         state = SKD_READL(skdev, FIT_STATUS);
974         state &= FIT_SR_DRIVE_STATE_MASK;
975         if (state != skdev->drive_state)
976                 skd_isr_fwstate(skdev);
977
978         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
979                 skd_timer_tick_not_online(skdev);
980                 goto timer_func_out;
981         }
982         skdev->timeout_stamp++;
983         timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
984
985         /*
986          * All requests that happened during the previous use of
987          * this slot should be done by now. The previous use was
988          * over 7 seconds ago.
989          */
990         if (skdev->timeout_slot[timo_slot] == 0)
991                 goto timer_func_out;
992
993         /* Something is overdue */
994         overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
995
996         pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
997                  skdev->name, __func__, __LINE__,
998                  skdev->timeout_slot[timo_slot], skdev->in_flight);
999         pr_err("(%s): Overdue IOs (%d), busy %d\n",
1000                skd_name(skdev), skdev->timeout_slot[timo_slot],
1001                skdev->in_flight);
1002
1003         skdev->timer_countdown = SKD_DRAINING_TIMO;
1004         skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1005         skdev->timo_slot = timo_slot;
1006         blk_stop_queue(skdev->queue);
1007
1008 timer_func_out:
1009         mod_timer(&skdev->timer, (jiffies + HZ));
1010
1011         spin_unlock_irqrestore(&skdev->lock, reqflags);
1012 }
1013
1014 static void skd_timer_tick_not_online(struct skd_device *skdev)
1015 {
1016         switch (skdev->state) {
1017         case SKD_DRVR_STATE_IDLE:
1018         case SKD_DRVR_STATE_LOAD:
1019                 break;
1020         case SKD_DRVR_STATE_BUSY_SANITIZE:
1021                 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1022                          skdev->name, __func__, __LINE__,
1023                          skdev->drive_state, skdev->state);
1024                 /* If we've been in sanitize for 3 seconds, we figure we're not
1025                  * going to get any more completions, so recover requests now
1026                  */
1027                 if (skdev->timer_countdown > 0) {
1028                         skdev->timer_countdown--;
1029                         return;
1030                 }
1031                 skd_recover_requests(skdev, 0);
1032                 break;
1033
1034         case SKD_DRVR_STATE_BUSY:
1035         case SKD_DRVR_STATE_BUSY_IMMINENT:
1036         case SKD_DRVR_STATE_BUSY_ERASE:
1037                 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1038                          skdev->name, __func__, __LINE__,
1039                          skdev->state, skdev->timer_countdown);
1040                 if (skdev->timer_countdown > 0) {
1041                         skdev->timer_countdown--;
1042                         return;
1043                 }
1044                 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1045                          skdev->name, __func__, __LINE__,
1046                          skdev->state, skdev->timer_countdown);
1047                 skd_restart_device(skdev);
1048                 break;
1049
1050         case SKD_DRVR_STATE_WAIT_BOOT:
1051         case SKD_DRVR_STATE_STARTING:
1052                 if (skdev->timer_countdown > 0) {
1053                         skdev->timer_countdown--;
1054                         return;
1055                 }
1056                 /* For now, we fault the drive.  Could attempt resets to
1057                  * recover at some point. */
1058                 skdev->state = SKD_DRVR_STATE_FAULT;
1059
1060                 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1061                        skd_name(skdev), skdev->drive_state);
1062
1063                 /*start the queue so we can respond with error to requests */
1064                 /* wakeup anyone waiting for startup complete */
1065                 blk_start_queue(skdev->queue);
1066                 skdev->gendisk_on = -1;
1067                 wake_up_interruptible(&skdev->waitq);
1068                 break;
1069
1070         case SKD_DRVR_STATE_ONLINE:
1071                 /* shouldn't get here. */
1072                 break;
1073
1074         case SKD_DRVR_STATE_PAUSING:
1075         case SKD_DRVR_STATE_PAUSED:
1076                 break;
1077
1078         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1079                 pr_debug("%s:%s:%d "
1080                          "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1081                          skdev->name, __func__, __LINE__,
1082                          skdev->timo_slot,
1083                          skdev->timer_countdown,
1084                          skdev->in_flight,
1085                          skdev->timeout_slot[skdev->timo_slot]);
1086                 /* if the slot has cleared we can let the I/O continue */
1087                 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1088                         pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1089                                  skdev->name, __func__, __LINE__);
1090                         skdev->state = SKD_DRVR_STATE_ONLINE;
1091                         blk_start_queue(skdev->queue);
1092                         return;
1093                 }
1094                 if (skdev->timer_countdown > 0) {
1095                         skdev->timer_countdown--;
1096                         return;
1097                 }
1098                 skd_restart_device(skdev);
1099                 break;
1100
1101         case SKD_DRVR_STATE_RESTARTING:
1102                 if (skdev->timer_countdown > 0) {
1103                         skdev->timer_countdown--;
1104                         return;
1105                 }
1106                 /* For now, we fault the drive. Could attempt resets to
1107                  * recover at some point. */
1108                 skdev->state = SKD_DRVR_STATE_FAULT;
1109                 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1110                        skd_name(skdev), skdev->drive_state);
1111
1112                 /*
1113                  * Recovering does two things:
1114                  * 1. completes IO with error
1115                  * 2. reclaims dma resources
1116                  * When is it safe to recover requests?
1117                  * - if the drive state is faulted
1118                  * - if the state is still soft reset after our timeout
1119                  * - if the drive registers are dead (state = FF)
1120                  * If it is "unsafe", we still need to recover, so we will
1121                  * disable pci bus mastering and disable our interrupts.
1122                  */
1123
1124                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1125                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1126                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1127                         /* It never came out of soft reset. Try to
1128                          * recover the requests and then let them
1129                          * fail. This is to mitigate hung processes. */
1130                         skd_recover_requests(skdev, 0);
1131                 else {
1132                         pr_err("(%s): Disable BusMaster (%x)\n",
1133                                skd_name(skdev), skdev->drive_state);
1134                         pci_disable_device(skdev->pdev);
1135                         skd_disable_interrupts(skdev);
1136                         skd_recover_requests(skdev, 0);
1137                 }
1138
1139                 /*start the queue so we can respond with error to requests */
1140                 /* wakeup anyone waiting for startup complete */
1141                 blk_start_queue(skdev->queue);
1142                 skdev->gendisk_on = -1;
1143                 wake_up_interruptible(&skdev->waitq);
1144                 break;
1145
1146         case SKD_DRVR_STATE_RESUMING:
1147         case SKD_DRVR_STATE_STOPPING:
1148         case SKD_DRVR_STATE_SYNCING:
1149         case SKD_DRVR_STATE_FAULT:
1150         case SKD_DRVR_STATE_DISAPPEARED:
1151         default:
1152                 break;
1153         }
1154 }
1155
1156 static int skd_start_timer(struct skd_device *skdev)
1157 {
1158         int rc;
1159
1160         init_timer(&skdev->timer);
1161         setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1162
1163         rc = mod_timer(&skdev->timer, (jiffies + HZ));
1164         if (rc)
1165                 pr_err("%s: failed to start timer %d\n",
1166                        __func__, rc);
1167         return rc;
1168 }
1169
1170 static void skd_kill_timer(struct skd_device *skdev)
1171 {
1172         del_timer_sync(&skdev->timer);
1173 }
1174
1175 /*
1176  *****************************************************************************
1177  * IOCTL
1178  *****************************************************************************
1179  */
1180 static int skd_ioctl_sg_io(struct skd_device *skdev,
1181                            fmode_t mode, void __user *argp);
1182 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1183                                         struct skd_sg_io *sksgio);
1184 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1185                                    struct skd_sg_io *sksgio);
1186 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1187                                     struct skd_sg_io *sksgio);
1188 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1189                                  struct skd_sg_io *sksgio, int dxfer_dir);
1190 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1191                                  struct skd_sg_io *sksgio);
1192 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1193 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1194                                     struct skd_sg_io *sksgio);
1195 static int skd_sg_io_put_status(struct skd_device *skdev,
1196                                 struct skd_sg_io *sksgio);
1197
1198 static void skd_complete_special(struct skd_device *skdev,
1199                                  volatile struct fit_completion_entry_v1
1200                                  *skcomp,
1201                                  volatile struct fit_comp_error_info *skerr,
1202                                  struct skd_special_context *skspcl);
1203
1204 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1205                           uint cmd_in, ulong arg)
1206 {
1207         int rc = 0;
1208         struct gendisk *disk = bdev->bd_disk;
1209         struct skd_device *skdev = disk->private_data;
1210         void __user *p = (void *)arg;
1211
1212         pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
1213                  skdev->name, __func__, __LINE__,
1214                  disk->disk_name, current->comm, mode, cmd_in, arg);
1215
1216         if (!capable(CAP_SYS_ADMIN))
1217                 return -EPERM;
1218
1219         switch (cmd_in) {
1220         case SG_SET_TIMEOUT:
1221         case SG_GET_TIMEOUT:
1222         case SG_GET_VERSION_NUM:
1223                 rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1224                 break;
1225         case SG_IO:
1226                 rc = skd_ioctl_sg_io(skdev, mode, p);
1227                 break;
1228
1229         default:
1230                 rc = -ENOTTY;
1231                 break;
1232         }
1233
1234         pr_debug("%s:%s:%d %s:  completion rc %d\n",
1235                  skdev->name, __func__, __LINE__, disk->disk_name, rc);
1236         return rc;
1237 }
1238
1239 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1240                            void __user *argp)
1241 {
1242         int rc;
1243         struct skd_sg_io sksgio;
1244
1245         memset(&sksgio, 0, sizeof(sksgio));
1246         sksgio.mode = mode;
1247         sksgio.argp = argp;
1248         sksgio.iov = &sksgio.no_iov_iov;
1249
1250         switch (skdev->state) {
1251         case SKD_DRVR_STATE_ONLINE:
1252         case SKD_DRVR_STATE_BUSY_IMMINENT:
1253                 break;
1254
1255         default:
1256                 pr_debug("%s:%s:%d drive not online\n",
1257                          skdev->name, __func__, __LINE__);
1258                 rc = -ENXIO;
1259                 goto out;
1260         }
1261
1262         rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1263         if (rc)
1264                 goto out;
1265
1266         rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1267         if (rc)
1268                 goto out;
1269
1270         rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1271         if (rc)
1272                 goto out;
1273
1274         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1275         if (rc)
1276                 goto out;
1277
1278         rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1279         if (rc)
1280                 goto out;
1281
1282         rc = skd_sg_io_await(skdev, &sksgio);
1283         if (rc)
1284                 goto out;
1285
1286         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1287         if (rc)
1288                 goto out;
1289
1290         rc = skd_sg_io_put_status(skdev, &sksgio);
1291         if (rc)
1292                 goto out;
1293
1294         rc = 0;
1295
1296 out:
1297         skd_sg_io_release_skspcl(skdev, &sksgio);
1298
1299         if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1300                 kfree(sksgio.iov);
1301         return rc;
1302 }
1303
1304 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1305                                         struct skd_sg_io *sksgio)
1306 {
1307         struct sg_io_hdr *sgp = &sksgio->sg;
1308         int i, acc;
1309
1310         if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1311                 pr_debug("%s:%s:%d access sg failed %p\n",
1312                          skdev->name, __func__, __LINE__, sksgio->argp);
1313                 return -EFAULT;
1314         }
1315
1316         if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1317                 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1318                          skdev->name, __func__, __LINE__, sksgio->argp);
1319                 return -EFAULT;
1320         }
1321
1322         if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1323                 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1324                          skdev->name, __func__, __LINE__, sgp->interface_id);
1325                 return -EINVAL;
1326         }
1327
1328         if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1329                 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1330                          skdev->name, __func__, __LINE__, sgp->cmd_len);
1331                 return -EINVAL;
1332         }
1333
1334         if (sgp->iovec_count > 256) {
1335                 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1336                          skdev->name, __func__, __LINE__, sgp->iovec_count);
1337                 return -EINVAL;
1338         }
1339
1340         if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1341                 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1342                          skdev->name, __func__, __LINE__, sgp->dxfer_len);
1343                 return -EINVAL;
1344         }
1345
1346         switch (sgp->dxfer_direction) {
1347         case SG_DXFER_NONE:
1348                 acc = -1;
1349                 break;
1350
1351         case SG_DXFER_TO_DEV:
1352                 acc = VERIFY_READ;
1353                 break;
1354
1355         case SG_DXFER_FROM_DEV:
1356         case SG_DXFER_TO_FROM_DEV:
1357                 acc = VERIFY_WRITE;
1358                 break;
1359
1360         default:
1361                 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1362                          skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1363                 return -EINVAL;
1364         }
1365
1366         if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1367                 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1368                          skdev->name, __func__, __LINE__, sgp->cmdp);
1369                 return -EFAULT;
1370         }
1371
1372         if (sgp->mx_sb_len != 0) {
1373                 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1374                         pr_debug("%s:%s:%d access sbp failed %p\n",
1375                                  skdev->name, __func__, __LINE__, sgp->sbp);
1376                         return -EFAULT;
1377                 }
1378         }
1379
1380         if (sgp->iovec_count == 0) {
1381                 sksgio->iov[0].iov_base = sgp->dxferp;
1382                 sksgio->iov[0].iov_len = sgp->dxfer_len;
1383                 sksgio->iovcnt = 1;
1384                 sksgio->dxfer_len = sgp->dxfer_len;
1385         } else {
1386                 struct sg_iovec *iov;
1387                 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1388                 size_t iov_data_len;
1389
1390                 iov = kmalloc(nbytes, GFP_KERNEL);
1391                 if (iov == NULL) {
1392                         pr_debug("%s:%s:%d alloc iovec failed %d\n",
1393                                  skdev->name, __func__, __LINE__,
1394                                  sgp->iovec_count);
1395                         return -ENOMEM;
1396                 }
1397                 sksgio->iov = iov;
1398                 sksgio->iovcnt = sgp->iovec_count;
1399
1400                 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1401                         pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1402                                  skdev->name, __func__, __LINE__, sgp->dxferp);
1403                         return -EFAULT;
1404                 }
1405
1406                 /*
1407                  * Sum up the vecs, making sure they don't overflow
1408                  */
1409                 iov_data_len = 0;
1410                 for (i = 0; i < sgp->iovec_count; i++) {
1411                         if (iov_data_len + iov[i].iov_len < iov_data_len)
1412                                 return -EINVAL;
1413                         iov_data_len += iov[i].iov_len;
1414                 }
1415
1416                 /* SG_IO howto says that the shorter of the two wins */
1417                 if (sgp->dxfer_len < iov_data_len) {
1418                         sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1419                                                      sgp->iovec_count,
1420                                                      sgp->dxfer_len);
1421                         sksgio->dxfer_len = sgp->dxfer_len;
1422                 } else
1423                         sksgio->dxfer_len = iov_data_len;
1424         }
1425
1426         if (sgp->dxfer_direction != SG_DXFER_NONE) {
1427                 struct sg_iovec *iov = sksgio->iov;
1428                 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1429                         if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1430                                 pr_debug("%s:%s:%d access data failed %p/%d\n",
1431                                          skdev->name, __func__, __LINE__,
1432                                          iov->iov_base, (int)iov->iov_len);
1433                                 return -EFAULT;
1434                         }
1435                 }
1436         }
1437
1438         return 0;
1439 }
1440
1441 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1442                                    struct skd_sg_io *sksgio)
1443 {
1444         struct skd_special_context *skspcl = NULL;
1445         int rc;
1446
1447         for (;;) {
1448                 ulong flags;
1449
1450                 spin_lock_irqsave(&skdev->lock, flags);
1451                 skspcl = skdev->skspcl_free_list;
1452                 if (skspcl != NULL) {
1453                         skdev->skspcl_free_list =
1454                                 (struct skd_special_context *)skspcl->req.next;
1455                         skspcl->req.id += SKD_ID_INCR;
1456                         skspcl->req.state = SKD_REQ_STATE_SETUP;
1457                         skspcl->orphaned = 0;
1458                         skspcl->req.n_sg = 0;
1459                 }
1460                 spin_unlock_irqrestore(&skdev->lock, flags);
1461
1462                 if (skspcl != NULL) {
1463                         rc = 0;
1464                         break;
1465                 }
1466
1467                 pr_debug("%s:%s:%d blocking\n",
1468                          skdev->name, __func__, __LINE__);
1469
1470                 rc = wait_event_interruptible_timeout(
1471                                 skdev->waitq,
1472                                 (skdev->skspcl_free_list != NULL),
1473                                 msecs_to_jiffies(sksgio->sg.timeout));
1474
1475                 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1476                          skdev->name, __func__, __LINE__, rc);
1477
1478                 if (rc <= 0) {
1479                         if (rc == 0)
1480                                 rc = -ETIMEDOUT;
1481                         else
1482                                 rc = -EINTR;
1483                         break;
1484                 }
1485                 /*
1486                  * If we get here, rc > 0, meaning
1487                  * wait_event_interruptible_timeout() returned with time to
1488                  * spare, so the awaited event -- a non-empty free list --
1489                  * occurred.  Retry the allocation.
1490                  */
1491         }
1492         sksgio->skspcl = skspcl;
1493
1494         return rc;
1495 }
1496
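     /*
      * Build a page-based bounce buffer for the transfer: round the
      * length up to a 4-byte multiple for the DMA engine, allocate one
      * page per chunk, and fill in both the scatterlist and the FIT SG
      * descriptor list.
      */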
1497 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1498                                     struct skd_request_context *skreq,
1499                                     u32 dxfer_len)
1500 {
1501         u32 resid = dxfer_len;
1502
1503         /*
1504          * The DMA engine must have aligned addresses and byte counts.
1505          */
1506         resid += (-resid) & 3;
1507         skreq->sg_byte_count = resid;
1508
1509         skreq->n_sg = 0;
1510
1511         while (resid > 0) {
1512                 u32 nbytes = PAGE_SIZE;
1513                 u32 ix = skreq->n_sg;
1514                 struct scatterlist *sg = &skreq->sg[ix];
1515                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1516                 struct page *page;
1517
1518                 if (nbytes > resid)
1519                         nbytes = resid;
1520
1521                 page = alloc_page(GFP_KERNEL);
1522                 if (page == NULL)
1523                         return -ENOMEM;
1524
1525                 sg_set_page(sg, page, nbytes, 0);
1526
1527                 /* TODO: This should be going through a pci_???()
1528                  * routine to do proper mapping. */
1529                 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1530                 sksg->byte_count = nbytes;
1531
1532                 sksg->host_side_addr = sg_phys(sg);
1533
1534                 sksg->dev_side_addr = 0;
1535                 sksg->next_desc_ptr = skreq->sksg_dma_address +
1536                                       (ix + 1) * sizeof(*sksg);
1537
1538                 skreq->n_sg++;
1539                 resid -= nbytes;
1540         }
1541
1542         if (skreq->n_sg > 0) {
1543                 u32 ix = skreq->n_sg - 1;
1544                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1545
1546                 sksg->control = FIT_SGD_CONTROL_LAST;
1547                 sksg->next_desc_ptr = 0;
1548         }
1549
1550         if (unlikely(skdev->dbg_level > 1)) {
1551                 u32 i;
1552
1553                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1554                          skdev->name, __func__, __LINE__,
1555                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1556                 for (i = 0; i < skreq->n_sg; i++) {
1557                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1558
1559                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
1560                                  "addr=0x%llx next=0x%llx\n",
1561                                  skdev->name, __func__, __LINE__,
1562                                  i, sgd->byte_count, sgd->control,
1563                                  sgd->host_side_addr, sgd->next_desc_ptr);
1564                 }
1565         }
1566
1567         return 0;
1568 }
1569
1570 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1571                                     struct skd_sg_io *sksgio)
1572 {
1573         struct skd_special_context *skspcl = sksgio->skspcl;
1574         struct skd_request_context *skreq = &skspcl->req;
1575         u32 dxfer_len = sksgio->dxfer_len;
1576         int rc;
1577
1578         rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1579         /*
1580          * Errors or not, skd_release_special() is eventually called
1581          * to recover the allocations, including partial ones.
1582          */
1583         return rc;
1584 }
1585
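     /*
      * Copy data between the caller's iovec(s) and the request's bounce
      * pages in the direction given by dxfer_dir.  A direction that does
      * not apply to this request is a no-op.
      */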
1586 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1587                                  struct skd_sg_io *sksgio, int dxfer_dir)
1588 {
1589         struct skd_special_context *skspcl = sksgio->skspcl;
1590         u32 iov_ix = 0;
1591         struct sg_iovec curiov;
1592         u32 sksg_ix = 0;
1593         u8 *bufp = NULL;
1594         u32 buf_len = 0;
1595         u32 resid = sksgio->dxfer_len;
1596         int rc;
1597
1598         curiov.iov_len = 0;
1599         curiov.iov_base = NULL;
1600
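             /*
              * Nothing to copy unless this call's direction matches the
              * SG_IO transfer direction; the one exception is a TO_DEV
              * copy for a bidirectional (TO_FROM_DEV) request.
              */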
1601         if (dxfer_dir != sksgio->sg.dxfer_direction) {
1602                 if (dxfer_dir != SG_DXFER_TO_DEV ||
1603                     sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1604                         return 0;
1605         }
1606
1607         while (resid > 0) {
1608                 u32 nbytes = PAGE_SIZE;
1609
1610                 if (curiov.iov_len == 0) {
1611                         curiov = sksgio->iov[iov_ix++];
1612                         continue;
1613                 }
1614
1615                 if (buf_len == 0) {
1616                         struct page *page;
1617                         page = sg_page(&skspcl->req.sg[sksg_ix++]);
1618                         bufp = page_address(page);
1619                         buf_len = PAGE_SIZE;
1620                 }
1621
1622                 nbytes = min_t(u32, nbytes, resid);
1623                 nbytes = min_t(u32, nbytes, curiov.iov_len);
1624                 nbytes = min_t(u32, nbytes, buf_len);
1625
1626                 if (dxfer_dir == SG_DXFER_TO_DEV)
1627                         rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1628                 else
1629                         rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1630
1631                 if (rc)
1632                         return -EFAULT;
1633
1634                 resid -= nbytes;
1635                 curiov.iov_len -= nbytes;
1636                 curiov.iov_base += nbytes;
1637                 buf_len -= nbytes;
1638         }
1639
1640         return 0;
1641 }
1642
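     /*
      * Build a single-command FIT message around the caller's CDB and
      * the request's SG list, mark the request busy and hand the
      * message to the device.
      */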
1643 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1644                                  struct skd_sg_io *sksgio)
1645 {
1646         struct skd_special_context *skspcl = sksgio->skspcl;
1647         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1648         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1649
1650         memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1651
1652         /* Initialize the FIT msg header */
1653         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1654         fmh->num_protocol_cmds_coalesced = 1;
1655
1656         /* Initialize the SCSI request */
1657         if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1658                 scsi_req->hdr.sg_list_dma_address =
1659                         cpu_to_be64(skspcl->req.sksg_dma_address);
1660         scsi_req->hdr.tag = skspcl->req.id;
1661         scsi_req->hdr.sg_list_len_bytes =
1662                 cpu_to_be32(skspcl->req.sg_byte_count);
1663         memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1664
1665         skspcl->req.state = SKD_REQ_STATE_BUSY;
1666         skd_send_special_fitmsg(skdev, skspcl);
1667
1668         return 0;
1669 }
1670
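     /*
      * Wait for the special request to leave the BUSY state or for the
      * SG_IO timeout to expire.  An aborted request is completed with
      * fabricated CHECK CONDITION sense data; a request still on the
      * adapter is orphaned so it is reclaimed when it finally completes.
      */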
1671 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1672 {
1673         unsigned long flags;
1674         int rc;
1675
1676         rc = wait_event_interruptible_timeout(skdev->waitq,
1677                                               (sksgio->skspcl->req.state !=
1678                                                SKD_REQ_STATE_BUSY),
1679                                               msecs_to_jiffies(sksgio->sg.
1680                                                                timeout));
1681
1682         spin_lock_irqsave(&skdev->lock, flags);
1683
1684         if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1685                 pr_debug("%s:%s:%d skspcl %p aborted\n",
1686                          skdev->name, __func__, __LINE__, sksgio->skspcl);
1687
1688                 /* Build a check condition with sense data and let the
1689                  * command finish.  For a timeout we must fabricate the
1690                  * completion and sense data ourselves. */
1691                 sksgio->skspcl->req.completion.status =
1692                         SAM_STAT_CHECK_CONDITION;
1693
1694                 memset(&sksgio->skspcl->req.err_info, 0,
1695                        sizeof(sksgio->skspcl->req.err_info));
1696                 sksgio->skspcl->req.err_info.type = 0x70;
1697                 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1698                 sksgio->skspcl->req.err_info.code = 0x44;
1699                 sksgio->skspcl->req.err_info.qual = 0;
1700                 rc = 0;
1701         } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1702                 /* No longer on the adapter. We finish. */
1703                 rc = 0;
1704         else {
1705                 /* Something's gone wrong. Still busy. Timeout or
1706                  * user interrupted (control-C). Mark as an orphan
1707                  * so it will be disposed when completed. */
1708                 sksgio->skspcl->orphaned = 1;
1709                 sksgio->skspcl = NULL;
1710                 if (rc == 0) {
1711                         pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1712                                  skdev->name, __func__, __LINE__,
1713                                  sksgio, sksgio->sg.timeout);
1714                         rc = -ETIMEDOUT;
1715                 } else {
1716                         pr_debug("%s:%s:%d cntlc %p\n",
1717                                  skdev->name, __func__, __LINE__, sksgio);
1718                         rc = -EINTR;
1719                 }
1720         }
1721
1722         spin_unlock_irqrestore(&skdev->lock, flags);
1723
1724         return rc;
1725 }
1726
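     /*
      * Fill in the sg_io_hdr status, residual and (on CHECK CONDITION)
      * sense data, then copy the header back to user space.
      */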
1727 static int skd_sg_io_put_status(struct skd_device *skdev,
1728                                 struct skd_sg_io *sksgio)
1729 {
1730         struct sg_io_hdr *sgp = &sksgio->sg;
1731         struct skd_special_context *skspcl = sksgio->skspcl;
1732         int resid = 0;
1733
1734         u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1735
1736         sgp->status = skspcl->req.completion.status;
1737         resid = sksgio->dxfer_len - nb;
1738
1739         sgp->masked_status = sgp->status & STATUS_MASK;
1740         sgp->msg_status = 0;
1741         sgp->host_status = 0;
1742         sgp->driver_status = 0;
1743         sgp->resid = resid;
1744         if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1745                 sgp->info |= SG_INFO_CHECK;
1746
1747         pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1748                  skdev->name, __func__, __LINE__,
1749                  sgp->status, sgp->masked_status, sgp->resid);
1750
1751         if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1752                 if (sgp->mx_sb_len > 0) {
1753                         struct fit_comp_error_info *ei = &skspcl->req.err_info;
1754                         u32 nbytes = sizeof(*ei);
1755
1756                         nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1757
1758                         sgp->sb_len_wr = nbytes;
1759
1760                         if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1761                                 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1762                                          skdev->name, __func__, __LINE__,
1763                                          sgp->sbp);
1764                                 return -EFAULT;
1765                         }
1766                 }
1767         }
1768
1769         if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1770                 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1771                          skdev->name, __func__, __LINE__, sksgio->argp);
1772                 return -EFAULT;
1773         }
1774
1775         return 0;
1776 }
1777
1778 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1779                                     struct skd_sg_io *sksgio)
1780 {
1781         struct skd_special_context *skspcl = sksgio->skspcl;
1782
1783         if (skspcl != NULL) {
1784                 ulong flags;
1785
1786                 sksgio->skspcl = NULL;
1787
1788                 spin_lock_irqsave(&skdev->lock, flags);
1789                 skd_release_special(skdev, skspcl);
1790                 spin_unlock_irqrestore(&skdev->lock, flags);
1791         }
1792
1793         return 0;
1794 }
1795
1796 /*
1797  *****************************************************************************
1798  * INTERNAL REQUESTS -- generated by driver itself
1799  *****************************************************************************
1800  */
1801
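     /*
      * Lay out the FIT message header, the embedded SCSI request and the
      * single SG descriptor used by driver-internal commands.  The lone
      * descriptor references skspcl->db_dma_address.
      */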
1802 static int skd_format_internal_skspcl(struct skd_device *skdev)
1803 {
1804         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1805         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1806         struct fit_msg_hdr *fmh;
1807         uint64_t dma_address;
1808         struct skd_scsi_request *scsi;
1809
1810         fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1811         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1812         fmh->num_protocol_cmds_coalesced = 1;
1813
1814         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1815         memset(scsi, 0, sizeof(*scsi));
1816         dma_address = skspcl->req.sksg_dma_address;
1817         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1818         sgd->control = FIT_SGD_CONTROL_LAST;
1819         sgd->byte_count = 0;
1820         sgd->host_side_addr = skspcl->db_dma_address;
1821         sgd->dev_side_addr = 0;
1822         sgd->next_desc_ptr = 0LL;
1823
1824         return 1;
1825 }
1826
1827 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1828
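     /*
      * Build and issue one of the driver-internal commands (TEST UNIT
      * READY, READ CAPACITY, INQUIRY, WRITE/READ BUFFER, SYNCHRONIZE
      * CACHE).  If a previous internal command is still in flight the
      * call is a no-op.
      */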
1829 static void skd_send_internal_skspcl(struct skd_device *skdev,
1830                                      struct skd_special_context *skspcl,
1831                                      u8 opcode)
1832 {
1833         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1834         struct skd_scsi_request *scsi;
1835         unsigned char *buf = skspcl->data_buf;
1836         int i;
1837
1838         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1839                 /*
1840                  * A refresh is already in progress.
1841                  * Just wait for it to finish.
1842                  */
1843                 return;
1844
1845         SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1846         skspcl->req.state = SKD_REQ_STATE_BUSY;
1847         skspcl->req.id += SKD_ID_INCR;
1848
1849         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1850         scsi->hdr.tag = skspcl->req.id;
1851
1852         memset(scsi->cdb, 0, sizeof(scsi->cdb));
1853
1854         switch (opcode) {
1855         case TEST_UNIT_READY:
1856                 scsi->cdb[0] = TEST_UNIT_READY;
1857                 sgd->byte_count = 0;
1858                 scsi->hdr.sg_list_len_bytes = 0;
1859                 break;
1860
1861         case READ_CAPACITY:
1862                 scsi->cdb[0] = READ_CAPACITY;
1863                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1864                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1865                 break;
1866
1867         case INQUIRY:
1868                 scsi->cdb[0] = INQUIRY;
1869                 scsi->cdb[1] = 0x01;    /* evpd */
1870                 scsi->cdb[2] = 0x80;    /* serial number page */
1871                 scsi->cdb[4] = 0x10;
1872                 sgd->byte_count = 16;
1873                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1874                 break;
1875
1876         case SYNCHRONIZE_CACHE:
1877                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1878                 sgd->byte_count = 0;
1879                 scsi->hdr.sg_list_len_bytes = 0;
1880                 break;
1881
1882         case WRITE_BUFFER:
1883                 scsi->cdb[0] = WRITE_BUFFER;
1884                 scsi->cdb[1] = 0x02;
1885                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1886                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1887                 sgd->byte_count = WR_BUF_SIZE;
1888                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1889                 /* fill incrementing byte pattern */
1890                 for (i = 0; i < sgd->byte_count; i++)
1891                         buf[i] = i & 0xFF;
1892                 break;
1893
1894         case READ_BUFFER:
1895                 scsi->cdb[0] = READ_BUFFER;
1896                 scsi->cdb[1] = 0x02;
1897                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1898                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1899                 sgd->byte_count = WR_BUF_SIZE;
1900                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1901                 memset(skspcl->data_buf, 0, sgd->byte_count);
1902                 break;
1903
1904         default:
1905                 SKD_ASSERT("Don't know what to send");
1906                 return;
1907
1908         }
1909         skd_send_special_fitmsg(skdev, skspcl);
1910 }
1911
1912 static void skd_refresh_device_data(struct skd_device *skdev)
1913 {
1914         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1915
1916         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1917 }
1918
1919 static int skd_chk_read_buf(struct skd_device *skdev,
1920                             struct skd_special_context *skspcl)
1921 {
1922         unsigned char *buf = skspcl->data_buf;
1923         int i;
1924
1925         /* check for incrementing byte pattern */
1926         for (i = 0; i < WR_BUF_SIZE; i++)
1927                 if (buf[i] != (i & 0xFF))
1928                         return 1;
1929
1930         return 0;
1931 }
1932
1933 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1934                                  u8 code, u8 qual, u8 fruc)
1935 {
1936         /* If the check condition is of special interest, log a message */
1937         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1938             && (code == 0x04) && (qual == 0x06)) {
1939                 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1940                        "ascq/fruc %02x/%02x/%02x/%02x\n",
1941                        skd_name(skdev), key, code, qual, fruc);
1942         }
1943 }
1944
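     /*
      * Completion handler for driver-internal commands.  Drives the
      * bring-up sequence TUR -> WRITE BUFFER -> READ BUFFER -> READ
      * CAPACITY -> INQUIRY, retrying or soft-resetting on errors, and
      * wakes waiters when a SYNCHRONIZE CACHE completes.
      */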
1945 static void skd_complete_internal(struct skd_device *skdev,
1946                                   volatile struct fit_completion_entry_v1
1947                                   *skcomp,
1948                                   volatile struct fit_comp_error_info *skerr,
1949                                   struct skd_special_context *skspcl)
1950 {
1951         u8 *buf = skspcl->data_buf;
1952         u8 status;
1953         int i;
1954         struct skd_scsi_request *scsi =
1955                 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1956
1957         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1958
1959         pr_debug("%s:%s:%d complete internal %x\n",
1960                  skdev->name, __func__, __LINE__, scsi->cdb[0]);
1961
1962         skspcl->req.completion = *skcomp;
1963         skspcl->req.state = SKD_REQ_STATE_IDLE;
1964         skspcl->req.id += SKD_ID_INCR;
1965
1966         status = skspcl->req.completion.status;
1967
1968         skd_log_check_status(skdev, status, skerr->key, skerr->code,
1969                              skerr->qual, skerr->fruc);
1970
1971         switch (scsi->cdb[0]) {
1972         case TEST_UNIT_READY:
1973                 if (status == SAM_STAT_GOOD)
1974                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1975                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1976                          (skerr->key == MEDIUM_ERROR))
1977                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1978                 else {
1979                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1980                                 pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
1981                                          skdev->name, __func__, __LINE__,
1982                                          skdev->state);
1983                                 return;
1984                         }
1985                         pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
1986                                  skdev->name, __func__, __LINE__);
1987                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1988                 }
1989                 break;
1990
1991         case WRITE_BUFFER:
1992                 if (status == SAM_STAT_GOOD)
1993                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1994                 else {
1995                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1996                                 pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
1997                                          skdev->name, __func__, __LINE__,
1998                                          skdev->state);
1999                                 return;
2000                         }
2001                         pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
2002                                  skdev->name, __func__, __LINE__);
2003                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
2004                 }
2005                 break;
2006
2007         case READ_BUFFER:
2008                 if (status == SAM_STAT_GOOD) {
2009                         if (skd_chk_read_buf(skdev, skspcl) == 0)
2010                                 skd_send_internal_skspcl(skdev, skspcl,
2011                                                          READ_CAPACITY);
2012                         else {
2013                                 pr_err(
2014                                        "(%s):*** W/R Buffer mismatch %d ***\n",
2015                                        skd_name(skdev), skdev->connect_retries);
2016                                 if (skdev->connect_retries <
2017                                     SKD_MAX_CONNECT_RETRIES) {
2018                                         skdev->connect_retries++;
2019                                         skd_soft_reset(skdev);
2020                                 } else {
2021                                         pr_err(
2022                                                "(%s): W/R Buffer Connect Error\n",
2023                                                skd_name(skdev));
2024                                         return;
2025                                 }
2026                         }
2027
2028                 } else {
2029                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2030                                 pr_debug("%s:%s:%d "
2031                                          "read buffer failed, don't send anymore state 0x%x\n",
2032                                          skdev->name, __func__, __LINE__,
2033                                          skdev->state);
2034                                 return;
2035                         }
2036                         pr_debug("%s:%s:%d "
2037                                  "**** read buffer failed, retry skerr\n",
2038                                  skdev->name, __func__, __LINE__);
2039                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
2040                 }
2041                 break;
2042
2043         case READ_CAPACITY:
2044                 skdev->read_cap_is_valid = 0;
2045                 if (status == SAM_STAT_GOOD) {
2046                         skdev->read_cap_last_lba =
2047                                 (buf[0] << 24) | (buf[1] << 16) |
2048                                 (buf[2] << 8) | buf[3];
2049                         skdev->read_cap_blocksize =
2050                                 (buf[4] << 24) | (buf[5] << 16) |
2051                                 (buf[6] << 8) | buf[7];
2052
2053                         pr_debug("%s:%s:%d last lba %d, bs %d\n",
2054                                  skdev->name, __func__, __LINE__,
2055                                  skdev->read_cap_last_lba,
2056                                  skdev->read_cap_blocksize);
2057
2058                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2059
2060                         skdev->read_cap_is_valid = 1;
2061
2062                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2063                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2064                            (skerr->key == MEDIUM_ERROR)) {
2065                         skdev->read_cap_last_lba = ~0;
2066                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2067                         pr_debug("%s:%s:%d "
2068                                  "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2069                                  skdev->name, __func__, __LINE__);
2070                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2071                 } else {
2072                         pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2073                                  skdev->name, __func__, __LINE__);
2074                         skd_send_internal_skspcl(skdev, skspcl,
2075                                                  TEST_UNIT_READY);
2076                 }
2077                 break;
2078
2079         case INQUIRY:
2080                 skdev->inquiry_is_valid = 0;
2081                 if (status == SAM_STAT_GOOD) {
2082                         skdev->inquiry_is_valid = 1;
2083
2084                         for (i = 0; i < 12; i++)
2085                                 skdev->inq_serial_num[i] = buf[i + 4];
2086                         skdev->inq_serial_num[12] = 0;
2087                 }
2088
2089                 if (skd_unquiesce_dev(skdev) < 0)
2090                         pr_debug("%s:%s:%d **** failed to ONLINE device\n",
2091                                  skdev->name, __func__, __LINE__);
2092                  /* connection is complete */
2093                 skdev->connect_retries = 0;
2094                 break;
2095
2096         case SYNCHRONIZE_CACHE:
2097                 if (status == SAM_STAT_GOOD)
2098                         skdev->sync_done = 1;
2099                 else
2100                         skdev->sync_done = -1;
2101                 wake_up_interruptible(&skdev->waitq);
2102                 break;
2103
2104         default:
2105                 SKD_ASSERT("we didn't send this");
2106         }
2107 }
2108
2109 /*
2110  *****************************************************************************
2111  * FIT MESSAGES
2112  *****************************************************************************
2113  */
2114
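     /*
      * Post a coalesced FIT message to the device's normal queue.  The
      * message size is encoded alongside the DMA address in the value
      * written to FIT_Q_COMMAND.
      */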
2115 static void skd_send_fitmsg(struct skd_device *skdev,
2116                             struct skd_fitmsg_context *skmsg)
2117 {
2118         u64 qcmd;
2119         struct fit_msg_hdr *fmh;
2120
2121         pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2122                  skdev->name, __func__, __LINE__,
2123                  skmsg->mb_dma_address, skdev->in_flight);
2124         pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2125                  skdev->name, __func__, __LINE__,
2126                  skmsg->msg_buf, skmsg->offset);
2127
2128         qcmd = skmsg->mb_dma_address;
2129         qcmd |= FIT_QCMD_QID_NORMAL;
2130
2131         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2132         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2133
2134         if (unlikely(skdev->dbg_level > 1)) {
2135                 u8 *bp = (u8 *)skmsg->msg_buf;
2136                 int i;
2137                 for (i = 0; i < skmsg->length; i += 8) {
2138                         pr_debug("%s:%s:%d msg[%2d] %8ph\n",
2139                                  skdev->name, __func__, __LINE__, i, &bp[i]);
2140                         if (i == 0)
2141                                 i = 64 - 8;
2142                 }
2143         }
2144
2145         if (skmsg->length > 256)
2146                 qcmd |= FIT_QCMD_MSGSIZE_512;
2147         else if (skmsg->length > 128)
2148                 qcmd |= FIT_QCMD_MSGSIZE_256;
2149         else if (skmsg->length > 64)
2150                 qcmd |= FIT_QCMD_MSGSIZE_128;
2151         else
2152                 /*
2153                  * This makes no sense because the FIT msg header is
2154                  * 64 bytes. If the msg is only 64 bytes long it has
2155                  * no payload.
2156                  */
2157                 qcmd |= FIT_QCMD_MSGSIZE_64;
2158
2159         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2160 }
2161
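     /*
      * Post a special-context FIT message (used for SG_IO and
      * driver-internal commands) to the device.
      */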
2162 static void skd_send_special_fitmsg(struct skd_device *skdev,
2163                                     struct skd_special_context *skspcl)
2164 {
2165         u64 qcmd;
2166
2167         if (unlikely(skdev->dbg_level > 1)) {
2168                 u8 *bp = (u8 *)skspcl->msg_buf;
2169                 int i;
2170
2171                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2172                         pr_debug("%s:%s:%d  spcl[%2d] %8ph\n",
2173                                  skdev->name, __func__, __LINE__, i, &bp[i]);
2174                         if (i == 0)
2175                                 i = 64 - 8;
2176                 }
2177
2178                 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2179                          skdev->name, __func__, __LINE__,
2180                          skspcl, skspcl->req.id, skspcl->req.sksg_list,
2181                          skspcl->req.sksg_dma_address);
2182                 for (i = 0; i < skspcl->req.n_sg; i++) {
2183                         struct fit_sg_descriptor *sgd =
2184                                 &skspcl->req.sksg_list[i];
2185
2186                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
2187                                  "addr=0x%llx next=0x%llx\n",
2188                                  skdev->name, __func__, __LINE__,
2189                                  i, sgd->byte_count, sgd->control,
2190                                  sgd->host_side_addr, sgd->next_desc_ptr);
2191                 }
2192         }
2193
2194         /*
2195          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2196          * and one 64-byte SSDI command.
2197          */
2198         qcmd = skspcl->mb_dma_address;
2199         qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2200
2201         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2202 }
2203
2204 /*
2205  *****************************************************************************
2206  * COMPLETION QUEUE
2207  *****************************************************************************
2208  */
2209
2210 static void skd_complete_other(struct skd_device *skdev,
2211                                volatile struct fit_completion_entry_v1 *skcomp,
2212                                volatile struct fit_comp_error_info *skerr);
2213
2214 struct sns_info {
2215         u8 type;
2216         u8 stat;
2217         u8 key;
2218         u8 asc;
2219         u8 ascq;
2220         u8 mask;
2221         enum skd_check_status_action action;
2222 };
2223
2224 static struct sns_info skd_chkstat_table[] = {
2225         /* Good */
2226         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2227           SKD_CHECK_STATUS_REPORT_GOOD },
2228
2229         /* Smart alerts */
2230         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2231           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2232         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2233           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2234         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2235           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2236
2237         /* Retry (with limits) */
2238         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2239           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2240         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2241           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2242         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2243           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2244         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2245           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2246
2247         /* Busy (or about to be) */
2248         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2249           SKD_CHECK_STATUS_BUSY_IMMINENT },
2250 };
2251
2252 /*
2253  * Look up status and sense data to decide how to handle the error
2254  * from the device.
2255  * The mask says which fields must match, e.g. mask=0x18 means check
2256  * type and stat and ignore key, asc, ascq.
2257  */
2258
2259 static enum skd_check_status_action
2260 skd_check_status(struct skd_device *skdev,
2261                  u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2262 {
2263         int i, n;
2264
2265         pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2266                skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2267                skerr->fruc);
2268
2269         pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2270                  skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2271                  skerr->key, skerr->code, skerr->qual, skerr->fruc);
2272
2273         /* Does the info match an entry in the good category? */
2274         n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2275         for (i = 0; i < n; i++) {
2276                 struct sns_info *sns = &skd_chkstat_table[i];
2277
2278                 if (sns->mask & 0x10)
2279                         if (skerr->type != sns->type)
2280                                 continue;
2281
2282                 if (sns->mask & 0x08)
2283                         if (cmp_status != sns->stat)
2284                                 continue;
2285
2286                 if (sns->mask & 0x04)
2287                         if (skerr->key != sns->key)
2288                                 continue;
2289
2290                 if (sns->mask & 0x02)
2291                         if (skerr->code != sns->asc)
2292                                 continue;
2293
2294                 if (sns->mask & 0x01)
2295                         if (skerr->qual != sns->ascq)
2296                                 continue;
2297
2298                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2299                         pr_err("(%s): SMART Alert: sense key/asc/ascq "
2300                                "%02x/%02x/%02x\n",
2301                                skd_name(skdev), skerr->key,
2302                                skerr->code, skerr->qual);
2303                 }
2304                 return sns->action;
2305         }
2306
2307         /* No other match, so nonzero status means error,
2308          * zero status means good
2309          */
2310         if (cmp_status) {
2311                 pr_debug("%s:%s:%d status check: error\n",
2312                          skdev->name, __func__, __LINE__);
2313                 return SKD_CHECK_STATUS_REPORT_ERROR;
2314         }
2315
2316         pr_debug("%s:%s:%d status check good default\n",
2317                  skdev->name, __func__, __LINE__);
2318         return SKD_CHECK_STATUS_REPORT_GOOD;
2319 }
2320
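     /*
      * Decide how to finish a request that completed with bad status:
      * complete it as good, requeue it (subject to a retry limit),
      * quiesce the device when BUSY is imminent, or fail it with -EIO.
      */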
2321 static void skd_resolve_req_exception(struct skd_device *skdev,
2322                                       struct skd_request_context *skreq)
2323 {
2324         u8 cmp_status = skreq->completion.status;
2325
2326         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2327         case SKD_CHECK_STATUS_REPORT_GOOD:
2328         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2329                 skd_end_request(skdev, skreq, 0);
2330                 break;
2331
2332         case SKD_CHECK_STATUS_BUSY_IMMINENT:
2333                 skd_log_skreq(skdev, skreq, "retry(busy)");
2334                 blk_requeue_request(skdev->queue, skreq->req);
2335                 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2336                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2337                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2338                 skd_quiesce_dev(skdev);
2339                 break;
2340
2341         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2342                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2343                         skd_log_skreq(skdev, skreq, "retry");
2344                         blk_requeue_request(skdev->queue, skreq->req);
2345                         break;
2346                 }
2347         /* fall through to report error */
2348
2349         case SKD_CHECK_STATUS_REPORT_ERROR:
2350         default:
2351                 skd_end_request(skdev, skreq, -EIO);
2352                 break;
2353         }
2354 }
2355
2356 /* assume spinlock is already held */
2357 static void skd_release_skreq(struct skd_device *skdev,
2358                               struct skd_request_context *skreq)
2359 {
2360         u32 msg_slot;
2361         struct skd_fitmsg_context *skmsg;
2362
2363         u32 timo_slot;
2364
2365         /*
2366          * Reclaim the FIT msg buffer if this is
2367          * the first of the requests it carried to
2368          * be completed. The FIT msg buffer used to
2369          * send this request cannot be reused until
2370          * we are sure the s1120 card has copied
2371          * it to its memory. The FIT msg might have
2372          * contained several requests. As soon as
2373          * any of them are completed we know that
2374          * the entire FIT msg was transferred.
2375          * Only the first completed request will
2376          * match the FIT msg buffer id. The FIT
2377          * msg buffer id is immediately updated.
2378          * When subsequent requests complete the FIT
2379          * msg buffer id won't match, so we know
2380          * quite cheaply that it is already done.
2381          */
2382         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2383         SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2384
2385         skmsg = &skdev->skmsg_table[msg_slot];
2386         if (skmsg->id == skreq->fitmsg_id) {
2387                 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2388                 SKD_ASSERT(skmsg->outstanding > 0);
2389                 skmsg->outstanding--;
2390                 if (skmsg->outstanding == 0) {
2391                         skmsg->state = SKD_MSG_STATE_IDLE;
2392                         skmsg->id += SKD_ID_INCR;
2393                         skmsg->next = skdev->skmsg_free_list;
2394                         skdev->skmsg_free_list = skmsg;
2395                 }
2396         }
2397
2398         /*
2399          * Decrease the number of active requests.
2400          * Also decrements the count in the timeout slot.
2401          */
2402         SKD_ASSERT(skdev->in_flight > 0);
2403         skdev->in_flight -= 1;
2404
2405         timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2406         SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2407         skdev->timeout_slot[timo_slot] -= 1;
2408
2409         /*
2410          * Reset backpointer
2411          */
2412         skreq->req = NULL;
2413
2414         /*
2415          * Reclaim the skd_request_context
2416          */
2417         skreq->state = SKD_REQ_STATE_IDLE;
2418         skreq->id += SKD_ID_INCR;
2419         skreq->next = skdev->skreq_free_list;
2420         skdev->skreq_free_list = skreq;
2421 }
2422
2423 #define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2424
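     /*
      * Patch the "supported VPD pages" INQUIRY response so that it also
      * lists the driver's own page (DRIVER_INQ_EVPD_PAGE_CODE), keeping
      * the page codes in ascending order.
      */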
2425 static void skd_do_inq_page_00(struct skd_device *skdev,
2426                                volatile struct fit_completion_entry_v1 *skcomp,
2427                                volatile struct fit_comp_error_info *skerr,
2428                                uint8_t *cdb, uint8_t *buf)
2429 {
2430         uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2431
2432         /* Caller requested "supported pages".  The driver needs to insert
2433          * its page.
2434          */
2435         pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2436                  skdev->name, __func__, __LINE__);
2437
2438         /* If the device rejected the request because the CDB was
2439          * improperly formed, then just leave.
2440          */
2441         if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2442             skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2443                 return;
2444
2445         /* Get the amount of space the caller allocated */
2446         max_bytes = (cdb[3] << 8) | cdb[4];
2447
2448         /* Get the number of pages actually returned by the device */
2449         drive_pages = (buf[2] << 8) | buf[3];
2450         drive_bytes = drive_pages + 4;
2451         new_size = drive_pages + 1;
2452
2453         /* Supported pages must be in numerical order, so find where
2454          * the driver page needs to be inserted into the list of
2455          * pages returned by the device.
2456          */
2457         for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2458                 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2459                         return; /* Device is using this page code; abort. */
2460                 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2461                         break;
2462         }
2463
2464         if (insert_pt < max_bytes) {
2465                 uint16_t u;
2466
2467                 /* Shift everything up one byte to make room. */
2468                 for (u = new_size + 3; u > insert_pt; u--)
2469                         buf[u] = buf[u - 1];
2470                 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2471
2472                 /* SCSI byte order increment of num_returned_bytes by 1 */
2473                 skcomp->num_returned_bytes =
2474                         be32_to_cpu(skcomp->num_returned_bytes) + 1;
2475                 skcomp->num_returned_bytes =
2476                         cpu_to_be32(skcomp->num_returned_bytes);
2477         }
2478
2479         /* update page length field to reflect the driver's page too */
2480         buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2481         buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2482 }
2483
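     /*
      * Read the PCIe Link Status register to report the negotiated link
      * speed and lane count for the driver INQUIRY page.
      */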
2484 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2485 {
2486         int pcie_reg;
2487         u16 pci_bus_speed;
2488         u8 pci_lanes;
2489
2490         pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2491         if (pcie_reg) {
2492                 u16 linksta;
2493                 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2494
2495                 pci_bus_speed = linksta & 0xF;
2496                 pci_lanes = (linksta & 0x3F0) >> 4;
2497         } else {
2498                 *speed = STEC_LINK_UNKNOWN;
2499                 *width = 0xFF;
2500                 return;
2501         }
2502
2503         switch (pci_bus_speed) {
2504         case 1:
2505                 *speed = STEC_LINK_2_5GTS;
2506                 break;
2507         case 2:
2508                 *speed = STEC_LINK_5GTS;
2509                 break;
2510         case 3:
2511                 *speed = STEC_LINK_8GTS;
2512                 break;
2513         default:
2514                 *speed = STEC_LINK_UNKNOWN;
2515                 break;
2516         }
2517
2518         if (pci_lanes <= 0x20)
2519                 *width = pci_lanes;
2520         else
2521                 *width = 0xFF;
2522 }
2523
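     /*
      * Synthesize the driver's own VPD page: PCI bus/device/function,
      * PCI IDs, negotiated link speed/width and the driver version
      * string.
      */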
2524 static void skd_do_inq_page_da(struct skd_device *skdev,
2525                                volatile struct fit_completion_entry_v1 *skcomp,
2526                                volatile struct fit_comp_error_info *skerr,
2527                                uint8_t *cdb, uint8_t *buf)
2528 {
2529         struct pci_dev *pdev = skdev->pdev;
2530         unsigned max_bytes;
2531         struct driver_inquiry_data inq;
2532         u16 val;
2533
2534         pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2535                  skdev->name, __func__, __LINE__);
2536
2537         memset(&inq, 0, sizeof(inq));
2538
2539         inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2540
2541         skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2542         inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2543         inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2544         inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2545
2546         pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2547         inq.pcie_vendor_id = cpu_to_be16(val);
2548
2549         pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2550         inq.pcie_device_id = cpu_to_be16(val);
2551
2552         pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2553         inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2554
2555         pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2556         inq.pcie_subsystem_device_id = cpu_to_be16(val);
2557
2558         /* Driver version, fixed length, padded with spaces on the right */
2559         inq.driver_version_length = sizeof(inq.driver_version);
2560         memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2561         memcpy(inq.driver_version, DRV_VER_COMPL,
2562                min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2563
2564         inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2565
2566         /* Clear the error set by the device */
2567         skcomp->status = SAM_STAT_GOOD;
2568         memset((void *)skerr, 0, sizeof(*skerr));
2569
2570         /* copy response into output buffer */
2571         max_bytes = (cdb[3] << 8) | cdb[4];
2572         memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2573
2574         skcomp->num_returned_bytes =
2575                 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2576 }
2577
2578 static void skd_do_driver_inq(struct skd_device *skdev,
2579                               volatile struct fit_completion_entry_v1 *skcomp,
2580                               volatile struct fit_comp_error_info *skerr,
2581                               uint8_t *cdb, uint8_t *buf)
2582 {
2583         if (!buf)
2584                 return;
2585         else if (cdb[0] != INQUIRY)
2586                 return;         /* Not an INQUIRY */
2587         else if ((cdb[1] & 1) == 0)
2588                 return;         /* EVPD not set */
2589         else if (cdb[2] == 0)
2590                 /* Need to add driver's page to supported pages list */
2591                 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2592         else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2593                 /* Caller requested driver's page */
2594                 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2595 }
2596
2597 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2598 {
2599         if (!sg)
2600                 return NULL;
2601         if (!sg_page(sg))
2602                 return NULL;
2603         return sg_virt(sg);
2604 }
2605
2606 static void skd_process_scsi_inq(struct skd_device *skdev,
2607                                  volatile struct fit_completion_entry_v1
2608                                  *skcomp,
2609                                  volatile struct fit_comp_error_info *skerr,
2610                                  struct skd_special_context *skspcl)
2611 {
2612         uint8_t *buf;
2613         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2614         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2615
2616         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2617                             skspcl->req.sg_data_dir);
2618         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2619
2620         if (buf)
2621                 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2622 }
2623
2624
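     /*
      * Drain the completion ring: consume entries while their cycle bit
      * matches, match each entry to its request by tag, and complete or
      * release that request.  Returns 1 if processing stopped early
      * because 'limit' entries were handled.
      */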
2625 static int skd_isr_completion_posted(struct skd_device *skdev,
2626                                         int limit, int *enqueued)
2627 {
2628         volatile struct fit_completion_entry_v1 *skcmp = NULL;
2629         volatile struct fit_comp_error_info *skerr;
2630         u16 req_id;
2631         u32 req_slot;
2632         struct skd_request_context *skreq;
2633         u16 cmp_cntxt = 0;
2634         u8 cmp_status = 0;
2635         u8 cmp_cycle = 0;
2636         u32 cmp_bytes = 0;
2637         int rc = 0;
2638         int processed = 0;
2639
2640         for (;; ) {
2641                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2642
2643                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2644                 cmp_cycle = skcmp->cycle;
2645                 cmp_cntxt = skcmp->tag;
2646                 cmp_status = skcmp->status;
2647                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2648
2649                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2650
2651                 pr_debug("%s:%s:%d "
2652                          "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2653                          "busy=%d rbytes=0x%x proto=%d\n",
2654                          skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2655                          skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2656                          skdev->in_flight, cmp_bytes, skdev->proto_ver);
2657
2658                 if (cmp_cycle != skdev->skcomp_cycle) {
2659                         pr_debug("%s:%s:%d end of completions\n",
2660                                  skdev->name, __func__, __LINE__);
2661                         break;
2662                 }
2663                 /*
2664                  * Update the completion queue head index and possibly
2665                  * the completion cycle count. 8-bit wrap-around.
2666                  */
2667                 skdev->skcomp_ix++;
2668                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2669                         skdev->skcomp_ix = 0;
2670                         skdev->skcomp_cycle++;
2671                 }
2672
2673                 /*
2674                  * The command context is a unique 32-bit ID. The low order
2675                  * bits help locate the request. The request is usually a
2676                  * r/w request (see skd_start() above) or a special request.
2677                  */
2678                 req_id = cmp_cntxt;
2679                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2680
2681                 /* Is this other than a r/w request? */
2682                 if (req_slot >= skdev->num_req_context) {
2683                         /*
2684                          * This is not a completion for a r/w request.
2685                          */
2686                         skd_complete_other(skdev, skcmp, skerr);
2687                         continue;
2688                 }
2689
2690                 skreq = &skdev->skreq_table[req_slot];
2691
2692                 /*
2693                  * Make sure the request ID for the slot matches.
2694                  */
2695                 if (skreq->id != req_id) {
2696                         pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2697                                  skdev->name, __func__, __LINE__,
2698                                  req_id, skreq->id);
2699                         {
2700                                 u16 new_id = cmp_cntxt;
2701                                 pr_err("(%s): Completion mismatch "
2702                                        "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2703                                        skd_name(skdev), req_id,
2704                                        skreq->id, new_id);
2705
2706                                 continue;
2707                         }
2708                 }
2709
2710                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2711
2712                 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2713                         pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2714                                  skdev->name, __func__, __LINE__,
2715                                  skreq, skreq->id);
2716                         /* a previously timed out command can
2717                          * now be cleaned up */
2718                         skd_release_skreq(skdev, skreq);
2719                         continue;
2720                 }
2721
2722                 skreq->completion = *skcmp;
2723                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2724                         skreq->err_info = *skerr;
2725                         skd_log_check_status(skdev, cmp_status, skerr->key,
2726                                              skerr->code, skerr->qual,
2727                                              skerr->fruc);
2728                 }
2729                 /* Release DMA resources for the request. */
2730                 if (skreq->n_sg > 0)
2731                         skd_postop_sg_list(skdev, skreq);
2732
2733                 if (!skreq->req) {
2734                         pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2735                                  "req=0x%x req_id=0x%x\n",
2736                                  skdev->name, __func__, __LINE__,
2737                                  skreq, skreq->id, req_id);
2738                 } else {
2739                         /*
2740                          * Capture the outcome and post it back to the
2741                          * native request.
2742                          */
2743                         if (likely(cmp_status == SAM_STAT_GOOD))
2744                                 skd_end_request(skdev, skreq, 0);
2745                         else
2746                                 skd_resolve_req_exception(skdev, skreq);
2747                 }
2748
2749                 /*
2750                  * Release the skreq, its FIT msg (if one), timeout slot,
2751                  * and queue depth.
2752                  */
2753                 skd_release_skreq(skdev, skreq);
2754
2755                 /* A zero skd_isr_comp_limit means no limit */
2756                 if (limit) {
2757                         if (++processed >= limit) {
2758                                 rc = 1;
2759                                 break;
2760                         }
2761                 }
2762         }
2763
2764         if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2765                 && (skdev->in_flight) == 0) {
2766                 skdev->state = SKD_DRVR_STATE_PAUSED;
2767                 wake_up_interruptible(&skdev->waitq);
2768         }
2769
2770         return rc;
2771 }
2772
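     /*
      * Dispatch a completion that does not belong to a normal r/w
      * request, i.e. a special (SG_IO) or driver-internal request.
      * Bad or stale IDs are silently dropped.
      */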
2773 static void skd_complete_other(struct skd_device *skdev,
2774                                volatile struct fit_completion_entry_v1 *skcomp,
2775                                volatile struct fit_comp_error_info *skerr)
2776 {
2777         u32 req_id = 0;
2778         u32 req_table;
2779         u32 req_slot;
2780         struct skd_special_context *skspcl;
2781
2782         req_id = skcomp->tag;
2783         req_table = req_id & SKD_ID_TABLE_MASK;
2784         req_slot = req_id & SKD_ID_SLOT_MASK;
2785
2786         pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2787                  skdev->name, __func__, __LINE__,
2788                  req_table, req_id, req_slot);
2789
2790         /*
2791          * Based on the request id, determine how to dispatch this completion.
2792          * This switch/case finds the good cases and forwards the
2793          * completion entry. Errors are reported below the switch.
2794          */
2795         switch (req_table) {
2796         case SKD_ID_RW_REQUEST:
2797                 /*
2798                  * The caller, skd_isr_completion_posted() above,
2799                  * handles r/w requests. The only way we get here
2800                  * is if the req_slot is out of bounds.
2801                  */
2802                 break;
2803
2804         case SKD_ID_SPECIAL_REQUEST:
2805                 /*
2806                  * Make sure the req_slot is in bounds and that the id
2807                  * matches.
2808                  */
2809                 if (req_slot < skdev->n_special) {
2810                         skspcl = &skdev->skspcl_table[req_slot];
2811                         if (skspcl->req.id == req_id &&
2812                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2813                                 skd_complete_special(skdev,
2814                                                      skcomp, skerr, skspcl);
2815                                 return;
2816                         }
2817                 }
2818                 break;
2819
2820         case SKD_ID_INTERNAL:
2821                 if (req_slot == 0) {
2822                         skspcl = &skdev->internal_skspcl;
2823                         if (skspcl->req.id == req_id &&
2824                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2825                                 skd_complete_internal(skdev,
2826                                                       skcomp, skerr, skspcl);
2827                                 return;
2828                         }
2829                 }
2830                 break;
2831
2832         case SKD_ID_FIT_MSG:
2833                 /*
2834                  * These ids should never appear in a completion record.
2835                  */
2836                 break;
2837
2838         default:
2839                 /*
2840                  * These ids should never appear anywhere.
2841                  */
2842                 break;
2843         }
2844
2845         /*
2846          * If we get here it is a bad or stale id.
2847          */
2848 }
2849
2850 static void skd_complete_special(struct skd_device *skdev,
2851                                  volatile struct fit_completion_entry_v1
2852                                  *skcomp,
2853                                  volatile struct fit_comp_error_info *skerr,
2854                                  struct skd_special_context *skspcl)
2855 {
2856         pr_debug("%s:%s:%d  completing special request %p\n",
2857                  skdev->name, __func__, __LINE__, skspcl);
2858         if (skspcl->orphaned) {
2859                 /* Discard orphaned request */
2860                 /* ?: Can this release directly or does it need
2861                  * to use a worker? */
2862                 pr_debug("%s:%s:%d release orphaned %p\n",
2863                          skdev->name, __func__, __LINE__, skspcl);
2864                 skd_release_special(skdev, skspcl);
2865                 return;
2866         }
2867
2868         skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2869
2870         skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2871         skspcl->req.completion = *skcomp;
2872         skspcl->req.err_info = *skerr;
2873
2874         skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2875                              skerr->code, skerr->qual, skerr->fruc);
2876
2877         wake_up_interruptible(&skdev->waitq);
2878 }
2879
2880 /* assume spinlock is already held */
2881 static void skd_release_special(struct skd_device *skdev,
2882                                 struct skd_special_context *skspcl)
2883 {
2884         int i, was_depleted;
2885
2886         for (i = 0; i < skspcl->req.n_sg; i++) {
2887                 struct page *page = sg_page(&skspcl->req.sg[i]);
2888                 __free_page(page);
2889         }
2890
2891         was_depleted = (skdev->skspcl_free_list == NULL);
2892
2893         skspcl->req.state = SKD_REQ_STATE_IDLE;
2894         skspcl->req.id += SKD_ID_INCR;
2895         skspcl->req.next =
2896                 (struct skd_request_context *)skdev->skspcl_free_list;
2897         skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2898
2899         if (was_depleted) {
2900                 pr_debug("%s:%s:%d skspcl was depleted\n",
2901                          skdev->name, __func__, __LINE__);
2902                 /* Free list was depleted. There might be waiters. */
2903                 wake_up_interruptible(&skdev->waitq);
2904         }
2905 }
2906
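/*
 * Zero the completion ring (entries plus error info) and reset the
 * consumer index and cycle value; done whenever the completion queue is
 * handed back to the device during the FIT handshake.
 */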
2907 static void skd_reset_skcomp(struct skd_device *skdev)
2908 {
2909         u32 nbytes;
2910         struct fit_completion_entry_v1 *skcomp;
2911
2912         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2913         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2914
2915         memset(skdev->skcomp_table, 0, nbytes);
2916
2917         skdev->skcomp_ix = 0;
2918         skdev->skcomp_cycle = 1;
2919 }
2920
2921 /*
2922  *****************************************************************************
2923  * INTERRUPTS
2924  *****************************************************************************
2925  */
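/*
 * Deferred completion processing, scheduled from the interrupt handlers
 * when they hit their per-interrupt completion limit.  Runs with the
 * device lock held, drains the completion queue with no limit, then
 * restarts the request function to keep the queue moving.
 */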
2926 static void skd_completion_worker(struct work_struct *work)
2927 {
2928         struct skd_device *skdev =
2929                 container_of(work, struct skd_device, completion_worker);
2930         unsigned long flags;
2931         int flush_enqueued = 0;
2932
2933         spin_lock_irqsave(&skdev->lock, flags);
2934
2935         /*
2936          * pass in limit=0, which means no limit;
2937          * process everything in the completion queue
2938          */
2939         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2940         skd_request_fn(skdev->queue);
2941
2942         spin_unlock_irqrestore(&skdev->lock, flags);
2943 }
2944
2945 static void skd_isr_msg_from_dev(struct skd_device *skdev);
2946
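/*
 * Legacy/MSI interrupt handler.  Loops reading FIT_INT_STATUS_HOST and
 * acking the bits it recognizes, dispatching completion, firmware state
 * change, and message-from-device work as indicated.  Completion
 * processing beyond skd_isr_comp_limit entries is deferred to
 * skd_completion_worker so the handler itself stays short.
 */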
2947 static irqreturn_t
2948 skd_isr(int irq, void *ptr)
2949 {
2950         struct skd_device *skdev;
2951         u32 intstat;
2952         u32 ack;
2953         int rc = 0;
2954         int deferred = 0;
2955         int flush_enqueued = 0;
2956
2957         skdev = (struct skd_device *)ptr;
2958         spin_lock(&skdev->lock);
2959
2960         for (;; ) {
2961                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2962
2963                 ack = FIT_INT_DEF_MASK;
2964                 ack &= intstat;
2965
2966                 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
2967                          skdev->name, __func__, __LINE__, intstat, ack);
2968
2969                 /* As long as there is an interrupt pending on the device,
2970                  * keep looping.  When none remain, get out; if we never did
2971                  * any processing, defer to the completion handler anyway.
2972                  */
2973                 if (ack == 0) {
2974                         /* No interrupts on device, but run the completion
2975                          * processor anyway?
2976                          */
2977                         if (rc == 0)
2978                                 if (likely (skdev->state
2979                                         == SKD_DRVR_STATE_ONLINE))
2980                                         deferred = 1;
2981                         break;
2982                 }
2983
2984                 rc = IRQ_HANDLED;
2985
2986                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2987
2988                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2989                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2990                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
2991                                 /*
2992                                  * If we have already deferred completion
2993                                  * processing, don't bother running it again
2994                                  */
2995                                 if (deferred == 0)
2996                                         deferred =
2997                                                 skd_isr_completion_posted(skdev,
2998                                                 skd_isr_comp_limit, &flush_enqueued);
2999                         }
3000
3001                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3002                                 skd_isr_fwstate(skdev);
3003                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
3004                                     skdev->state ==
3005                                     SKD_DRVR_STATE_DISAPPEARED) {
3006                                         spin_unlock(&skdev->lock);
3007                                         return rc;
3008                                 }
3009                         }
3010
3011                         if (intstat & FIT_ISH_MSG_FROM_DEV)
3012                                 skd_isr_msg_from_dev(skdev);
3013                 }
3014         }
3015
3016         if (unlikely(flush_enqueued))
3017                 skd_request_fn(skdev->queue);
3018
3019         if (deferred)
3020                 schedule_work(&skdev->completion_worker);
3021         else if (!flush_enqueued)
3022                 skd_request_fn(skdev->queue);
3023
3024         spin_unlock(&skdev->lock);
3025
3026         return rc;
3027 }
3028
3029 static void skd_drive_fault(struct skd_device *skdev)
3030 {
3031         skdev->state = SKD_DRVR_STATE_FAULT;
3032         pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3033 }
3034
3035 static void skd_drive_disappeared(struct skd_device *skdev)
3036 {
3037         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3038         pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3039 }
3040
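/*
 * Handle a firmware state change interrupt: sample FIT_STATUS, record the
 * new drive state, and move the driver state machine accordingly (soft
 * reset, request recovery, queue restart, or just a new timer countdown,
 * depending on the state reported).
 */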
3041 static void skd_isr_fwstate(struct skd_device *skdev)
3042 {
3043         u32 sense;
3044         u32 state;
3045         u32 mtd;
3046         int prev_driver_state = skdev->state;
3047
3048         sense = SKD_READL(skdev, FIT_STATUS);
3049         state = sense & FIT_SR_DRIVE_STATE_MASK;
3050
3051         pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3052                skd_name(skdev),
3053                skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3054                skd_drive_state_to_str(state), state);
3055
3056         skdev->drive_state = state;
3057
3058         switch (skdev->drive_state) {
3059         case FIT_SR_DRIVE_INIT:
3060                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3061                         skd_disable_interrupts(skdev);
3062                         break;
3063                 }
3064                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3065                         skd_recover_requests(skdev, 0);
3066                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3067                         skdev->timer_countdown = SKD_STARTING_TIMO;
3068                         skdev->state = SKD_DRVR_STATE_STARTING;
3069                         skd_soft_reset(skdev);
3070                         break;
3071                 }
3072                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3073                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3074                 skdev->last_mtd = mtd;
3075                 break;
3076
3077         case FIT_SR_DRIVE_ONLINE:
3078                 skdev->cur_max_queue_depth = skd_max_queue_depth;
3079                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3080                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3081
3082                 skdev->queue_low_water_mark =
3083                         skdev->cur_max_queue_depth * 2 / 3 + 1;
3084                 if (skdev->queue_low_water_mark < 1)
3085                         skdev->queue_low_water_mark = 1;
3086                 pr_info(
3087                        "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3088                        skd_name(skdev),
3089                        skdev->cur_max_queue_depth,
3090                        skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3091
3092                 skd_refresh_device_data(skdev);
3093                 break;
3094
3095         case FIT_SR_DRIVE_BUSY:
3096                 skdev->state = SKD_DRVR_STATE_BUSY;
3097                 skdev->timer_countdown = SKD_BUSY_TIMO;
3098                 skd_quiesce_dev(skdev);
3099                 break;
3100         case FIT_SR_DRIVE_BUSY_SANITIZE:
3101                 /* set a timer for 3 seconds; we'll abort any unfinished
3102                  * commands after it expires
3103                  */
3104                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3105                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3106                 blk_start_queue(skdev->queue);
3107                 break;
3108         case FIT_SR_DRIVE_BUSY_ERASE:
3109                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3110                 skdev->timer_countdown = SKD_BUSY_TIMO;
3111                 break;
3112         case FIT_SR_DRIVE_OFFLINE:
3113                 skdev->state = SKD_DRVR_STATE_IDLE;
3114                 break;
3115         case FIT_SR_DRIVE_SOFT_RESET:
3116                 switch (skdev->state) {
3117                 case SKD_DRVR_STATE_STARTING:
3118                 case SKD_DRVR_STATE_RESTARTING:
3119                         /* Expected by a caller of skd_soft_reset() */
3120                         break;
3121                 default:
3122                         skdev->state = SKD_DRVR_STATE_RESTARTING;
3123                         break;
3124                 }
3125                 break;
3126         case FIT_SR_DRIVE_FW_BOOTING:
3127                 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3128                          skdev->name, __func__, __LINE__, skdev->name);
3129                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3130                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3131                 break;
3132
3133         case FIT_SR_DRIVE_DEGRADED:
3134         case FIT_SR_PCIE_LINK_DOWN:
3135         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3136                 break;
3137
3138         case FIT_SR_DRIVE_FAULT:
3139                 skd_drive_fault(skdev);
3140                 skd_recover_requests(skdev, 0);
3141                 blk_start_queue(skdev->queue);
3142                 break;
3143
3144         /* PCIe bus returned all Fs? */
3145         case 0xFF:
3146                 pr_info("(%s): state=0x%x sense=0x%x\n",
3147                        skd_name(skdev), state, sense);
3148                 skd_drive_disappeared(skdev);
3149                 skd_recover_requests(skdev, 0);
3150                 blk_start_queue(skdev->queue);
3151                 break;
3152         default:
3153                 /*
3154                  * Unknown FW state. Wait for a state we recognize.
3155                  */
3156                 break;
3157         }
3158         pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3159                skd_name(skdev),
3160                skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3161                skd_skdev_state_to_str(skdev->state), skdev->state);
3162 }
3163
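/*
 * Abort or requeue everything that is still outstanding.  Busy block-layer
 * requests are either requeued (when 'requeue' is set and the retry count
 * allows it) or completed with -EIO; busy FIT messages and special requests
 * are reclaimed, and the free lists, timeout slots, and in-flight count are
 * rebuilt from scratch.
 */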
3164 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3165 {
3166         int i;
3167
3168         for (i = 0; i < skdev->num_req_context; i++) {
3169                 struct skd_request_context *skreq = &skdev->skreq_table[i];
3170
3171                 if (skreq->state == SKD_REQ_STATE_BUSY) {
3172                         skd_log_skreq(skdev, skreq, "recover");
3173
3174                         SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3175                         SKD_ASSERT(skreq->req != NULL);
3176
3177                         /* Release DMA resources for the request. */
3178                         if (skreq->n_sg > 0)
3179                                 skd_postop_sg_list(skdev, skreq);
3180
3181                         if (requeue &&
3182                             (unsigned long) ++skreq->req->special <
3183                             SKD_MAX_RETRIES)
3184                                 blk_requeue_request(skdev->queue, skreq->req);
3185                         else
3186                                 skd_end_request(skdev, skreq, -EIO);
3187
3188                         skreq->req = NULL;
3189
3190                         skreq->state = SKD_REQ_STATE_IDLE;
3191                         skreq->id += SKD_ID_INCR;
3192                 }
3193                 if (i > 0)
3194                         skreq[-1].next = skreq;
3195                 skreq->next = NULL;
3196         }
3197         skdev->skreq_free_list = skdev->skreq_table;
3198
3199         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3200                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3201
3202                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3203                         skd_log_skmsg(skdev, skmsg, "salvaged");
3204                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3205                         skmsg->state = SKD_MSG_STATE_IDLE;
3206                         skmsg->id += SKD_ID_INCR;
3207                 }
3208                 if (i > 0)
3209                         skmsg[-1].next = skmsg;
3210                 skmsg->next = NULL;
3211         }
3212         skdev->skmsg_free_list = skdev->skmsg_table;
3213
3214         for (i = 0; i < skdev->n_special; i++) {
3215                 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3216
3217                 /* If orphaned, reclaim it because it has already been reported
3218                  * to the process as an error (it was just waiting for
3219                  * a completion that didn't come, and now it will never come).
3220                  * If busy, change to a state that will cause it to error
3221                  * out in the wait routine and let it do the normal
3222                  * reporting and reclaiming.
3223                  */
3224                 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3225                         if (skspcl->orphaned) {
3226                                 pr_debug("%s:%s:%d orphaned %p\n",
3227                                          skdev->name, __func__, __LINE__,
3228                                          skspcl);
3229                                 skd_release_special(skdev, skspcl);
3230                         } else {
3231                                 pr_debug("%s:%s:%d not orphaned %p\n",
3232                                          skdev->name, __func__, __LINE__,
3233                                          skspcl);
3234                                 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3235                         }
3236                 }
3237         }
3238         skdev->skspcl_free_list = skdev->skspcl_table;
3239
3240         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3241                 skdev->timeout_slot[i] = 0;
3242
3243         skdev->in_flight = 0;
3244 }
3245
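/*
 * Process a message-from-device interrupt.  During startup this walks the
 * FIT handshake one step per message: FITFW_INIT (protocol check),
 * GET_CMDQ_DEPTH, SET_COMPQ_DEPTH, SET_COMPQ_ADDR, CMD_LOG_HOST_ID,
 * CMD_LOG_TIME_STAMP_LO/HI, and finally ARM_QUEUE.  Acks for messages we
 * did not send are ignored.
 */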
3246 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3247 {
3248         u32 mfd;
3249         u32 mtd;
3250         u32 data;
3251
3252         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3253
3254         pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3255                  skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3256
3257         /* ignore any mtd that is an ack for something we didn't send */
3258         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3259                 return;
3260
3261         switch (FIT_MXD_TYPE(mfd)) {
3262         case FIT_MTD_FITFW_INIT:
3263                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3264
3265                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3266                         pr_err("(%s): protocol mismatch\n",
3267                                skdev->name);
3268                         pr_err("(%s):   got=%d support=%d\n",
3269                                skdev->name, skdev->proto_ver,
3270                                FIT_PROTOCOL_VERSION_1);
3271                         pr_err("(%s):   please upgrade driver\n",
3272                                skdev->name);
3273                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3274                         skd_soft_reset(skdev);
3275                         break;
3276                 }
3277                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3278                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3279                 skdev->last_mtd = mtd;
3280                 break;
3281
3282         case FIT_MTD_GET_CMDQ_DEPTH:
3283                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3284                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3285                                    SKD_N_COMPLETION_ENTRY);
3286                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3287                 skdev->last_mtd = mtd;
3288                 break;
3289
3290         case FIT_MTD_SET_COMPQ_DEPTH:
3291                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3292                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3293                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3294                 skdev->last_mtd = mtd;
3295                 break;
3296
3297         case FIT_MTD_SET_COMPQ_ADDR:
3298                 skd_reset_skcomp(skdev);
3299                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3300                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3301                 skdev->last_mtd = mtd;
3302                 break;
3303
3304         case FIT_MTD_CMD_LOG_HOST_ID:
3305                 skdev->connect_time_stamp = get_seconds();
3306                 data = skdev->connect_time_stamp & 0xFFFF;
3307                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3308                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3309                 skdev->last_mtd = mtd;
3310                 break;
3311
3312         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3313                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3314                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3315                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3316                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3317                 skdev->last_mtd = mtd;
3318                 break;
3319
3320         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3321                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3322                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3323                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3324                 skdev->last_mtd = mtd;
3325
3326                 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3327                        skd_name(skdev),
3328                        skdev->connect_time_stamp, skdev->drive_jiffies);
3329                 break;
3330
3331         case FIT_MTD_ARM_QUEUE:
3332                 skdev->last_mtd = 0;
3333                 /*
3334                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3335                  */
3336                 break;
3337
3338         default:
3339                 break;
3340         }
3341 }
3342
3343 static void skd_disable_interrupts(struct skd_device *skdev)
3344 {
3345         u32 sense;
3346
3347         sense = SKD_READL(skdev, FIT_CONTROL);
3348         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3349         SKD_WRITEL(skdev, sense, FIT_CONTROL);
3350         pr_debug("%s:%s:%d sense 0x%x\n",
3351                  skdev->name, __func__, __LINE__, sense);
3352
3353         /* Note that all 1s are written. A 1 bit means
3354          * disable, a 0 means enable.
3355          */
3356         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3357 }
3358
3359 static void skd_enable_interrupts(struct skd_device *skdev)
3360 {
3361         u32 val;
3362
3363         /* unmask interrupts first */
3364         val = FIT_ISH_FW_STATE_CHANGE +
3365               FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3366
3367         /* Note that the complement of the mask is written. A 1 bit means
3368          * disable, a 0 means enable. */
3369         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3370         pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3371                  skdev->name, __func__, __LINE__, ~val);
3372
3373         val = SKD_READL(skdev, FIT_CONTROL);
3374         val |= FIT_CR_ENABLE_INTERRUPTS;
3375         pr_debug("%s:%s:%d control=0x%x\n",
3376                  skdev->name, __func__, __LINE__, val);
3377         SKD_WRITEL(skdev, val, FIT_CONTROL);
3378 }
3379
3380 /*
3381  *****************************************************************************
3382  * START, STOP, RESTART, QUIESCE, UNQUIESCE
3383  *****************************************************************************
3384  */
3385
3386 static void skd_soft_reset(struct skd_device *skdev)
3387 {
3388         u32 val;
3389
3390         val = SKD_READL(skdev, FIT_CONTROL);
3391         val |= (FIT_CR_SOFT_RESET);
3392         pr_debug("%s:%s:%d control=0x%x\n",
3393                  skdev->name, __func__, __LINE__, val);
3394         SKD_WRITEL(skdev, val, FIT_CONTROL);
3395 }
3396
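/*
 * Bring the device up: ack any stale interrupts, sample the current drive
 * state, enable interrupts, and then take the first step of the startup
 * sequence appropriate to that state (usually a soft reset, which kicks
 * off the FIT handshake above).
 */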
3397 static void skd_start_device(struct skd_device *skdev)
3398 {
3399         unsigned long flags;
3400         u32 sense;
3401         u32 state;
3402
3403         spin_lock_irqsave(&skdev->lock, flags);
3404
3405         /* ack all ghost interrupts */
3406         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3407
3408         sense = SKD_READL(skdev, FIT_STATUS);
3409
3410         pr_debug("%s:%s:%d initial status=0x%x\n",
3411                  skdev->name, __func__, __LINE__, sense);
3412
3413         state = sense & FIT_SR_DRIVE_STATE_MASK;
3414         skdev->drive_state = state;
3415         skdev->last_mtd = 0;
3416
3417         skdev->state = SKD_DRVR_STATE_STARTING;
3418         skdev->timer_countdown = SKD_STARTING_TIMO;
3419
3420         skd_enable_interrupts(skdev);
3421
3422         switch (skdev->drive_state) {
3423         case FIT_SR_DRIVE_OFFLINE:
3424                 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3425                 break;
3426
3427         case FIT_SR_DRIVE_FW_BOOTING:
3428                 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3429                          skdev->name, __func__, __LINE__, skdev->name);
3430                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3431                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3432                 break;
3433
3434         case FIT_SR_DRIVE_BUSY_SANITIZE:
3435                 pr_info("(%s): Start: BUSY_SANITIZE\n",
3436                        skd_name(skdev));
3437                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3438                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3439                 break;
3440
3441         case FIT_SR_DRIVE_BUSY_ERASE:
3442                 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3443                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3444                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3445                 break;
3446
3447         case FIT_SR_DRIVE_INIT:
3448         case FIT_SR_DRIVE_ONLINE:
3449                 skd_soft_reset(skdev);
3450                 break;
3451
3452         case FIT_SR_DRIVE_BUSY:
3453                 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3454                 skdev->state = SKD_DRVR_STATE_BUSY;
3455                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3456                 break;
3457
3458         case FIT_SR_DRIVE_SOFT_RESET:
3459                 pr_err("(%s) drive soft reset in prog\n",
3460                        skd_name(skdev));
3461                 break;
3462
3463         case FIT_SR_DRIVE_FAULT:
3464                 /* Fault state is bad...soft reset won't do it...
3465                  * Hard reset, maybe, but does it work on device?
3466                  * For now, just fault so the system doesn't hang.
3467                  */
3468                 skd_drive_fault(skdev);
3469                 /* start the queue so we can respond with error to requests */
3470                 pr_debug("%s:%s:%d starting %s queue\n",
3471                          skdev->name, __func__, __LINE__, skdev->name);
3472                 blk_start_queue(skdev->queue);
3473                 skdev->gendisk_on = -1;
3474                 wake_up_interruptible(&skdev->waitq);
3475                 break;
3476
3477         case 0xFF:
3478                 /* Most likely the device isn't there or isn't responding
3479                  * to the BAR1 addresses. */
3480                 skd_drive_disappeared(skdev);
3481                 /* start the queue so we can respond with error to requests */
3482                 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3483                          skdev->name, __func__, __LINE__, skdev->name);
3484                 blk_start_queue(skdev->queue);
3485                 skdev->gendisk_on = -1;
3486                 wake_up_interruptible(&skdev->waitq);
3487                 break;
3488
3489         default:
3490                 pr_err("(%s) Start: unknown state %x\n",
3491                        skd_name(skdev), skdev->drive_state);
3492                 break;
3493         }
3494
3495         state = SKD_READL(skdev, FIT_CONTROL);
3496         pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3497                  skdev->name, __func__, __LINE__, state);
3498
3499         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3500         pr_debug("%s:%s:%d Intr Status=0x%x\n",
3501                  skdev->name, __func__, __LINE__, state);
3502
3503         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3504         pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3505                  skdev->name, __func__, __LINE__, state);
3506
3507         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3508         pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3509                  skdev->name, __func__, __LINE__, state);
3510
3511         state = SKD_READL(skdev, FIT_HW_VERSION);
3512         pr_debug("%s:%s:%d HW version=0x%x\n",
3513                  skdev->name, __func__, __LINE__, state);
3514
3515         spin_unlock_irqrestore(&skdev->lock, flags);
3516 }
3517
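/*
 * Shut the device down: if it is online, issue a SYNCHRONIZE_CACHE via the
 * internal special request and wait up to 10 seconds for it to complete,
 * then disable interrupts, soft reset the device, and poll (100ms steps,
 * ~1 second total) for it to report FIT_SR_DRIVE_INIT.
 */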
3518 static void skd_stop_device(struct skd_device *skdev)
3519 {
3520         unsigned long flags;
3521         struct skd_special_context *skspcl = &skdev->internal_skspcl;
3522         u32 dev_state;
3523         int i;
3524
3525         spin_lock_irqsave(&skdev->lock, flags);
3526
3527         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3528                 pr_err("(%s): skd_stop_device not online no sync\n",
3529                        skd_name(skdev));
3530                 goto stop_out;
3531         }
3532
3533         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3534                 pr_err("(%s): skd_stop_device no special\n",
3535                        skd_name(skdev));
3536                 goto stop_out;
3537         }
3538
3539         skdev->state = SKD_DRVR_STATE_SYNCING;
3540         skdev->sync_done = 0;
3541
3542         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3543
3544         spin_unlock_irqrestore(&skdev->lock, flags);
3545
3546         wait_event_interruptible_timeout(skdev->waitq,
3547                                          (skdev->sync_done), (10 * HZ));
3548
3549         spin_lock_irqsave(&skdev->lock, flags);
3550
3551         switch (skdev->sync_done) {
3552         case 0:
3553                 pr_err("(%s): skd_stop_device no sync\n",
3554                        skd_name(skdev));
3555                 break;
3556         case 1:
3557                 pr_err("(%s): skd_stop_device sync done\n",
3558                        skd_name(skdev));
3559                 break;
3560         default:
3561                 pr_err("(%s): skd_stop_device sync error\n",
3562                        skd_name(skdev));
3563         }
3564
3565 stop_out:
3566         skdev->state = SKD_DRVR_STATE_STOPPING;
3567         spin_unlock_irqrestore(&skdev->lock, flags);
3568
3569         skd_kill_timer(skdev);
3570
3571         spin_lock_irqsave(&skdev->lock, flags);
3572         skd_disable_interrupts(skdev);
3573
3574         /* ensure all ints on device are cleared */
3575         /* soft reset the device to unload with a clean slate */
3576         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3577         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3578
3579         spin_unlock_irqrestore(&skdev->lock, flags);
3580
3581         /* poll every 100ms, 1 second timeout */
3582         for (i = 0; i < 10; i++) {
3583                 dev_state =
3584                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3585                 if (dev_state == FIT_SR_DRIVE_INIT)
3586                         break;
3587                 set_current_state(TASK_INTERRUPTIBLE);
3588                 schedule_timeout(msecs_to_jiffies(100));
3589         }
3590
3591         if (dev_state != FIT_SR_DRIVE_INIT)
3592                 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3593                        skd_name(skdev), dev_state);
3594 }
3595
3596 /* assume spinlock is held */
3597 static void skd_restart_device(struct skd_device *skdev)
3598 {
3599         u32 state;
3600
3601         /* ack all ghost interrupts */
3602         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3603
3604         state = SKD_READL(skdev, FIT_STATUS);
3605
3606         pr_debug("%s:%s:%d drive status=0x%x\n",
3607                  skdev->name, __func__, __LINE__, state);
3608
3609         state &= FIT_SR_DRIVE_STATE_MASK;
3610         skdev->drive_state = state;
3611         skdev->last_mtd = 0;
3612
3613         skdev->state = SKD_DRVR_STATE_RESTARTING;
3614         skdev->timer_countdown = SKD_RESTARTING_TIMO;
3615
3616         skd_soft_reset(skdev);
3617 }
3618
3619 /* assume spinlock is held */
3620 static int skd_quiesce_dev(struct skd_device *skdev)
3621 {
3622         int rc = 0;
3623
3624         switch (skdev->state) {
3625         case SKD_DRVR_STATE_BUSY:
3626         case SKD_DRVR_STATE_BUSY_IMMINENT:
3627                 pr_debug("%s:%s:%d stopping %s queue\n",
3628                          skdev->name, __func__, __LINE__, skdev->name);
3629                 blk_stop_queue(skdev->queue);
3630                 break;
3631         case SKD_DRVR_STATE_ONLINE:
3632         case SKD_DRVR_STATE_STOPPING:
3633         case SKD_DRVR_STATE_SYNCING:
3634         case SKD_DRVR_STATE_PAUSING:
3635         case SKD_DRVR_STATE_PAUSED:
3636         case SKD_DRVR_STATE_STARTING:
3637         case SKD_DRVR_STATE_RESTARTING:
3638         case SKD_DRVR_STATE_RESUMING:
3639         default:
3640                 rc = -EINVAL;
3641                 pr_debug("%s:%s:%d state [%d] not implemented\n",
3642                          skdev->name, __func__, __LINE__, skdev->state);
3643         }
3644         return rc;
3645 }
3646
3647 /* assume spinlock is held */
3648 static int skd_unquiesce_dev(struct skd_device *skdev)
3649 {
3650         int prev_driver_state = skdev->state;
3651
3652         skd_log_skdev(skdev, "unquiesce");
3653         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3654                 pr_debug("%s:%s:%d **** device already ONLINE\n",
3655                          skdev->name, __func__, __LINE__);
3656                 return 0;
3657         }
3658         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3659                 /*
3660                  * If there has been a state change to other than
3661                  * ONLINE, we will rely on controller state change
3662                  * to come back online and restart the queue.
3663                  * The BUSY state means that the driver is ready to
3664                  * continue normal processing but waiting for controller
3665                  * to become available.
3666                  */
3667                 skdev->state = SKD_DRVR_STATE_BUSY;
3668                 pr_debug("%s:%s:%d drive BUSY state\n",
3669                          skdev->name, __func__, __LINE__);
3670                 return 0;
3671         }
3672
3673         /*
3674          * The drive has just come online; the driver is either in startup,
3675          * paused performing a task, or busy waiting for hardware.
3676          */
3677         switch (skdev->state) {
3678         case SKD_DRVR_STATE_PAUSED:
3679         case SKD_DRVR_STATE_BUSY:
3680         case SKD_DRVR_STATE_BUSY_IMMINENT:
3681         case SKD_DRVR_STATE_BUSY_ERASE:
3682         case SKD_DRVR_STATE_STARTING:
3683         case SKD_DRVR_STATE_RESTARTING:
3684         case SKD_DRVR_STATE_FAULT:
3685         case SKD_DRVR_STATE_IDLE:
3686         case SKD_DRVR_STATE_LOAD:
3687                 skdev->state = SKD_DRVR_STATE_ONLINE;
3688                 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3689                        skd_name(skdev),
3690                        skd_skdev_state_to_str(prev_driver_state),
3691                        prev_driver_state, skd_skdev_state_to_str(skdev->state),
3692                        skdev->state);
3693                 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3694                          skdev->name, __func__, __LINE__);
3695                 pr_debug("%s:%s:%d starting %s queue\n",
3696                          skdev->name, __func__, __LINE__, skdev->name);
3697                 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3698                 blk_start_queue(skdev->queue);
3699                 skdev->gendisk_on = 1;
3700                 wake_up_interruptible(&skdev->waitq);
3701                 break;
3702
3703         case SKD_DRVR_STATE_DISAPPEARED:
3704         default:
3705                 pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
3706                          skdev->name, __func__, __LINE__,
3707                          skdev->state);
3708                 return -EBUSY;
3709         }
3710         return 0;
3711 }
3712
3713 /*
3714  *****************************************************************************
3715  * PCIe MSI/MSI-X INTERRUPT HANDLERS
3716  *****************************************************************************
3717  */
3718
3719 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3720 {
3721         struct skd_device *skdev = skd_host_data;
3722         unsigned long flags;
3723
3724         spin_lock_irqsave(&skdev->lock, flags);
3725         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3726                  skdev->name, __func__, __LINE__,
3727                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3728         pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3729                irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3730         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3731         spin_unlock_irqrestore(&skdev->lock, flags);
3732         return IRQ_HANDLED;
3733 }
3734
3735 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3736 {
3737         struct skd_device *skdev = skd_host_data;
3738         unsigned long flags;
3739
3740         spin_lock_irqsave(&skdev->lock, flags);
3741         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3742                  skdev->name, __func__, __LINE__,
3743                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3744         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3745         skd_isr_fwstate(skdev);
3746         spin_unlock_irqrestore(&skdev->lock, flags);
3747         return IRQ_HANDLED;
3748 }
3749
3750 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3751 {
3752         struct skd_device *skdev = skd_host_data;
3753         unsigned long flags;
3754         int flush_enqueued = 0;
3755         int deferred;
3756
3757         spin_lock_irqsave(&skdev->lock, flags);
3758         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3759                  skdev->name, __func__, __LINE__,
3760                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3761         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3762         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3763                                                 &flush_enqueued);
3764         if (flush_enqueued)
3765                 skd_request_fn(skdev->queue);
3766
3767         if (deferred)
3768                 schedule_work(&skdev->completion_worker);
3769         else if (!flush_enqueued)
3770                 skd_request_fn(skdev->queue);
3771
3772         spin_unlock_irqrestore(&skdev->lock, flags);
3773
3774         return IRQ_HANDLED;
3775 }
3776
3777 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3778 {
3779         struct skd_device *skdev = skd_host_data;
3780         unsigned long flags;
3781
3782         spin_lock_irqsave(&skdev->lock, flags);
3783         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3784                  skdev->name, __func__, __LINE__,
3785                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3786         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3787         skd_isr_msg_from_dev(skdev);
3788         spin_unlock_irqrestore(&skdev->lock, flags);
3789         return IRQ_HANDLED;
3790 }
3791
3792 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3793 {
3794         struct skd_device *skdev = skd_host_data;
3795         unsigned long flags;
3796
3797         spin_lock_irqsave(&skdev->lock, flags);
3798         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3799                  skdev->name, __func__, __LINE__,
3800                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3801         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3802         spin_unlock_irqrestore(&skdev->lock, flags);
3803         return IRQ_HANDLED;
3804 }
3805
3806 /*
3807  *****************************************************************************
3808  * PCIe MSI/MSI-X SETUP
3809  *****************************************************************************
3810  */
3811
3812 struct skd_msix_entry {
3813         char isr_name[30];
3814 };
3815
3816 struct skd_init_msix_entry {
3817         const char *name;
3818         irq_handler_t handler;
3819 };
3820
3821 #define SKD_MAX_MSIX_COUNT              13
3822 #define SKD_MIN_MSIX_COUNT              7
3823 #define SKD_BASE_MSIX_IRQ               4
3824
3825 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3826         { "(DMA 0)",        skd_reserved_isr },
3827         { "(DMA 1)",        skd_reserved_isr },
3828         { "(DMA 2)",        skd_reserved_isr },
3829         { "(DMA 3)",        skd_reserved_isr },
3830         { "(State Change)", skd_statec_isr   },
3831         { "(COMPL_Q)",      skd_comp_q       },
3832         { "(MSG)",          skd_msg_isr      },
3833         { "(Reserved)",     skd_reserved_isr },
3834         { "(Reserved)",     skd_reserved_isr },
3835         { "(Queue Full 0)", skd_qfull_isr    },
3836         { "(Queue Full 1)", skd_qfull_isr    },
3837         { "(Queue Full 2)", skd_qfull_isr    },
3838         { "(Queue Full 3)", skd_qfull_isr    },
3839 };
3840
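/*
 * Allocate exactly SKD_MAX_MSIX_COUNT MSI-X vectors and register one
 * handler per entry in msix_entries[] above.  On any failure the IRQs
 * registered so far are released and an error is returned so the caller
 * can fall back to MSI or legacy interrupts.
 */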
3841 static int skd_acquire_msix(struct skd_device *skdev)
3842 {
3843         int i, rc;
3844         struct pci_dev *pdev = skdev->pdev;
3845
3846         rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3847                         PCI_IRQ_MSIX);
3848         if (rc < 0) {
3849                 pr_err("(%s): failed to enable MSI-X %d\n",
3850                        skd_name(skdev), rc);
3851                 goto out;
3852         }
3853
3854         skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3855                         sizeof(struct skd_msix_entry), GFP_KERNEL);
3856         if (!skdev->msix_entries) {
3857                 rc = -ENOMEM;
3858                 pr_err("(%s): msix table allocation error\n",
3859                        skd_name(skdev));
3860                 goto out;
3861         }
3862
3863         /* Enable MSI-X vectors for the base queue */
3864         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3865                 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3866
3867                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3868                          "%s%d-msix %s", DRV_NAME, skdev->devno,
3869                          msix_entries[i].name);
3870
3871                 rc = devm_request_irq(&skdev->pdev->dev,
3872                                 pci_irq_vector(skdev->pdev, i),
3873                                 msix_entries[i].handler, 0,
3874                                 qentry->isr_name, skdev);
3875                 if (rc) {
3876                         pr_err("(%s): Unable to register(%d) MSI-X "
3877                                "handler %d: %s\n",
3878                                skd_name(skdev), rc, i, qentry->isr_name);
3879                         goto msix_out;
3880                 }
3881         }
3882
3883         pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3884                  skdev->name, __func__, __LINE__,
3885                  pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
3886         return 0;
3887
3888 msix_out:
3889         while (--i >= 0)
3890                 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
3891 out:
3892         kfree(skdev->msix_entries);
3893         skdev->msix_entries = NULL;
3894         return rc;
3895 }
3896
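/*
 * Acquire the device interrupt: try MSI-X first when skd_isr_type asks for
 * it, otherwise (or on failure) fall back to a single MSI or legacy INTx
 * vector handled by skd_isr().
 */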
3897 static int skd_acquire_irq(struct skd_device *skdev)
3898 {
3899         struct pci_dev *pdev = skdev->pdev;
3900         unsigned int irq_flag = PCI_IRQ_LEGACY;
3901         int rc;
3902
3903         if (skd_isr_type == SKD_IRQ_MSIX) {
3904                 rc = skd_acquire_msix(skdev);
3905                 if (!rc)
3906                         return 0;
3907
3908                 pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
3909                        skd_name(skdev), rc);
3910         }
3911
3912         snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3913                         skdev->devno);
3914
3915         if (skd_isr_type != SKD_IRQ_LEGACY)
3916                 irq_flag |= PCI_IRQ_MSI;
3917         rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3918         if (rc < 0) {
3919                 pr_err("(%s): failed to allocate the MSI interrupt %d\n",
3920                         skd_name(skdev), rc);
3921                 return rc;
3922         }
3923
3924         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3925                         pdev->msi_enabled ? 0 : IRQF_SHARED,
3926                         skdev->isr_name, skdev);
3927         if (rc) {
3928                 pci_free_irq_vectors(pdev);
3929                 pr_err("(%s): failed to allocate interrupt %d\n",
3930                         skd_name(skdev), rc);
3931                 return rc;
3932         }
3933
3934         return 0;
3935 }
3936
3937 static void skd_release_irq(struct skd_device *skdev)
3938 {
3939         struct pci_dev *pdev = skdev->pdev;
3940
3941         if (skdev->msix_entries) {
3942                 int i;
3943
3944                 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3945                         devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3946                                         skdev);
3947                 }
3948
3949                 kfree(skdev->msix_entries);
3950                 skdev->msix_entries = NULL;
3951         } else {
3952                 devm_free_irq(&pdev->dev, pdev->irq, skdev);
3953         }
3954
3955         pci_free_irq_vectors(pdev);
3956 }
3957
3958 /*
3959  *****************************************************************************
3960  * CONSTRUCT
3961  *****************************************************************************
3962  */
3963
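/*
 * Allocate one DMA-coherent region holding both the completion ring and
 * the per-entry error info; skerr_table simply points just past the
 * SKD_N_COMPLETION_ENTRY completion entries within that region.
 */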
3964 static int skd_cons_skcomp(struct skd_device *skdev)
3965 {
3966         int rc = 0;
3967         struct fit_completion_entry_v1 *skcomp;
3968         u32 nbytes;
3969
3970         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3971         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3972
3973         pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
3974                  skdev->name, __func__, __LINE__,
3975                  nbytes, SKD_N_COMPLETION_ENTRY);
3976
3977         skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
3978                                        &skdev->cq_dma_address);
3979
3980         if (skcomp == NULL) {
3981                 rc = -ENOMEM;
3982                 goto err_out;
3983         }
3984
3985         skdev->skcomp_table = skcomp;
3986         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3987                                                            sizeof(*skcomp) *
3988                                                            SKD_N_COMPLETION_ENTRY);
3989
3990 err_out:
3991         return rc;
3992 }
3993
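/*
 * Allocate the FIT message contexts.  Each message buffer is allocated
 * with 64 bytes of slack and then rounded up so that both the CPU pointer
 * and the DMA address are aligned per FIT_QCMD_BASE_ADDRESS_MASK, with the
 * residual offset kept in skmsg->offset.
 */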
3994 static int skd_cons_skmsg(struct skd_device *skdev)
3995 {
3996         int rc = 0;
3997         u32 i;
3998
3999         pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4000                  skdev->name, __func__, __LINE__,
4001                  sizeof(struct skd_fitmsg_context),
4002                  skdev->num_fitmsg_context,
4003                  sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
4004
4005         skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4006                                      *skdev->num_fitmsg_context, GFP_KERNEL);
4007         if (skdev->skmsg_table == NULL) {
4008                 rc = -ENOMEM;
4009                 goto err_out;
4010         }
4011
4012         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4013                 struct skd_fitmsg_context *skmsg;
4014
4015                 skmsg = &skdev->skmsg_table[i];
4016
4017                 skmsg->id = i + SKD_ID_FIT_MSG;
4018
4019                 skmsg->state = SKD_MSG_STATE_IDLE;
4020                 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4021                                                       SKD_N_FITMSG_BYTES + 64,
4022                                                       &skmsg->mb_dma_address);
4023
4024                 if (skmsg->msg_buf == NULL) {
4025                         rc = -ENOMEM;
4026                         goto err_out;
4027                 }
4028
4029                 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4030                                       (~FIT_QCMD_BASE_ADDRESS_MASK));
4031                 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4032                 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4033                                        FIT_QCMD_BASE_ADDRESS_MASK);
4034                 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4035                 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4036                 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4037
4038                 skmsg->next = &skmsg[1];
4039         }
4040
4041         /* Free list is in order starting with the 0th entry. */
4042         skdev->skmsg_table[i - 1].next = NULL;
4043         skdev->skmsg_free_list = skdev->skmsg_table;
4044
4045 err_out:
4046         return rc;
4047 }
4048
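/*
 * Allocate a DMA-coherent array of n_sg FIT SG descriptors pre-linked into
 * a chain: each entry's next_desc_ptr holds the bus address of the next
 * descriptor, and the final entry is terminated with 0.
 */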
4049 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4050                                                   u32 n_sg,
4051                                                   dma_addr_t *ret_dma_addr)
4052 {
4053         struct fit_sg_descriptor *sg_list;
4054         u32 nbytes;
4055
4056         nbytes = sizeof(*sg_list) * n_sg;
4057
4058         sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4059
4060         if (sg_list != NULL) {
4061                 uint64_t dma_address = *ret_dma_addr;
4062                 u32 i;
4063
4064                 memset(sg_list, 0, nbytes);
4065
4066                 for (i = 0; i < n_sg - 1; i++) {
4067                         uint64_t ndp_off;
4068                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4069
4070                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
4071                 }
4072                 sg_list[i].next_desc_ptr = 0LL;
4073         }
4074
4075         return sg_list;
4076 }
4077
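/*
 * Allocate the read/write request contexts: a scatterlist and a chained
 * FIT SG descriptor list per context, linked together into the initial
 * free list.
 */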
4078 static int skd_cons_skreq(struct skd_device *skdev)
4079 {
4080         int rc = 0;
4081         u32 i;
4082
4083         pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4084                  skdev->name, __func__, __LINE__,
4085                  sizeof(struct skd_request_context),
4086                  skdev->num_req_context,
4087                  sizeof(struct skd_request_context) * skdev->num_req_context);
4088
4089         skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4090                                      * skdev->num_req_context, GFP_KERNEL);
4091         if (skdev->skreq_table == NULL) {
4092                 rc = -ENOMEM;
4093                 goto err_out;
4094         }
4095
4096         pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4097                  skdev->name, __func__, __LINE__,
4098                  skdev->sgs_per_request, sizeof(struct scatterlist),
4099                  skdev->sgs_per_request * sizeof(struct scatterlist));
4100
4101         for (i = 0; i < skdev->num_req_context; i++) {
4102                 struct skd_request_context *skreq;
4103
4104                 skreq = &skdev->skreq_table[i];
4105
4106                 skreq->id = i + SKD_ID_RW_REQUEST;
4107                 skreq->state = SKD_REQ_STATE_IDLE;
4108
4109                 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4110                                     skdev->sgs_per_request, GFP_KERNEL);
4111                 if (skreq->sg == NULL) {
4112                         rc = -ENOMEM;
4113                         goto err_out;
4114                 }
4115                 sg_init_table(skreq->sg, skdev->sgs_per_request);
4116
4117                 skreq->sksg_list = skd_cons_sg_list(skdev,
4118                                                     skdev->sgs_per_request,
4119                                                     &skreq->sksg_dma_address);
4120
4121                 if (skreq->sksg_list == NULL) {
4122                         rc = -ENOMEM;
4123                         goto err_out;
4124                 }
4125
4126                 skreq->next = &skreq[1];
4127         }
4128
4129         /* Free list is in order starting with the 0th entry. */
4130         skdev->skreq_table[i - 1].next = NULL;
4131         skdev->skreq_free_list = skdev->skreq_table;
4132
4133 err_out:
4134         return rc;
4135 }
4136
4137 static int skd_cons_skspcl(struct skd_device *skdev)
4138 {
4139         int rc = 0;
4140         u32 i, nbytes;
4141
4142         pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4143                  skdev->name, __func__, __LINE__,
4144                  sizeof(struct skd_special_context),
4145                  skdev->n_special,
4146                  sizeof(struct skd_special_context) * skdev->n_special);
4147
4148         skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4149                                       * skdev->n_special, GFP_KERNEL);
4150         if (skdev->skspcl_table == NULL) {
4151                 rc = -ENOMEM;
4152                 goto err_out;
4153         }
4154
4155         for (i = 0; i < skdev->n_special; i++) {
4156                 struct skd_special_context *skspcl;
4157
4158                 skspcl = &skdev->skspcl_table[i];
4159
4160                 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4161                 skspcl->req.state = SKD_REQ_STATE_IDLE;
4162
4163                 skspcl->req.next = &skspcl[1].req;
4164
4165                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4166
4167                 skspcl->msg_buf =
4168                         pci_zalloc_consistent(skdev->pdev, nbytes,
4169                                               &skspcl->mb_dma_address);
4170                 if (skspcl->msg_buf == NULL) {
4171                         rc = -ENOMEM;
4172                         goto err_out;
4173                 }
4174
4175                 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4176                                          SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4177                 if (skspcl->req.sg == NULL) {
4178                         rc = -ENOMEM;
4179                         goto err_out;
4180                 }
4181
4182                 skspcl->req.sksg_list =
4183                         skd_cons_sg_list(skdev,
4184                                          SKD_N_SG_PER_SPECIAL,
4185                                          &skspcl->req.sksg_dma_address);
4186                 if (skspcl->req.sksg_list == NULL) {
4187                         rc = -ENOMEM;
4188                         goto err_out;
4189                 }
4190         }
4191
4192         /* Free list is in order starting with the 0th entry. */
4193         skdev->skspcl_table[i - 1].req.next = NULL;
4194         skdev->skspcl_free_list = skdev->skspcl_table;
4195
4196         return rc;
4197
4198 err_out:
4199         return rc;
4200 }
4201
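/*
 * Set up the single internal special context (skdev->internal_skspcl):
 * DMA-coherent data and message buffers plus a one-element SG list,
 * pre-formatted by skd_format_internal_skspcl() for driver-internal
 * requests.
 */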
4202 static int skd_cons_sksb(struct skd_device *skdev)
4203 {
4204         int rc = 0;
4205         struct skd_special_context *skspcl;
4206         u32 nbytes;
4207
4208         skspcl = &skdev->internal_skspcl;
4209
4210         skspcl->req.id = 0 + SKD_ID_INTERNAL;
4211         skspcl->req.state = SKD_REQ_STATE_IDLE;
4212
4213         nbytes = SKD_N_INTERNAL_BYTES;
4214
4215         skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4216                                                  &skspcl->db_dma_address);
4217         if (skspcl->data_buf == NULL) {
4218                 rc = -ENOMEM;
4219                 goto err_out;
4220         }
4221
4222         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4223         skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4224                                                 &skspcl->mb_dma_address);
4225         if (skspcl->msg_buf == NULL) {
4226                 rc = -ENOMEM;
4227                 goto err_out;
4228         }
4229
4230         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4231                                                  &skspcl->req.sksg_dma_address);
4232         if (skspcl->req.sksg_list == NULL) {
4233                 rc = -ENOMEM;
4234                 goto err_out;
4235         }
4236
4237         if (!skd_format_internal_skspcl(skdev)) {
4238                 rc = -EINVAL;
4239                 goto err_out;
4240         }
4241
4242 err_out:
4243         return rc;
4244 }
4245
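/*
 * Allocate the gendisk and request queue, apply the queue limits (write
 * cache + FUA, max segments/sectors, 8K optimal I/O size) and leave the
 * queue stopped until the device reports itself online.
 */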
4246 static int skd_cons_disk(struct skd_device *skdev)
4247 {
4248         int rc = 0;
4249         struct gendisk *disk;
4250         struct request_queue *q;
4251         unsigned long flags;
4252
4253         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4254         if (!disk) {
4255                 rc = -ENOMEM;
4256                 goto err_out;
4257         }
4258
4259         skdev->disk = disk;
4260         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4261
4262         disk->major = skdev->major;
4263         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4264         disk->fops = &skd_blockdev_ops;
4265         disk->private_data = skdev;
4266
4267         q = blk_init_queue(skd_request_fn, &skdev->lock);
4268         if (!q) {
4269                 rc = -ENOMEM;
4270                 goto err_out;
4271         }
4272
4273         skdev->queue = q;
4274         disk->queue = q;
4275         q->queuedata = skdev;
4276
4277         blk_queue_write_cache(q, true, true);
4278         blk_queue_max_segments(q, skdev->sgs_per_request);
4279         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4280
4281         /* set sysfs optimal_io_size to 8K */
4282         blk_queue_io_opt(q, 8192);
4283
4284         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4285         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4286
4287         spin_lock_irqsave(&skdev->lock, flags);
4288         pr_debug("%s:%s:%d stopping %s queue\n",
4289                  skdev->name, __func__, __LINE__, skdev->name);
4290         blk_stop_queue(skdev->queue);
4291         spin_unlock_irqrestore(&skdev->lock, flags);
4292
4293 err_out:
4294         return rc;
4295 }
4296
4297 #define SKD_N_DEV_TABLE         16u
4298 static u32 skd_next_devno;
4299
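/*
 * Top-level constructor: allocate the skd_device and build, in order, the
 * completion queue, FIT message contexts, request contexts, special
 * contexts, the internal special context and the disk.  Any failure tears
 * the partially constructed device down via skd_destruct().
 */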
4300 static struct skd_device *skd_construct(struct pci_dev *pdev)
4301 {
4302         struct skd_device *skdev;
4303         int blk_major = skd_major;
4304         int rc;
4305
4306         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4307
4308         if (!skdev) {
4309                 pr_err(PFX "(%s): memory alloc failure\n",
4310                        pci_name(pdev));
4311                 return NULL;
4312         }
4313
4314         skdev->state = SKD_DRVR_STATE_LOAD;
4315         skdev->pdev = pdev;
4316         skdev->devno = skd_next_devno++;
4317         skdev->major = blk_major;
4318         sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4319         skdev->dev_max_queue_depth = 0;
4320
4321         skdev->num_req_context = skd_max_queue_depth;
4322         skdev->num_fitmsg_context = skd_max_queue_depth;
4323         skdev->n_special = skd_max_pass_thru;
4324         skdev->cur_max_queue_depth = 1;
4325         skdev->queue_low_water_mark = 1;
4326         skdev->proto_ver = 99;
4327         skdev->sgs_per_request = skd_sgs_per_request;
4328         skdev->dbg_level = skd_dbg_level;
4329
4330         atomic_set(&skdev->device_count, 0);
4331
4332         spin_lock_init(&skdev->lock);
4333
4334         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4335
4336         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4337         rc = skd_cons_skcomp(skdev);
4338         if (rc < 0)
4339                 goto err_out;
4340
4341         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4342         rc = skd_cons_skmsg(skdev);
4343         if (rc < 0)
4344                 goto err_out;
4345
4346         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4347         rc = skd_cons_skreq(skdev);
4348         if (rc < 0)
4349                 goto err_out;
4350
4351         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4352         rc = skd_cons_skspcl(skdev);
4353         if (rc < 0)
4354                 goto err_out;
4355
4356         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4357         rc = skd_cons_sksb(skdev);
4358         if (rc < 0)
4359                 goto err_out;
4360
4361         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4362         rc = skd_cons_disk(skdev);
4363         if (rc < 0)
4364                 goto err_out;
4365
4366         pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4367         return skdev;
4368
4369 err_out:
4370         pr_debug("%s:%s:%d construct failed\n",
4371                  skdev->name, __func__, __LINE__);
4372         skd_destruct(skdev);
4373         return NULL;
4374 }
4375
4376 /*
4377  *****************************************************************************
4378  * DESTRUCT (FREE)
4379  *****************************************************************************
4380  */
4381
4382 static void skd_free_skcomp(struct skd_device *skdev)
4383 {
4384         if (skdev->skcomp_table != NULL) {
4385                 u32 nbytes;
4386
4387                 nbytes = sizeof(skdev->skcomp_table[0]) *
4388                          SKD_N_COMPLETION_ENTRY;
4389                 pci_free_consistent(skdev->pdev, nbytes,
4390                                     skdev->skcomp_table, skdev->cq_dma_address);
4391         }
4392
4393         skdev->skcomp_table = NULL;
4394         skdev->cq_dma_address = 0;
4395 }
4396
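/*
 * Free the FIT message buffers; the stored alignment offset is applied to
 * the CPU pointer and DMA address before they are handed to
 * pci_free_consistent().
 */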
4397 static void skd_free_skmsg(struct skd_device *skdev)
4398 {
4399         u32 i;
4400
4401         if (skdev->skmsg_table == NULL)
4402                 return;
4403
4404         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4405                 struct skd_fitmsg_context *skmsg;
4406
4407                 skmsg = &skdev->skmsg_table[i];
4408
4409                 if (skmsg->msg_buf != NULL) {
4410                         skmsg->msg_buf += skmsg->offset;
4411                         skmsg->mb_dma_address += skmsg->offset;
4412                         pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4413                                             skmsg->msg_buf,
4414                                             skmsg->mb_dma_address);
4415                 }
4416                 skmsg->msg_buf = NULL;
4417                 skmsg->mb_dma_address = 0;
4418         }
4419
4420         kfree(skdev->skmsg_table);
4421         skdev->skmsg_table = NULL;
4422 }
4423
4424 static void skd_free_sg_list(struct skd_device *skdev,
4425                              struct fit_sg_descriptor *sg_list,
4426                              u32 n_sg, dma_addr_t dma_addr)
4427 {
4428         if (sg_list != NULL) {
4429                 u32 nbytes;
4430
4431                 nbytes = sizeof(*sg_list) * n_sg;
4432
4433                 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4434         }
4435 }
4436
4437 static void skd_free_skreq(struct skd_device *skdev)
4438 {
4439         u32 i;
4440
4441         if (skdev->skreq_table == NULL)
4442                 return;
4443
4444         for (i = 0; i < skdev->num_req_context; i++) {
4445                 struct skd_request_context *skreq;
4446
4447                 skreq = &skdev->skreq_table[i];
4448
4449                 skd_free_sg_list(skdev, skreq->sksg_list,
4450                                  skdev->sgs_per_request,
4451                                  skreq->sksg_dma_address);
4452
4453                 skreq->sksg_list = NULL;
4454                 skreq->sksg_dma_address = 0;
4455
4456                 kfree(skreq->sg);
4457         }
4458
4459         kfree(skdev->skreq_table);
4460         skdev->skreq_table = NULL;
4461 }
4462
4463 static void skd_free_skspcl(struct skd_device *skdev)
4464 {
4465         u32 i;
4466         u32 nbytes;
4467
4468         if (skdev->skspcl_table == NULL)
4469                 return;
4470
4471         for (i = 0; i < skdev->n_special; i++) {
4472                 struct skd_special_context *skspcl;
4473
4474                 skspcl = &skdev->skspcl_table[i];
4475
4476                 if (skspcl->msg_buf != NULL) {
4477                         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4478                         pci_free_consistent(skdev->pdev, nbytes,
4479                                             skspcl->msg_buf,
4480                                             skspcl->mb_dma_address);
4481                 }
4482
4483                 skspcl->msg_buf = NULL;
4484                 skspcl->mb_dma_address = 0;
4485
4486                 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4487                                  SKD_N_SG_PER_SPECIAL,
4488                                  skspcl->req.sksg_dma_address);
4489
4490                 skspcl->req.sksg_list = NULL;
4491                 skspcl->req.sksg_dma_address = 0;
4492
4493                 kfree(skspcl->req.sg);
4494         }
4495
4496         kfree(skdev->skspcl_table);
4497         skdev->skspcl_table = NULL;
4498 }
4499
4500 static void skd_free_sksb(struct skd_device *skdev)
4501 {
4502         struct skd_special_context *skspcl;
4503         u32 nbytes;
4504
4505         skspcl = &skdev->internal_skspcl;
4506
4507         if (skspcl->data_buf != NULL) {
4508                 nbytes = SKD_N_INTERNAL_BYTES;
4509
4510                 pci_free_consistent(skdev->pdev, nbytes,
4511                                     skspcl->data_buf, skspcl->db_dma_address);
4512         }
4513
4514         skspcl->data_buf = NULL;
4515         skspcl->db_dma_address = 0;
4516
4517         if (skspcl->msg_buf != NULL) {
4518                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4519                 pci_free_consistent(skdev->pdev, nbytes,
4520                                     skspcl->msg_buf, skspcl->mb_dma_address);
4521         }
4522
4523         skspcl->msg_buf = NULL;
4524         skspcl->mb_dma_address = 0;
4525
4526         skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4527                          skspcl->req.sksg_dma_address);
4528
4529         skspcl->req.sksg_list = NULL;
4530         skspcl->req.sksg_dma_address = 0;
4531 }
4532
4533 static void skd_free_disk(struct skd_device *skdev)
4534 {
4535         struct gendisk *disk = skdev->disk;
4536
4537         if (disk != NULL) {
4538                 struct request_queue *q = disk->queue;
4539
4540                 if (disk->flags & GENHD_FL_UP)
4541                         del_gendisk(disk);
4542                 if (q)
4543                         blk_cleanup_queue(q);
4544                 put_disk(disk);
4545         }
4546         skdev->disk = NULL;
4547 }
4548
4549 static void skd_destruct(struct skd_device *skdev)
4550 {
4551         if (skdev == NULL)
4552                 return;
4553 
4555         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4556         skd_free_disk(skdev);
4557
4558         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4559         skd_free_sksb(skdev);
4560
4561         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4562         skd_free_skspcl(skdev);
4563
4564         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4565         skd_free_skreq(skdev);
4566
4567         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4568         skd_free_skmsg(skdev);
4569
4570         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4571         skd_free_skcomp(skdev);
4572
4573         pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4574         kfree(skdev);
4575 }
4576
4577 /*
4578  *****************************************************************************
4579  * BLOCK DEVICE (BDEV) GLUE
4580  *****************************************************************************
4581  */
4582
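/*
 * Report a synthetic CHS geometry (64 heads, 255 sectors/track) derived
 * from the disk capacity; returns -EIO until read_cap_is_valid is set.
 */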
4583 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4584 {
4585         struct skd_device *skdev;
4586         u64 capacity;
4587
4588         skdev = bdev->bd_disk->private_data;
4589
4590         pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4591                  skdev->name, __func__, __LINE__,
4592                  bdev->bd_disk->disk_name, current->comm);
4593
4594         if (skdev->read_cap_is_valid) {
4595                 capacity = get_capacity(skdev->disk);
4596                 geo->heads = 64;
4597                 geo->sectors = 255;
4598                 geo->cylinders = (capacity) / (255 * 64);
4599
4600                 return 0;
4601         }
4602         return -EIO;
4603 }
4604
4605 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
4606 {
4607         pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4608         device_add_disk(parent, skdev->disk);
4609         return 0;
4610 }
4611
4612 static const struct block_device_operations skd_blockdev_ops = {
4613         .owner          = THIS_MODULE,
4614         .ioctl          = skd_bdev_ioctl,
4615         .getgeo         = skd_bdev_getgeo,
4616 };
4617 
4619 /*
4620  *****************************************************************************
4621  * PCIe DRIVER GLUE
4622  *****************************************************************************
4623  */
4624
4625 static const struct pci_device_id skd_pci_tbl[] = {
4626         { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4627           PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4628         { 0 }                     /* terminate list */
4629 };
4630
4631 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4632
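/*
 * Format a "PCIe (<speed> <width>X)" description by reading the Link
 * Status register (offset 0x12) of the PCI Express capability.
 */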
4633 static char *skd_pci_info(struct skd_device *skdev, char *str)
4634 {
4635         int pcie_reg;
4636
4637         strcpy(str, "PCIe (");
4638         pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4639
4640         if (pcie_reg) {
4642                 char lwstr[6];
4643                 uint16_t pcie_lstat, lspeed, lwidth;
4644
4645                 pcie_reg += 0x12;
4646                 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4647                 lspeed = pcie_lstat & (0xF);
4648                 lwidth = (pcie_lstat & 0x3F0) >> 4;
4649
4650                 if (lspeed == 1)
4651                         strcat(str, "2.5GT/s ");
4652                 else if (lspeed == 2)
4653                         strcat(str, "5.0GT/s ");
4654                 else
4655                         strcat(str, "<unknown> ");
4656                 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4657                 strcat(str, lwstr);
4658         }
4659         return str;
4660 }
4661
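/*
 * Probe: enable the PCI device, set the DMA masks, register the block
 * major (once), construct the skd_device, map the BARs, hook up the
 * interrupt and timer, then wait up to SKD_START_WAIT_SECONDS for the
 * drive to come online before attaching the gendisk.
 */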
4662 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4663 {
4664         int i;
4665         int rc = 0;
4666         char pci_str[32];
4667         struct skd_device *skdev;
4668
4669         pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
4670                DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4671         pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
4672                pci_name(pdev), pdev->vendor, pdev->device);
4673
4674         rc = pci_enable_device(pdev);
4675         if (rc)
4676                 return rc;
4677         rc = pci_request_regions(pdev, DRV_NAME);
4678         if (rc)
4679                 goto err_out;
4680         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4681         if (!rc) {
4682                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4684                         pr_err("(%s): consistent DMA mask error\n",
4685                                pci_name(pdev));
4686                 }
4687         } else {
4688                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4689                 if (rc) {
4691                         pr_err("(%s): DMA mask error %d\n",
4692                                pci_name(pdev), rc);
4693                         goto err_out_regions;
4694                 }
4695         }
4696
4697         if (!skd_major) {
4698                 rc = register_blkdev(0, DRV_NAME);
4699                 if (rc < 0)
4700                         goto err_out_regions;
4701                 BUG_ON(!rc);
4702                 skd_major = rc;
4703         }
4704
4705         skdev = skd_construct(pdev);
4706         if (skdev == NULL) {
4707                 rc = -ENOMEM;
4708                 goto err_out_regions;
4709         }
4710
4711         skd_pci_info(skdev, pci_str);
4712         pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
4713
4714         pci_set_master(pdev);
4715         rc = pci_enable_pcie_error_reporting(pdev);
4716         if (rc) {
4717                 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
4718                        skd_name(skdev), rc);
4720                 skdev->pcie_error_reporting_is_enabled = 0;
4721         } else {
4722                 skdev->pcie_error_reporting_is_enabled = 1;
4723         }
4724 
4725         pci_set_drvdata(pdev, skdev);
4726
4727         for (i = 0; i < SKD_MAX_BARS; i++) {
4728                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4729                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4730                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4731                                             skdev->mem_size[i]);
4732                 if (!skdev->mem_map[i]) {
4733                         pr_err("(%s): Unable to map adapter memory!\n",
4734                                skd_name(skdev));
4735                         rc = -ENODEV;
4736                         goto err_out_iounmap;
4737                 }
4738                 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
4739                          skdev->name, __func__, __LINE__,
4740                          skdev->mem_map[i],
4741                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4742         }
4743
4744         rc = skd_acquire_irq(skdev);
4745         if (rc) {
4746                 pr_err("(%s): interrupt resource error %d\n",
4747                        skd_name(skdev), rc);
4748                 goto err_out_iounmap;
4749         }
4750
4751         rc = skd_start_timer(skdev);
4752         if (rc)
4753                 goto err_out_timer;
4754
4755         init_waitqueue_head(&skdev->waitq);
4756
4757         skd_start_device(skdev);
4758
4759         rc = wait_event_interruptible_timeout(skdev->waitq,
4760                                               (skdev->gendisk_on),
4761                                               (SKD_START_WAIT_SECONDS * HZ));
4762         if (skdev->gendisk_on > 0) {
4763                 /* device came on-line after reset */
4764                 skd_bdev_attach(&pdev->dev, skdev);
4765                 rc = 0;
4766         } else {
4767                 /* We timed out; something is wrong with the device, so
4768                  * don't add the disk structure.
4769                  */
4770                 pr_err("(%s): error: waiting for s1120 timed out %d!\n",
4771                        skd_name(skdev), rc);
4772                 /* in case of no error; we timeout with ENXIO */
4773                 if (!rc)
4774                         rc = -ENXIO;
4775                 goto err_out_timer;
4776         }
4777 
4779 #ifdef SKD_VMK_POLL_HANDLER
4780         if (skdev->irq_type == SKD_IRQ_MSIX) {
4781                 /* MSIX completion handler is being used for coredump */
4782                 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4783                                                   skdev->msix_entries[5].vector,
4784                                                   skd_comp_q, skdev);
4785         } else {
4786                 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4787                                                   skdev->pdev->irq, skd_isr,
4788                                                   skdev);
4789         }
4790 #endif  /* SKD_VMK_POLL_HANDLER */
4791
4792         return rc;
4793
4794 err_out_timer:
4795         skd_stop_device(skdev);
4796         skd_release_irq(skdev);
4797
4798 err_out_iounmap:
4799         for (i = 0; i < SKD_MAX_BARS; i++)
4800                 if (skdev->mem_map[i])
4801                         iounmap(skdev->mem_map[i]);
4802
4803         if (skdev->pcie_error_reporting_is_enabled)
4804                 pci_disable_pcie_error_reporting(pdev);
4805
4806         skd_destruct(skdev);
4807
4808 err_out_regions:
4809         pci_release_regions(pdev);
4810
4811 err_out:
4812         pci_disable_device(pdev);
4813         pci_set_drvdata(pdev, NULL);
4814         return rc;
4815 }
4816
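/* Undo probe: stop the device, release IRQ/BAR resources and free it. */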
4817 static void skd_pci_remove(struct pci_dev *pdev)
4818 {
4819         int i;
4820         struct skd_device *skdev;
4821
4822         skdev = pci_get_drvdata(pdev);
4823         if (!skdev) {
4824                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4825                 return;
4826         }
4827         skd_stop_device(skdev);
4828         skd_release_irq(skdev);
4829
4830         for (i = 0; i < SKD_MAX_BARS; i++)
4831                 if (skdev->mem_map[i])
4832                         iounmap((u32 *)skdev->mem_map[i]);
4833
4834         if (skdev->pcie_error_reporting_is_enabled)
4835                 pci_disable_pcie_error_reporting(pdev);
4836
4837         skd_destruct(skdev);
4838
4839         pci_release_regions(pdev);
4840         pci_disable_device(pdev);
4841         pci_set_drvdata(pdev, NULL);
4844 }
4845
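/*
 * Legacy PM suspend: quiesce the device, release its resources and put
 * the function into the requested low-power state.
 */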
4846 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4847 {
4848         int i;
4849         struct skd_device *skdev;
4850
4851         skdev = pci_get_drvdata(pdev);
4852         if (!skdev) {
4853                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4854                 return -EIO;
4855         }
4856
4857         skd_stop_device(skdev);
4858
4859         skd_release_irq(skdev);
4860
4861         for (i = 0; i < SKD_MAX_BARS; i++)
4862                 if (skdev->mem_map[i])
4863                         iounmap((u32 *)skdev->mem_map[i]);
4864
4865         if (skdev->pcie_error_reporting_is_enabled)
4866                 pci_disable_pcie_error_reporting(pdev);
4867
4868         pci_release_regions(pdev);
4869         pci_save_state(pdev);
4870         pci_disable_device(pdev);
4871         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4872         return 0;
4873 }
4874
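/*
 * Legacy PM resume: restore PCI state and repeat the probe-time setup
 * (DMA masks, BAR mappings, IRQ, timer) before restarting the device.
 */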
4875 static int skd_pci_resume(struct pci_dev *pdev)
4876 {
4877         int i;
4878         int rc = 0;
4879         struct skd_device *skdev;
4880
4881         skdev = pci_get_drvdata(pdev);
4882         if (!skdev) {
4883                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4884                 return -1;
4885         }
4886
4887         pci_set_power_state(pdev, PCI_D0);
4888         pci_enable_wake(pdev, PCI_D0, 0);
4889         pci_restore_state(pdev);
4890
4891         rc = pci_enable_device(pdev);
4892         if (rc)
4893                 return rc;
4894         rc = pci_request_regions(pdev, DRV_NAME);
4895         if (rc)
4896                 goto err_out;
4897         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4898         if (!rc) {
4899                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4901                         pr_err("(%s): consistent DMA mask error\n",
4902                                pci_name(pdev));
4903                 }
4904         } else {
4905                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4906                 if (rc) {
4908                         pr_err("(%s): DMA mask error %d\n",
4909                                pci_name(pdev), rc);
4910                         goto err_out_regions;
4911                 }
4912         }
4913
4914         pci_set_master(pdev);
4915         rc = pci_enable_pcie_error_reporting(pdev);
4916         if (rc) {
4917                 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
4918                        skdev->name, rc);
4919                 skdev->pcie_error_reporting_is_enabled = 0;
4920         } else {
4921                 skdev->pcie_error_reporting_is_enabled = 1;
4922         }
4923 
4923         for (i = 0; i < SKD_MAX_BARS; i++) {
4925                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4926                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4927                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4928                                             skdev->mem_size[i]);
4929                 if (!skdev->mem_map[i]) {
4930                         pr_err("(%s): Unable to map adapter memory!\n",
4931                                skd_name(skdev));
4932                         rc = -ENODEV;
4933                         goto err_out_iounmap;
4934                 }
4935                 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
4936                          skdev->name, __func__, __LINE__,
4937                          skdev->mem_map[i],
4938                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4939         }
4940         rc = skd_acquire_irq(skdev);
4941         if (rc) {
4943                 pr_err("(%s): interrupt resource error %d\n",
4944                        pci_name(pdev), rc);
4945                 goto err_out_iounmap;
4946         }
4947
4948         rc = skd_start_timer(skdev);
4949         if (rc)
4950                 goto err_out_timer;
4951
4952         init_waitqueue_head(&skdev->waitq);
4953
4954         skd_start_device(skdev);
4955
4956         return rc;
4957
4958 err_out_timer:
4959         skd_stop_device(skdev);
4960         skd_release_irq(skdev);
4961
4962 err_out_iounmap:
4963         for (i = 0; i < SKD_MAX_BARS; i++)
4964                 if (skdev->mem_map[i])
4965                         iounmap(skdev->mem_map[i]);
4966
4967         if (skdev->pcie_error_reporting_is_enabled)
4968                 pci_disable_pcie_error_reporting(pdev);
4969
4970 err_out_regions:
4971         pci_release_regions(pdev);
4972
4973 err_out:
4974         pci_disable_device(pdev);
4975         return rc;
4976 }
4977
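/* Shutdown hook: just stop the device on power-off or reboot. */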
4978 static void skd_pci_shutdown(struct pci_dev *pdev)
4979 {
4980         struct skd_device *skdev;
4981
4982         pr_err("skd_pci_shutdown called\n");
4983
4984         skdev = pci_get_drvdata(pdev);
4985         if (!skdev) {
4986                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4987                 return;
4988         }
4989
4990         pr_err("%s: calling stop\n", skd_name(skdev));
4991         skd_stop_device(skdev);
4992 }
4993
4994 static struct pci_driver skd_driver = {
4995         .name           = DRV_NAME,
4996         .id_table       = skd_pci_tbl,
4997         .probe          = skd_pci_probe,
4998         .remove         = skd_pci_remove,
4999         .suspend        = skd_pci_suspend,
5000         .resume         = skd_pci_resume,
5001         .shutdown       = skd_pci_shutdown,
5002 };
5003
5004 /*
5005  *****************************************************************************
5006  * LOGGING SUPPORT
5007  *****************************************************************************
5008  */
5009
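/* Build the "<name>:<serial>:[<pci addr>]" identifier used in log output. */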
5010 static const char *skd_name(struct skd_device *skdev)
5011 {
5012         memset(skdev->id_str, 0, sizeof(skdev->id_str));
5013
5014         if (skdev->inquiry_is_valid)
5015                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
5016                          skdev->name, skdev->inq_serial_num,
5017                          pci_name(skdev->pdev));
5018         else
5019                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
5020                          skdev->name, pci_name(skdev->pdev));
5021
5022         return skdev->id_str;
5023 }
5024
5025 const char *skd_drive_state_to_str(int state)
5026 {
5027         switch (state) {
5028         case FIT_SR_DRIVE_OFFLINE:
5029                 return "OFFLINE";
5030         case FIT_SR_DRIVE_INIT:
5031                 return "INIT";
5032         case FIT_SR_DRIVE_ONLINE:
5033                 return "ONLINE";
5034         case FIT_SR_DRIVE_BUSY:
5035                 return "BUSY";
5036         case FIT_SR_DRIVE_FAULT:
5037                 return "FAULT";
5038         case FIT_SR_DRIVE_DEGRADED:
5039                 return "DEGRADED";
5040         case FIT_SR_PCIE_LINK_DOWN:
5041                 return "LINK_DOWN";
5042         case FIT_SR_DRIVE_SOFT_RESET:
5043                 return "SOFT_RESET";
5044         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5045                 return "NEED_FW";
5046         case FIT_SR_DRIVE_INIT_FAULT:
5047                 return "INIT_FAULT";
5048         case FIT_SR_DRIVE_BUSY_SANITIZE:
5049                 return "BUSY_SANITIZE";
5050         case FIT_SR_DRIVE_BUSY_ERASE:
5051                 return "BUSY_ERASE";
5052         case FIT_SR_DRIVE_FW_BOOTING:
5053                 return "FW_BOOTING";
5054         default:
5055                 return "???";
5056         }
5057 }
5058
5059 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5060 {
5061         switch (state) {
5062         case SKD_DRVR_STATE_LOAD:
5063                 return "LOAD";
5064         case SKD_DRVR_STATE_IDLE:
5065                 return "IDLE";
5066         case SKD_DRVR_STATE_BUSY:
5067                 return "BUSY";
5068         case SKD_DRVR_STATE_STARTING:
5069                 return "STARTING";
5070         case SKD_DRVR_STATE_ONLINE:
5071                 return "ONLINE";
5072         case SKD_DRVR_STATE_PAUSING:
5073                 return "PAUSING";
5074         case SKD_DRVR_STATE_PAUSED:
5075                 return "PAUSED";
5076         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5077                 return "DRAINING_TIMEOUT";
5078         case SKD_DRVR_STATE_RESTARTING:
5079                 return "RESTARTING";
5080         case SKD_DRVR_STATE_RESUMING:
5081                 return "RESUMING";
5082         case SKD_DRVR_STATE_STOPPING:
5083                 return "STOPPING";
5084         case SKD_DRVR_STATE_SYNCING:
5085                 return "SYNCING";
5086         case SKD_DRVR_STATE_FAULT:
5087                 return "FAULT";
5088         case SKD_DRVR_STATE_DISAPPEARED:
5089                 return "DISAPPEARED";
5090         case SKD_DRVR_STATE_BUSY_ERASE:
5091                 return "BUSY_ERASE";
5092         case SKD_DRVR_STATE_BUSY_SANITIZE:
5093                 return "BUSY_SANITIZE";
5094         case SKD_DRVR_STATE_BUSY_IMMINENT:
5095                 return "BUSY_IMMINENT";
5096         case SKD_DRVR_STATE_WAIT_BOOT:
5097                 return "WAIT_BOOT";
5098
5099         default:
5100                 return "???";
5101         }
5102 }
5103
5104 static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5105 {
5106         switch (state) {
5107         case SKD_MSG_STATE_IDLE:
5108                 return "IDLE";
5109         case SKD_MSG_STATE_BUSY:
5110                 return "BUSY";
5111         default:
5112                 return "???";
5113         }
5114 }
5115
5116 static const char *skd_skreq_state_to_str(enum skd_req_state state)
5117 {
5118         switch (state) {
5119         case SKD_REQ_STATE_IDLE:
5120                 return "IDLE";
5121         case SKD_REQ_STATE_SETUP:
5122                 return "SETUP";
5123         case SKD_REQ_STATE_BUSY:
5124                 return "BUSY";
5125         case SKD_REQ_STATE_COMPLETED:
5126                 return "COMPLETED";
5127         case SKD_REQ_STATE_TIMEOUT:
5128                 return "TIMEOUT";
5129         case SKD_REQ_STATE_ABORTED:
5130                 return "ABORTED";
5131         default:
5132                 return "???";
5133         }
5134 }
5135
5136 static void skd_log_skdev(struct skd_device *skdev, const char *event)
5137 {
5138         pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5139                  skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5140         pr_debug("%s:%s:%d   drive_state=%s(%d) driver_state=%s(%d)\n",
5141                  skdev->name, __func__, __LINE__,
5142                  skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5143                  skd_skdev_state_to_str(skdev->state), skdev->state);
5144         pr_debug("%s:%s:%d   busy=%d limit=%d dev=%d lowat=%d\n",
5145                  skdev->name, __func__, __LINE__,
5146                  skdev->in_flight, skdev->cur_max_queue_depth,
5147                  skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5148         pr_debug("%s:%s:%d   timestamp=0x%x cycle=%d cycle_ix=%d\n",
5149                  skdev->name, __func__, __LINE__,
5150                  skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5151 }
5152
5153 static void skd_log_skmsg(struct skd_device *skdev,
5154                           struct skd_fitmsg_context *skmsg, const char *event)
5155 {
5156         pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5157                  skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5158         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x length=%d\n",
5159                  skdev->name, __func__, __LINE__,
5160                  skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5161                  skmsg->id, skmsg->length);
5162 }
5163
5164 static void skd_log_skreq(struct skd_device *skdev,
5165                           struct skd_request_context *skreq, const char *event)
5166 {
5167         pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5168                  skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5169         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5170                  skdev->name, __func__, __LINE__,
5171                  skd_skreq_state_to_str(skreq->state), skreq->state,
5172                  skreq->id, skreq->fitmsg_id);
5173         pr_debug("%s:%s:%d   timo=0x%x sg_dir=%d n_sg=%d\n",
5174                  skdev->name, __func__, __LINE__,
5175                  skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5176
5177         if (skreq->req != NULL) {
5178                 struct request *req = skreq->req;
5179                 u32 lba = (u32)blk_rq_pos(req);
5180                 u32 count = blk_rq_sectors(req);
5181
5182                 pr_debug("%s:%s:%d "
5183                          "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5184                          skdev->name, __func__, __LINE__,
5185                          req, lba, lba, count, count,
5186                          (int)rq_data_dir(req));
5187         } else
5188                 pr_debug("%s:%s:%d req=NULL\n",
5189                          skdev->name, __func__, __LINE__);
5190 }
5191
5192 /*
5193  *****************************************************************************
5194  * MODULE GLUE
5195  *****************************************************************************
5196  */
5197
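/*
 * Module init: sanity-check the configurable limits, resetting any value
 * that is out of range to its default, then register the PCI driver.
 */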
5198 static int __init skd_init(void)
5199 {
5200         pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5201
5202         switch (skd_isr_type) {
5203         case SKD_IRQ_LEGACY:
5204         case SKD_IRQ_MSI:
5205         case SKD_IRQ_MSIX:
5206                 break;
5207         default:
5208                 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5209                        skd_isr_type, SKD_IRQ_DEFAULT);
5210                 skd_isr_type = SKD_IRQ_DEFAULT;
5211         }
5212
5213         if (skd_max_queue_depth < 1 ||
5214             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5215                 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5216                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5217                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5218         }
5219
5220         if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5221                 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5222                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5223                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5224         }
5225
5226         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5227                 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
5228                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5229                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5230         }
5231
5232         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5233                 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5234                        skd_dbg_level, 0);
5235                 skd_dbg_level = 0;
5236         }
5237
5238         if (skd_isr_comp_limit < 0) {
5239                 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5240                        skd_isr_comp_limit, 0);
5241                 skd_isr_comp_limit = 0;
5242         }
5243
5244         if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5245                 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5246                        skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5247                 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5248         }
5249
5250         return pci_register_driver(&skd_driver);
5251 }
5252
5253 static void __exit skd_exit(void)
5254 {
5255         pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5256
5257         pci_unregister_driver(&skd_driver);
5258
5259         if (skd_major)
5260                 unregister_blkdev(skd_major, DRV_NAME);
5261 }
5262
5263 module_init(skd_init);
5264 module_exit(skd_exit);