Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[sfrench/cifs-2.6.git] / drivers / staging / unisys / visorhba / visorhba_main.c
1 /*
2  * Copyright (c) 2012 - 2015 UNISYS CORPORATION
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or (at
8  * your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  * NON INFRINGEMENT.  See the GNU General Public License for more
14  * details.
15  */
16
17 #include <linux/debugfs.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
25
26 #include "visorbus.h"
27 #include "iochannel.h"
28
/* The Send and Receive Buffers of the IO Queue may both be full */

/* value loaded into a disk's ios_threshold once its error_count saturates */
#define IOS_ERROR_THRESHOLD  1000
/* size of the per-host pending-command ring (twice the channel minimum) */
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
/* per-disk error tally is capped at this value */
#define VISORHBA_ERROR_COUNT 30

/* root of this driver's debugfs tree */
static struct dentry *visorhba_debugfs_dir;

/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the VISOR_VHBA channel.
	 */
	{ VISOR_VHBA_CHANNEL_GUID, "sparvhba" },
	{}
};

MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
48
/* Per-disk state, stored in scsi_device->hostdata (see visorhba_slave_alloc) */
struct visordisk_info {
	struct scsi_device *sdev;
	u32 valid;
	/* set to IOS_ERROR_THRESHOLD once error_count saturates */
	atomic_t ios_threshold;
	/* error tally; incremented until it reaches VISORHBA_ERROR_COUNT */
	atomic_t error_count;
	struct visordisk_info *next;
};
56
/* One slot of the ring of commands outstanding to the Service Partition */
struct scsipending {
	struct uiscmdrsp cmdrsp;
	/* The Data being tracked; NULL marks the slot as free */
	void *sent;
	/* Type of pointer that is being stored (CMD_SCSI_TYPE, ...) */
	char cmdtype;
};
64
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	/* lock to protect data in devdata */
	spinlock_t privlock;
	/* true once the IOVM went down; new commands are refused */
	bool serverdown;
	/* true while a serverdown transition is in progress */
	bool serverchangingstate;
	/* statistics reported via debugfs (see info_debugfs_show) */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	/* channel feature-flags register, dumped via debugfs */
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;
	/* largest SCSI buffer length seen so far (debugfs statistic) */
	unsigned int max_buff_len;
	int devnum;
	/* worker thread; stopped in visorhba_serverdown_complete()
	 * (started elsewhere, not in view here)
	 */
	struct task_struct *thread;
	int thread_wait_ms;

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
102
/* Wrapper holding a reference to one visorhba instance
 * (not referenced in this part of the file).
 */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
106
107 /*
108  * visor_thread_start - Starts a thread for the device
109  * @threadfn:   Function the thread starts
110  * @thrcontext: Context to pass to the thread, i.e. devdata
111  * @name:       String describing name of thread
112  *
113  * Starts a thread for the device.
114  *
115  * Return: The task_struct * denoting the thread on success,
116  *         or NULL on failure
117  */
118 static struct task_struct *visor_thread_start(int (*threadfn)(void *),
119                                               void *thrcontext, char *name)
120 {
121         struct task_struct *task;
122
123         task = kthread_run(threadfn, thrcontext, "%s", name);
124         if (IS_ERR(task)) {
125                 pr_err("visorbus failed to start thread\n");
126                 return NULL;
127         }
128         return task;
129 }
130
/*
 * visor_thread_stop - Stops the thread if it is running
 * @task: Description of process to stop
 *
 * Thin wrapper around kthread_stop(), which blocks until the
 * thread function has returned.
 */
static void visor_thread_stop(struct task_struct *task)
{
	kthread_stop(task);
}
139
140 /*
141  * add_scsipending_entry - Save off io command that is pending in
142  *                         Service Partition
143  * @devdata: Pointer to devdata
144  * @cmdtype: Specifies the type of command pending
145  * @new:     The command to be saved
146  *
147  * Saves off the io command that is being handled by the Service
148  * Partition so that it can be handled when it completes. If new is
149  * NULL it is assumed the entry refers only to the cmdrsp.
150  *
151  * Return: Insert_location where entry was added on success,
152  *         -EBUSY if it can't
153  */
154 static int add_scsipending_entry(struct visorhba_devdata *devdata,
155                                  char cmdtype, void *new)
156 {
157         unsigned long flags;
158         struct scsipending *entry;
159         int insert_location;
160
161         spin_lock_irqsave(&devdata->privlock, flags);
162         insert_location = devdata->nextinsert;
163         while (devdata->pending[insert_location].sent) {
164                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
165                 if (insert_location == (int)devdata->nextinsert) {
166                         spin_unlock_irqrestore(&devdata->privlock, flags);
167                         return -EBUSY;
168                 }
169         }
170
171         entry = &devdata->pending[insert_location];
172         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
173         entry->cmdtype = cmdtype;
174         if (new)
175                 entry->sent = new;
176         /* wants to send cmdrsp */
177         else
178                 entry->sent = &entry->cmdrsp;
179         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
180         spin_unlock_irqrestore(&devdata->privlock, flags);
181
182         return insert_location;
183 }
184
185 /*
186  * del_scsipending_ent - Removes an entry from the pending array
187  * @devdata: Device holding the pending array
188  * @del:     Entry to remove
189  *
190  * Removes the entry pointed at by del and returns it.
191  *
192  * Return: The scsipending entry pointed to on success, NULL on failure
193  */
194 static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
195 {
196         unsigned long flags;
197         void *sent;
198
199         if (del >= MAX_PENDING_REQUESTS)
200                 return NULL;
201
202         spin_lock_irqsave(&devdata->privlock, flags);
203         sent = devdata->pending[del].sent;
204         devdata->pending[del].cmdtype = 0;
205         devdata->pending[del].sent = NULL;
206         spin_unlock_irqrestore(&devdata->privlock, flags);
207
208         return sent;
209 }
210
211 /*
212  * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
213  * @ddata: Device holding the pending array
214  * @ent:   Entry that stores the cmdrsp
215  *
216  * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
217  * if the "sent" field is not NULL.
218  *
219  * Return: A pointer to the cmdrsp, NULL on failure
220  */
221 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
222                                                 int ent)
223 {
224         if (ddata->pending[ent].sent)
225                 return &ddata->pending[ent].cmdrsp;
226
227         return NULL;
228 }
229
230 /*
231  * simple_idr_get - Associate a provided pointer with an int value
232  *                  1 <= value <= INT_MAX, and return this int value;
233  *                  the pointer value can be obtained later by passing
234  *                  this int value to idr_find()
235  * @idrtable: The data object maintaining the pointer<-->int mappings
236  * @p:        The pointer value to be remembered
237  * @lock:     A spinlock used when exclusive access to idrtable is needed
238  *
239  * Return: The id number mapped to pointer 'p', 0 on failure
240  */
241 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
242                                    spinlock_t *lock)
243 {
244         int id;
245         unsigned long flags;
246
247         idr_preload(GFP_KERNEL);
248         spin_lock_irqsave(lock, flags);
249         id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
250         spin_unlock_irqrestore(lock, flags);
251         idr_preload_end();
252         /* failure */
253         if (id < 0)
254                 return 0;
255         /* idr_alloc() guarantees > 0 */
256         return (unsigned int)(id);
257 }
258
259 /*
260  * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
261  *                              completion processing logic for a taskmgmt
262  *                              cmd will be able to find who to wake up
263  *                              and where to stash the result
264  * @idrtable: The data object maintaining the pointer<-->int mappings
265  * @lock:     A spinlock used when exclusive access to idrtable is needed
266  * @cmdrsp:   Response from the IOVM
267  * @event:    The event handle to associate with an id
268  * @result:   The location to place the result of the event handle into
269  */
270 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
271                                        struct uiscmdrsp *cmdrsp,
272                                        wait_queue_head_t *event, int *result)
273 {
274         /* specify the event that has to be triggered when this */
275         /* cmd is complete */
276         cmdrsp->scsitaskmgmt.notify_handle =
277                 simple_idr_get(idrtable, event, lock);
278         cmdrsp->scsitaskmgmt.notifyresult_handle =
279                 simple_idr_get(idrtable, result, lock);
280 }
281
/*
 * cleanup_scsitaskmgmt_handles - Forget handles created by
 *                                setup_scsitaskmgmt_handles()
 * @idrtable: The data object maintaining the pointer<-->int mappings
 * @cmdrsp:   Response from the IOVM
 *
 * A handle of 0 means simple_idr_get() failed for that entry, so there
 * is nothing to remove.
 */
static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
					 struct uiscmdrsp *cmdrsp)
{
	if (cmdrsp->scsitaskmgmt.notify_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
	if (cmdrsp->scsitaskmgmt.notifyresult_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
}
296
/*
 * forward_taskmgmt_command - Send taskmgmt command to the Service
 *                            Partition
 * @tasktype: Type of taskmgmt command
 * @scsidev:  Scsidev that issued command
 *
 * Create a cmdrsp packet and send it to the Service Partition
 * that will service this request, then wait (up to 45 seconds)
 * for the result.
 *
 * Return: SUCCESS if the command completed, FAILED otherwise
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	/* 0xffff is the sentinel "no result yet" value the waiter polls for */
	int notifyresult = 0xffff;
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* claim a pending slot; its embedded cmdrsp is what we send */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue the requested @tasktype taskmgmt command */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* NOTE(review): notifyevent/notifyresult live on this stack frame;
	 * the idr handles set up here are what the completion path uses to
	 * find them, and they are removed below before this function returns
	 */
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		 tasktype, notifyresult);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
369
370 /*
371  * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
372  * @scsicmd: The scsicmd that needs aborted
373  *
374  * Return: SUCCESS if inserted, FAILED otherwise
375  */
376 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
377 {
378         /* issue TASK_MGMT_ABORT_TASK */
379         struct scsi_device *scsidev;
380         struct visordisk_info *vdisk;
381         int rtn;
382
383         scsidev = scsicmd->device;
384         vdisk = scsidev->hostdata;
385         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
386                 atomic_inc(&vdisk->error_count);
387         else
388                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
389         rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
390         if (rtn == SUCCESS) {
391                 scsicmd->result = DID_ABORT << 16;
392                 scsicmd->scsi_done(scsicmd);
393         }
394         return rtn;
395 }
396
397 /*
398  * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
399  * @scsicmd: The scsicmd that needs aborted
400  *
401  * Return: SUCCESS if inserted, FAILED otherwise
402  */
403 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
404 {
405         /* issue TASK_MGMT_LUN_RESET */
406         struct scsi_device *scsidev;
407         struct visordisk_info *vdisk;
408         int rtn;
409
410         scsidev = scsicmd->device;
411         vdisk = scsidev->hostdata;
412         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
413                 atomic_inc(&vdisk->error_count);
414         else
415                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
416         rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
417         if (rtn == SUCCESS) {
418                 scsicmd->result = DID_RESET << 16;
419                 scsicmd->scsi_done(scsicmd);
420         }
421         return rtn;
422 }
423
424 /*
425  * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
426  *                              target on the bus
427  * @scsicmd: The scsicmd that needs aborted
428  *
429  * Return: SUCCESS if inserted, FAILED otherwise
430  */
431 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
432 {
433         struct scsi_device *scsidev;
434         struct visordisk_info *vdisk;
435         int rtn;
436
437         scsidev = scsicmd->device;
438         shost_for_each_device(scsidev, scsidev->host) {
439                 vdisk = scsidev->hostdata;
440                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
441                         atomic_inc(&vdisk->error_count);
442                 else
443                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
444         }
445         rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
446         if (rtn == SUCCESS) {
447                 scsicmd->result = DID_RESET << 16;
448                 scsicmd->scsi_done(scsicmd);
449         }
450         return rtn;
451 }
452
/*
 * visorhba_host_reset_handler - Not supported
 * @scsicmd: The scsicmd that needs to be aborted
 *
 * A full host (adapter) reset is not implemented for this virtual HBA;
 * SUCCESS is reported so the SCSI error handler can make progress.
 *
 * Return: SUCCESS, always
 */
static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	return SUCCESS;
}
464
/*
 * visorhba_get_info - Get information about SCSI device
 * @shp: Scsi host that is requesting information
 *
 * Midlayer .info callback; this driver just reports a fixed name.
 *
 * Return: String with visorhba information
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
476
477 /*
478  * dma_data_dir_linux_to_spar - convert dma_data_direction value to
479  *                              Unisys-specific equivalent
480  * @d: dma direction value to convert
481  *
482  * Returns the Unisys-specific dma direction value corresponding to @d
483  */
484 static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
485 {
486         switch (d) {
487         case DMA_BIDIRECTIONAL:
488                 return UIS_DMA_BIDIRECTIONAL;
489         case DMA_TO_DEVICE:
490                 return UIS_DMA_TO_DEVICE;
491         case DMA_FROM_DEVICE:
492                 return UIS_DMA_FROM_DEVICE;
493         case DMA_NONE:
494                 return UIS_DMA_NONE;
495         default:
496                 return UIS_DMA_NONE;
497         }
498 }
499
/*
 * visorhba_queue_command_lck - Queues command to the Service Partition
 * @scsicmd:            Command to be queued
 * @visorhba_cmnd_done: Done command to call when scsicmd is returned
 *
 * Queues the scsicmd to the Service Partition after converting it to a
 * uiscmdrsp structure: destination, data direction, CDB and the
 * scatter-gather list are all marshalled into the channel message.
 *
 * Return: 0 if successfully queued to the Service Partition, otherwise
 *         SCSI_MLQUEUE_DEVICE_BUSY
 */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
					   (struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* refuse new work while the IOVM is down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir =
		dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* the channel message holds at most MAX_PHYS_INFO sg entries */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
582
/* Use the midlayer's DEF_SCSI_QCMD wrapper (handles host locking) when it
 * exists; otherwise the lck variant is used as the queuecommand directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
588
589 /*
590  * visorhba_slave_alloc - Called when new disk is discovered
591  * @scsidev: New disk
592  *
593  * Create a new visordisk_info structure and add it to our
594  * list of vdisks.
595  *
596  * Return: 0 on success, -ENOMEM on failure.
597  */
598 static int visorhba_slave_alloc(struct scsi_device *scsidev)
599 {
600         /* this is called by the midlayer before scan for new devices --
601          * LLD can alloc any struct & do init if needed.
602          */
603         struct visordisk_info *vdisk;
604         struct visorhba_devdata *devdata;
605         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
606
607         /* already allocated return success */
608         if (scsidev->hostdata)
609                 return 0;
610
611         /* even though we errored, treat as success */
612         devdata = (struct visorhba_devdata *)scsihost->hostdata;
613         if (!devdata)
614                 return 0;
615
616         vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
617         if (!vdisk)
618                 return -ENOMEM;
619
620         vdisk->sdev = scsidev;
621         scsidev->hostdata = vdisk;
622         return 0;
623 }
624
625 /*
626  * visorhba_slave_destroy - Disk is going away, clean up resources.
627  * @scsidev: Scsi device to destroy
628  */
629 static void visorhba_slave_destroy(struct scsi_device *scsidev)
630 {
631         /* midlevel calls this after device has been quiesced and
632          * before it is to be deleted.
633          */
634         struct visordisk_info *vdisk;
635
636         vdisk = scsidev->hostdata;
637         scsidev->hostdata = NULL;
638         kfree(vdisk);
639 }
640
/* SCSI midlayer hooks for the visorhba host adapter */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
/* maximum number of outstanding commands accepted from the midlayer */
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
658
/*
 * info_debugfs_show - Debugfs interface to dump visorhba states
 * @seq: The sequence file to write information to
 * @v:   Unused, but needed for use with seq file single_open invocation
 *
 * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
 *
 * Return: 0 (SUCCESS)
 */
static int info_debugfs_show(struct seq_file *seq, void *v)
{
	struct visorhba_devdata *devdata = seq->private;

	seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
	seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
	seq_printf(seq, "interrupts_disabled = %llu\n",
		   devdata->interrupts_disabled);
	seq_printf(seq, "interrupts_notme = %llu\n",
		   devdata->interrupts_notme);
	seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
	if (devdata->flags_addr) {
		/* also report the physical address and current value of the
		 * channel feature-flags register
		 */
		u64 phys_flags_addr =
			virt_to_phys((__force  void *)devdata->flags_addr);
		seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
			   phys_flags_addr);
		seq_printf(seq, "FeatureFlags = %llu\n",
			   (u64)readq(devdata->flags_addr));
	}
	seq_printf(seq, "acquire_failed_cnt = %llu\n",
		   devdata->acquire_failed_cnt);

	return 0;
}
692
/* debugfs open: bind info_debugfs_show with the device's private data */
static int info_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, info_debugfs_show, inode->i_private);
}
697
/* file operations for the read-only debugfs "info" file */
static const struct file_operations info_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
705
706 /*
707  * complete_taskmgmt_command - Complete task management
708  * @idrtable: The data object maintaining the pointer<-->int mappings
709  * @cmdrsp:   Response from the IOVM
710  * @result:   The result of the task management command
711  *
712  * Service Partition returned the result of the task management
713  * command. Wake up anyone waiting for it.
714  */
715 static void complete_taskmgmt_command(struct idr *idrtable,
716                                       struct uiscmdrsp *cmdrsp, int result)
717 {
718         wait_queue_head_t *wq =
719                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
720         int *scsi_result_ptr =
721                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
722         if (unlikely(!(wq && scsi_result_ptr))) {
723                 pr_err("visorhba: no completion context; cmd will time out\n");
724                 return;
725         }
726
727         /* copy the result of the taskmgmt and
728          * wake up the error handler that is waiting for this
729          */
730         pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
731         *scsi_result_ptr = result;
732         wake_up_all(wq);
733 }
734
/*
 * visorhba_serverdown_complete - Called when we are done cleaning up
 *                                from serverdown
 * @devdata: Visorhba instance on which to complete serverdown
 *
 * Called when we are done cleaning up from serverdown: stop the
 * response-processing thread, then fail every still-pending request
 * and mark the device as down.
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		/* dispatch on the type recorded by add_scsipending_entry() */
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			/* complete the SCSI command as if the bus was reset */
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			/* wake any error-handler thread waiting on this slot
			 * NOTE(review): done while holding privlock — the
			 * waiter runs on another context, so wake_up_all()
			 * under the lock appears intentional here
			 */
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			break;
		}
		/* free the slot */
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	/* transition finished: device is now fully down */
	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
783
784 /*
785  * visorhba_serverdown - Got notified that the IOVM is down
786  * @devdata: Visorhba that is being serviced by downed IOVM
787  *
788  * Something happened to the IOVM, return immediately and
789  * schedule cleanup work.
790  *
791  * Return: 0 on success, -EINVAL on failure
792  */
793 static int visorhba_serverdown(struct visorhba_devdata *devdata)
794 {
795         if (!devdata->serverdown && !devdata->serverchangingstate) {
796                 devdata->serverchangingstate = true;
797                 visorhba_serverdown_complete(devdata);
798         } else if (devdata->serverchangingstate) {
799                 return -EINVAL;
800         }
801         return 0;
802 }
803
804 /*
805  * do_scsi_linuxstat - Scsi command returned linuxstat
806  * @cmdrsp:  Response from IOVM
807  * @scsicmd: Command issued
808  *
809  * Don't log errors for disk-not-present inquiries.
810  */
811 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
812                               struct scsi_cmnd *scsicmd)
813 {
814         struct visordisk_info *vdisk;
815         struct scsi_device *scsidev;
816
817         scsidev = scsicmd->device;
818         memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
819
820         /* Do not log errors for disk-not-present inquiries */
821         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
822             (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
823             (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
824                 return;
825         /* Okay see what our error_count is here.... */
826         vdisk = scsidev->hostdata;
827         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
828                 atomic_inc(&vdisk->error_count);
829                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
830         }
831 }
832
833 static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
834                                       bool is_lun0)
835 {
836         if (len < NO_DISK_INQUIRY_RESULT_LEN)
837                 return -EINVAL;
838         memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
839         buf[2] = SCSI_SPC2_VER;
840         if (is_lun0) {
841                 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
842                 buf[3] = DEV_HISUPPORT;
843         } else {
844                 buf[0] = DEV_NOT_CAPABLE;
845         }
846         buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
847         strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
848         return 0;
849 }
850
/*
 * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
 * @cmdrsp:  Response from IOVM
 * @scsicmd: Command issued
 *
 * Handle response when no linuxstat was returned. For successful
 * INQUIRY commands flagged with no_disk_result, substitute a canned
 * "no disk" reply; for other successful IOs, decay the per-disk
 * error counter.
 */
static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
				struct scsi_cmnd *scsicmd)
{
	struct scsi_device *scsidev;
	unsigned char *buf;
	struct scatterlist *sg;
	unsigned int i;
	char *this_page;
	char *this_page_orig;
	int bufind = 0;
	struct visordisk_info *vdisk;

	scsidev = scsicmd->device;
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
		/* no_disk_result == 0 means the IOVM returned real
		 * INQUIRY data; nothing to substitute.
		 */
		if (cmdrsp->scsi.no_disk_result == 0)
			return;

		/* 36 bytes matches the canned response built by
		 * set_no_disk_inquiry_result()
		 */
		buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
		if (!buf)
			return;

		/* Linux scsi code wants a device at Lun 0
		 * to issue report luns, but we don't want
		 * a disk there so we'll present a processor
		 * there.
		 */
		set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
					   scsidev->lun == 0);

		/* No scatter-gather list: copy straight to the buffer. */
		if (scsi_sg_count(scsicmd) == 0) {
			memcpy(scsi_sglist(scsicmd), buf,
			       cmdrsp->scsi.bufflen);
			kfree(buf);
			return;
		}

		/* Scatter the canned reply across the sg list.
		 * NOTE(review): bufind is never advanced, so every sg
		 * entry is copied from the start of buf, and sg[i].length
		 * may exceed the 36 bytes allocated — this appears to
		 * assume the INQUIRY response fits in the first sg entry;
		 * confirm against callers.
		 */
		sg = scsi_sglist(scsicmd);
		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
			this_page_orig = kmap_atomic(sg_page(sg + i));
			this_page = (void *)((unsigned long)this_page_orig |
					     sg[i].offset);
			memcpy(this_page, buf + bufind, sg[i].length);
			kunmap_atomic(this_page_orig);
		}
		kfree(buf);
	} else {
		/* Clean completion: one step toward resetting the disk's
		 * error_count once ios_threshold good IOs have occurred.
		 */
		vdisk = scsidev->hostdata;
		if (atomic_read(&vdisk->ios_threshold) > 0) {
			atomic_dec(&vdisk->ios_threshold);
			if (atomic_read(&vdisk->ios_threshold) == 0)
				atomic_set(&vdisk->error_count, 0);
		}
	}
}
913
914 /*
915  * complete_scsi_command - Complete a scsi command
916  * @uiscmdrsp: Response from Service Partition
917  * @scsicmd:   The scsi command
918  *
919  * Response was returned by the Service Partition. Finish it and send
920  * completion to the scsi midlayer.
921  */
922 static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
923                                   struct scsi_cmnd *scsicmd)
924 {
925         /* take what we need out of cmdrsp and complete the scsicmd */
926         scsicmd->result = cmdrsp->scsi.linuxstat;
927         if (cmdrsp->scsi.linuxstat)
928                 do_scsi_linuxstat(cmdrsp, scsicmd);
929         else
930                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
931
932         scsicmd->scsi_done(scsicmd);
933 }
934
935 /*
936  * drain_queue - Pull responses out of iochannel
937  * @cmdrsp:  Response from the IOSP
938  * @devdata: Device that owns this iochannel
939  *
940  * Pulls responses out of the iochannel and process the responses.
941  */
942 static void drain_queue(struct uiscmdrsp *cmdrsp,
943                         struct visorhba_devdata *devdata)
944 {
945         struct scsi_cmnd *scsicmd;
946
947         while (1) {
948                 /* queue empty */
949                 if (visorchannel_signalremove(devdata->dev->visorchannel,
950                                               IOCHAN_FROM_IOPART,
951                                               cmdrsp))
952                         break;
953                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
954                         /* scsicmd location is returned by the
955                          * deletion
956                          */
957                         scsicmd = del_scsipending_ent(devdata,
958                                                       cmdrsp->scsi.handle);
959                         if (!scsicmd)
960                                 break;
961                         /* complete the orig cmd */
962                         complete_scsi_command(cmdrsp, scsicmd);
963                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
964                         if (!del_scsipending_ent(devdata,
965                                                  cmdrsp->scsitaskmgmt.handle))
966                                 break;
967                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
968                                                   cmdrsp->scsitaskmgmt.result);
969                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
970                         dev_err_once(&devdata->dev->device,
971                                      "ignoring unsupported NOTIFYGUEST\n");
972                 /* cmdrsp is now available for re-use */
973         }
974 }
975
976 /*
977  * process_incoming_rsps - Process responses from IOSP
978  * @v:  Void pointer to visorhba_devdata
979  *
980  * Main function for the thread that processes the responses
981  * from the IO Service Partition. When the queue is empty, wait
982  * to check to see if it is full again.
983  *
984  * Return: 0 on success, -ENOMEM on failure
985  */
986 static int process_incoming_rsps(void *v)
987 {
988         struct visorhba_devdata *devdata = v;
989         struct uiscmdrsp *cmdrsp = NULL;
990         const int size = sizeof(*cmdrsp);
991
992         cmdrsp = kmalloc(size, GFP_ATOMIC);
993         if (!cmdrsp)
994                 return -ENOMEM;
995
996         while (1) {
997                 if (kthread_should_stop())
998                         break;
999                 wait_event_interruptible_timeout(
1000                         devdata->rsp_queue, (atomic_read(
1001                                              &devdata->interrupt_rcvd) == 1),
1002                                 msecs_to_jiffies(devdata->thread_wait_ms));
1003                 /* drain queue */
1004                 drain_queue(cmdrsp, devdata);
1005         }
1006         kfree(cmdrsp);
1007         return 0;
1008 }
1009
1010 /*
1011  * visorhba_pause - Function to handle visorbus pause messages
1012  * @dev:           Device that is pausing
1013  * @complete_func: Function to call when finished
1014  *
1015  * Something has happened to the IO Service Partition that is
1016  * handling this device. Quiet this device and reset commands
1017  * so that the Service Partition can be corrected.
1018  *
1019  * Return: SUCCESS
1020  */
1021 static int visorhba_pause(struct visor_device *dev,
1022                           visorbus_state_complete_func complete_func)
1023 {
1024         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1025
1026         visorhba_serverdown(devdata);
1027         complete_func(dev, 0);
1028         return 0;
1029 }
1030
1031 /*
1032  * visorhba_resume - Function called when the IO Service Partition is back
1033  * @dev:           Device that is pausing
1034  * @complete_func: Function to call when finished
1035  *
1036  * Yay! The IO Service Partition is back, the channel has been wiped
1037  * so lets re-establish connection and start processing responses.
1038  *
1039  * Return: 0 on success, -EINVAL on failure
1040  */
1041 static int visorhba_resume(struct visor_device *dev,
1042                            visorbus_state_complete_func complete_func)
1043 {
1044         struct visorhba_devdata *devdata;
1045
1046         devdata = dev_get_drvdata(&dev->device);
1047         if (!devdata)
1048                 return -EINVAL;
1049
1050         if (devdata->serverdown && !devdata->serverchangingstate)
1051                 devdata->serverchangingstate = true;
1052
1053         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1054                                              "vhba_incming");
1055         devdata->serverdown = false;
1056         devdata->serverchangingstate = false;
1057
1058         return 0;
1059 }
1060
/*
 * visorhba_probe - Device has been discovered; do acquire
 * @dev: visor_device that was discovered
 *
 * A new HBA was discovered; do the initial connections of it:
 * allocate and register a scsi host sized from the channel's config,
 * create debugfs entries, switch the channel to polling mode, and
 * start the response-processing thread.
 *
 * Return: 0 on success, otherwise error code
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int err, channel_offset;
	u64 features;

	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	/* Read the adapter's limits out of the IO channel so the scsi
	 * host can be sized before registration.
	 */
	channel_offset = offsetof(struct visor_io_channel, vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned int)max.max_id;
	scsihost->max_lun = (unsigned int)max.max_lun;
	scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
	/* max_io_size is in bytes; max_sectors is in 512-byte sectors */
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	/* devdata lives in the scsi host's private hostdata area */
	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
						  visorhba_debugfs_dir);
	if (!devdata->debugfs_dir) {
		err = -ENOMEM;
		goto err_scsi_remove_host;
	}
	devdata->debugfs_info =
		debugfs_create_file("info", 0440,
				    devdata->debugfs_dir, devdata,
				    &info_debugfs_fops);
	if (!devdata->debugfs_info) {
		err = -ENOMEM;
		goto err_debugfs_dir;
	}

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	/* Advertise that this driver polls the channel for responses
	 * rather than relying on interrupts.
	 */
	channel_offset = offsetof(struct visor_io_channel,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;
	features |= VISOR_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;

	idr_init(&devdata->idr);

	/* Start the kthread that pulls responses off the channel. */
	devdata->thread_wait_ms = 2;
	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
					     "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_debugfs_info:
	debugfs_remove(devdata->debugfs_info);

err_debugfs_dir:
	debugfs_remove_recursive(devdata->debugfs_dir);

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}
1159
1160 /*
1161  * visorhba_remove - Remove a visorhba device
1162  * @dev: Device to remove
1163  *
1164  * Removes the visorhba device.
1165  */
1166 static void visorhba_remove(struct visor_device *dev)
1167 {
1168         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1169         struct Scsi_Host *scsihost = NULL;
1170
1171         if (!devdata)
1172                 return;
1173
1174         scsihost = devdata->scsihost;
1175         visor_thread_stop(devdata->thread);
1176         scsi_remove_host(scsihost);
1177         scsi_host_put(scsihost);
1178
1179         idr_destroy(&devdata->idr);
1180
1181         dev_set_drvdata(&dev->device, NULL);
1182         debugfs_remove(devdata->debugfs_info);
1183         debugfs_remove_recursive(devdata->debugfs_dir);
1184 }
1185
/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* responses are drained by a polling kthread, not an interrupt */
	.channel_interrupt = NULL,
};
1200
1201 /*
1202  * visorhba_init - Driver init routine
1203  *
1204  * Initialize the visorhba driver and register it with visorbus
1205  * to handle s-Par virtual host bus adapter.
1206  *
1207  * Return: 0 on success, error code otherwise
1208  */
1209 static int visorhba_init(void)
1210 {
1211         int rc = -ENOMEM;
1212
1213         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1214         if (!visorhba_debugfs_dir)
1215                 return -ENOMEM;
1216
1217         rc = visorbus_register_visor_driver(&visorhba_driver);
1218         if (rc)
1219                 goto cleanup_debugfs;
1220
1221         return 0;
1222
1223 cleanup_debugfs:
1224         debugfs_remove_recursive(visorhba_debugfs_dir);
1225
1226         return rc;
1227 }
1228
/*
 * visorhba_exit - Driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visorhba_exit(void)
{
	/* Unregister first so no new devices can arrive, then tear down
	 * the driver-level debugfs tree.
	 */
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1239
/* Module entry/exit points and metadata. */
module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");