Merge branch 'fixes' into misc
[sfrench/cifs-2.6.git] / drivers / staging / unisys / visorhba / visorhba_main.c
1 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2  * All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or (at
7  * your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  * NON INFRINGEMENT.  See the GNU General Public License for more
13  * details.
14  */
15
16 #include <linux/debugfs.h>
17 #include <linux/skbuff.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
25
26 #include "visorbus.h"
27 #include "iochannel.h"
28
29 /* The Send and Receive Buffers of the IO Queue may both be full */
30
31 #define IOS_ERROR_THRESHOLD  1000
32 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
33 #define VISORHBA_ERROR_COUNT 30
34
35 static struct dentry *visorhba_debugfs_dir;
36
37 /* GUIDS for HBA channel type supported by this driver */
38 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
39         /* Note that the only channel type we expect to be reported by the
40          * bus driver is the VISOR_VHBA channel.
41          */
42         { VISOR_VHBA_CHANNEL_UUID, "sparvhba" },
43         { NULL_UUID_LE, NULL }
44 };
45
46 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
47 MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_UUID_STR);
48
/* Per-LUN bookkeeping, stored in scsi_device->hostdata by slave_alloc. */
struct visordisk_info {
	struct scsi_device *sdev;	/* back-pointer to the owning scsi_device */
	u32 valid;
	/* Number of remaining good IOs before error_count is reset to 0 */
	atomic_t ios_threshold;
	/* Errors seen on this disk; saturates at VISORHBA_ERROR_COUNT */
	atomic_t error_count;
	struct visordisk_info *next;
};
56
/* One slot in the fixed-size table of commands outstanding at the IOVM. */
struct scsipending {
	struct uiscmdrsp cmdrsp;	/* command/response packet for this slot */
	void *sent;		/* The Data being tracked */
	char cmdtype;		/* Type of pointer that is being stored */
};
62
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	spinlock_t privlock; /* lock to protect data in devdata */
	bool serverdown;		/* IOVM is down; fail new requests */
	bool serverchangingstate;	/* IOVM up/down transition in progress */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;	/* channel feature-flags word, if mapped */
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;	/* anchor of the vdisk list */
	unsigned int max_buff_len;	/* largest SCSI buffer seen so far */
	int devnum;
	struct task_struct *thread;	/* response-processing thread */
	int thread_wait_ms;

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
99
/* Wrapper used to hand a devdata pointer around as opaque context. */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
103
104 /*
105  *      visor_thread_start - starts a thread for the device
106  *      @threadfn: Function the thread starts
107  *      @thrcontext: Context to pass to the thread, i.e. devdata
108  *      @name: string describing name of thread
109  *
110  *      Starts a thread for the device.
111  *
112  *      Return the task_struct * denoting the thread on success,
113  *             or NULL on failure
114  */
115 static struct task_struct *visor_thread_start
116 (int (*threadfn)(void *), void *thrcontext, char *name)
117 {
118         struct task_struct *task;
119
120         task = kthread_run(threadfn, thrcontext, "%s", name);
121         if (IS_ERR(task)) {
122                 pr_err("visorbus failed to start thread\n");
123                 return NULL;
124         }
125         return task;
126 }
127
/*
 *	visor_thread_stop - stops the thread if it is running
 */
static void visor_thread_stop(struct task_struct *task)
{
	/* NULL means no thread was ever started; nothing to do */
	if (task)
		kthread_stop(task);
}
137
138 /*
139  *      add_scsipending_entry - save off io command that is pending in
140  *                              Service Partition
141  *      @devdata: Pointer to devdata
142  *      @cmdtype: Specifies the type of command pending
143  *      @new:   The command to be saved
144  *
145  *      Saves off the io command that is being handled by the Service
146  *      Partition so that it can be handled when it completes. If new is
147  *      NULL it is assumed the entry refers only to the cmdrsp.
148  *      Returns insert_location where entry was added,
149  *      -EBUSY if it can't
150  */
151 static int add_scsipending_entry(struct visorhba_devdata *devdata,
152                                  char cmdtype, void *new)
153 {
154         unsigned long flags;
155         struct scsipending *entry;
156         int insert_location;
157
158         spin_lock_irqsave(&devdata->privlock, flags);
159         insert_location = devdata->nextinsert;
160         while (devdata->pending[insert_location].sent) {
161                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
162                 if (insert_location == (int)devdata->nextinsert) {
163                         spin_unlock_irqrestore(&devdata->privlock, flags);
164                         return -EBUSY;
165                 }
166         }
167
168         entry = &devdata->pending[insert_location];
169         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
170         entry->cmdtype = cmdtype;
171         if (new)
172                 entry->sent = new;
173         else /* wants to send cmdrsp */
174                 entry->sent = &entry->cmdrsp;
175         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
176         spin_unlock_irqrestore(&devdata->privlock, flags);
177
178         return insert_location;
179 }
180
181 /*
182  *      del_scsipending_ent - removes an entry from the pending array
183  *      @devdata: Device holding the pending array
184  *      @del: Entry to remove
185  *
186  *      Removes the entry pointed at by del and returns it.
187  *      Returns the scsipending entry pointed at
188  */
189 static void *del_scsipending_ent(struct visorhba_devdata *devdata,
190                                  int del)
191 {
192         unsigned long flags;
193         void *sent;
194
195         if (del >= MAX_PENDING_REQUESTS)
196                 return NULL;
197
198         spin_lock_irqsave(&devdata->privlock, flags);
199         sent = devdata->pending[del].sent;
200
201         devdata->pending[del].cmdtype = 0;
202         devdata->pending[del].sent = NULL;
203         spin_unlock_irqrestore(&devdata->privlock, flags);
204
205         return sent;
206 }
207
208 /*
209  *      get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
210  *      @ddata: Device holding the pending array
211  *      @ent: Entry that stores the cmdrsp
212  *
213  *      Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
214  *      if the "sent" field is not NULL
215  *      Returns a pointer to the cmdrsp.
216  */
217 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
218                                                 int ent)
219 {
220         if (ddata->pending[ent].sent)
221                 return &ddata->pending[ent].cmdrsp;
222
223         return NULL;
224 }
225
226 /*
227  *      simple_idr_get - associate a provided pointer with an int value
228  *                       1 <= value <= INT_MAX, and return this int value;
229  *                       the pointer value can be obtained later by passing
230  *                       this int value to idr_find()
231  *      @idrtable: the data object maintaining the pointer<-->int mappings
232  *      @p: the pointer value to be remembered
233  *      @lock: a spinlock used when exclusive access to idrtable is needed
234  */
235 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
236                                    spinlock_t *lock)
237 {
238         int id;
239         unsigned long flags;
240
241         idr_preload(GFP_KERNEL);
242         spin_lock_irqsave(lock, flags);
243         id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
244         spin_unlock_irqrestore(lock, flags);
245         idr_preload_end();
246         if (id < 0)
247                 return 0;  /* failure */
248         return (unsigned int)(id);  /* idr_alloc() guarantees > 0 */
249 }
250
251 /*
252  *      setup_scsitaskmgmt_handles - stash the necessary handles so that the
253  *                                   completion processing logic for a taskmgmt
254  *                                   cmd will be able to find who to wake up
255  *                                   and where to stash the result
256  */
257 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
258                                        struct uiscmdrsp *cmdrsp,
259                                        wait_queue_head_t *event, int *result)
260 {
261         /* specify the event that has to be triggered when this */
262         /* cmd is complete */
263         cmdrsp->scsitaskmgmt.notify_handle =
264                 simple_idr_get(idrtable, event, lock);
265         cmdrsp->scsitaskmgmt.notifyresult_handle =
266                 simple_idr_get(idrtable, result, lock);
267 }
268
269 /*
270  *      cleanup_scsitaskmgmt_handles - forget handles created by
271  *                                     setup_scsitaskmgmt_handles()
272  */
273 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
274                                          struct uiscmdrsp *cmdrsp)
275 {
276         if (cmdrsp->scsitaskmgmt.notify_handle)
277                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
278         if (cmdrsp->scsitaskmgmt.notifyresult_handle)
279                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
280 }
281
/*
 *	forward_taskmgmt_command - send taskmegmt command to the Service
 *				   Partition
 *	@tasktype: Type of taskmgmt command
 *	@scsidev: Scsidev that issued command
 *
 *	Create a cmdrsp packet and send it to the Serivce Partition
 *	that will service this request.
 *	Returns whether the command was queued successfully or not.
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	/* 0xffff is the "not yet completed" sentinel; the completion
	 * path overwrites it with the real taskmgmt result
	 */
	int notifyresult = 0xffff;
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	/* refuse new work while the IOVM is down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending slot; its embedded cmdrsp carries the request */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* publish idr handles so the response thread can find the
	 * on-stack wait queue and result variable above
	 */
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		 tasktype, notifyresult);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	/* NOTE(review): on timeout the pending slot and idr handles are
	 * torn down while a late IOVM response may still arrive; the
	 * completion path tolerates missing handles (idr_find -> NULL),
	 * but the slot reuse window looks racy - confirm against the
	 * response-thread ordering before restructuring this path.
	 */
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
353
354 /*
355  *      visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
356  *      @scsicmd: The scsicmd that needs aborted
357  *
358  *      Returns SUCCESS if inserted, failure otherwise
359  *
360  */
361 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
362 {
363         /* issue TASK_MGMT_ABORT_TASK */
364         struct scsi_device *scsidev;
365         struct visordisk_info *vdisk;
366         int rtn;
367
368         scsidev = scsicmd->device;
369         vdisk = scsidev->hostdata;
370         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
371                 atomic_inc(&vdisk->error_count);
372         else
373                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
374         rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
375         if (rtn == SUCCESS) {
376                 scsicmd->result = DID_ABORT << 16;
377                 scsicmd->scsi_done(scsicmd);
378         }
379         return rtn;
380 }
381
382 /*
383  *      visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
384  *      @scsicmd: The scsicmd that needs aborted
385  *
386  *      Returns SUCCESS if inserted, failure otherwise
387  */
388 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
389 {
390         /* issue TASK_MGMT_LUN_RESET */
391         struct scsi_device *scsidev;
392         struct visordisk_info *vdisk;
393         int rtn;
394
395         scsidev = scsicmd->device;
396         vdisk = scsidev->hostdata;
397         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
398                 atomic_inc(&vdisk->error_count);
399         else
400                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
401         rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
402         if (rtn == SUCCESS) {
403                 scsicmd->result = DID_RESET << 16;
404                 scsicmd->scsi_done(scsicmd);
405         }
406         return rtn;
407 }
408
409 /*
410  *      visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
411  *                                   target on the bus
412  *      @scsicmd: The scsicmd that needs aborted
413  *
414  *      Returns SUCCESS
415  */
416 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
417 {
418         struct scsi_device *scsidev;
419         struct visordisk_info *vdisk;
420         int rtn;
421
422         scsidev = scsicmd->device;
423         shost_for_each_device(scsidev, scsidev->host) {
424                 vdisk = scsidev->hostdata;
425                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
426                         atomic_inc(&vdisk->error_count);
427                 else
428                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
429         }
430         rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
431         if (rtn == SUCCESS) {
432                 scsicmd->result = DID_RESET << 16;
433                 scsicmd->scsi_done(scsicmd);
434         }
435         return rtn;
436 }
437
438 /*
439  *      visorhba_host_reset_handler - Not supported
440  *      @scsicmd: The scsicmd that needs aborted
441  *
442  *      Not supported, return SUCCESS
443  *      Returns SUCCESS
444  */
445 static int
446 visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
447 {
448         /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
449         return SUCCESS;
450 }
451
/*
 *	visorhba_get_info
 *	@shp: Scsi host that is requesting information
 *
 *	Returns string with info
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Static identification string; shp is unused. */
	static const char *const info = "visorhba";

	return info;
}
463
/*
 *	visorhba_queue_command_lck -- queues command to the Service Partition
 *	@scsicmd: Command to be queued
 *	@vsiorhba_cmnd_done: Done command to call when scsicmd is returned
 *
 *	Queues to scsicmd to the ServicePartition after converting it to a
 *	uiscmdrsp structure.
 *
 *	Returns success if queued to the Service Partition, otherwise
 *	failure.
 */
static int
visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
			   void (*visorhba_cmnd_done)(struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* IOVM unavailable: ask the midlayer to retry later */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	/* reserve a pending slot tracking scsicmd until the response
	 * comes back from the IOVM
	 */
	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);

	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);

	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
	/* copy the full fixed-size CDB area into the channel packet */
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);

	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* the channel packet has room for only MAX_PHYS_INFO entries */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	/* release the reserved slot; the midlayer will retry the command */
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
548
549 #ifdef DEF_SCSI_QCMD
550 static DEF_SCSI_QCMD(visorhba_queue_command)
551 #else
552 #define visorhba_queue_command visorhba_queue_command_lck
553 #endif
554
555 /*
556  *      visorhba_slave_alloc - called when new disk is discovered
557  *      @scsidev: New disk
558  *
559  *      Create a new visordisk_info structure and add it to our
560  *      list of vdisks.
561  *
562  *      Returns success when created, otherwise error.
563  */
564 static int visorhba_slave_alloc(struct scsi_device *scsidev)
565 {
566         /* this is called by the midlayer before scan for new devices --
567          * LLD can alloc any struct & do init if needed.
568          */
569         struct visordisk_info *vdisk;
570         struct visorhba_devdata *devdata;
571         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
572
573         if (scsidev->hostdata)
574                 return 0; /* already allocated return success */
575
576         devdata = (struct visorhba_devdata *)scsihost->hostdata;
577         if (!devdata)
578                 return 0; /* even though we errored, treat as success */
579
580         vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
581         if (!vdisk)
582                 return -ENOMEM;
583
584         vdisk->sdev = scsidev;
585         scsidev->hostdata = vdisk;
586         return 0;
587 }
588
589 /*
590  *      visorhba_slave_destroy - disk is going away
591  *      @scsidev: scsi device going away
592  *
593  *      Disk is going away, clean up resources.
594  *      Returns void.
595  */
596 static void visorhba_slave_destroy(struct scsi_device *scsidev)
597 {
598         /* midlevel calls this after device has been quiesced and
599          * before it is to be deleted.
600          */
601         struct visordisk_info *vdisk;
602
603         vdisk = scsidev->hostdata;
604         scsidev->hostdata = NULL;
605         kfree(vdisk);
606 }
607
/* SCSI midlayer hooks for the visor HBA; registered at probe time. */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
/* max simultaneously outstanding commands accepted from the midlayer */
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
625
626 /*
627  *      info_debugfs_show - debugfs interface to dump visorhba states
628  *
629  *      This presents a file in the debugfs tree named:
630  *          /visorhba/vbus<x>:dev<y>/info
631  */
632 static int info_debugfs_show(struct seq_file *seq, void *v)
633 {
634         struct visorhba_devdata *devdata = seq->private;
635
636         seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
637         seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
638         seq_printf(seq, "interrupts_disabled = %llu\n",
639                    devdata->interrupts_disabled);
640         seq_printf(seq, "interrupts_notme = %llu\n",
641                    devdata->interrupts_notme);
642         seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
643         if (devdata->flags_addr) {
644                 u64 phys_flags_addr =
645                         virt_to_phys((__force  void *)devdata->flags_addr);
646                 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
647                            phys_flags_addr);
648                 seq_printf(seq, "FeatureFlags = %llu\n",
649                            (u64)readq(devdata->flags_addr));
650         }
651         seq_printf(seq, "acquire_failed_cnt = %llu\n",
652                    devdata->acquire_failed_cnt);
653
654         return 0;
655 }
656
657 static int info_debugfs_open(struct inode *inode, struct file *file)
658 {
659         return single_open(file, info_debugfs_show, inode->i_private);
660 }
661
/* Standard seq_file plumbing for the read-only debugfs "info" file. */
static const struct file_operations info_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
669
670 /*
671  *      complete_taskmgmt_command - complete task management
672  *      @cmdrsp: Response from the IOVM
673  *
674  *      Service Partition returned the result of the task management
675  *      command. Wake up anyone waiting for it.
676  *      Returns void
677  */
678 static void complete_taskmgmt_command(struct idr *idrtable,
679                                       struct uiscmdrsp *cmdrsp, int result)
680 {
681         wait_queue_head_t *wq =
682                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
683         int *scsi_result_ptr =
684                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
685
686         if (unlikely(!(wq && scsi_result_ptr))) {
687                 pr_err("visorhba: no completion context; cmd will time out\n");
688                 return;
689         }
690
691         /* copy the result of the taskmgmt and
692          * wake up the error handler that is waiting for this
693          */
694         pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
695         *scsi_result_ptr = result;
696         wake_up_all(wq);
697 }
698
/*
 *	visorhba_serverdown_complete - Called when we are done cleaning up
 *				       from serverdown
 *	@devdata: device that is being serviced
 *
 *	Called when we are done cleanning up from serverdown, stop processing
 *	queue, fail pending IOs.
 *	Returns void when finished cleaning up
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			/* complete outstanding SCSI commands as reset */
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			/* wake any error-handler thread blocked in
			 * forward_taskmgmt_command() with a failure
			 */
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			break;
		}
		/* free the slot for reuse once the server comes back */
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	/* mark the transition finished: down, and no longer changing */
	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
748
749 /*
750  *      visorhba_serverdown - Got notified that the IOVM is down
751  *      @devdata: visorhba that is being serviced by downed IOVM.
752  *
753  *      Something happened to the IOVM, return immediately and
754  *      schedule work cleanup work.
755  *      Return SUCCESS or EINVAL
756  */
757 static int visorhba_serverdown(struct visorhba_devdata *devdata)
758 {
759         if (!devdata->serverdown && !devdata->serverchangingstate) {
760                 devdata->serverchangingstate = true;
761                 visorhba_serverdown_complete(devdata);
762         } else if (devdata->serverchangingstate) {
763                 return -EINVAL;
764         }
765         return 0;
766 }
767
768 /*
769  *      do_scsi_linuxstat - scsi command returned linuxstat
770  *      @cmdrsp: response from IOVM
771  *      @scsicmd: Command issued.
772  *
773  *      Don't log errors for disk-not-present inquiries
774  *      Returns void
775  */
776 static void
777 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
778 {
779         struct visordisk_info *vdisk;
780         struct scsi_device *scsidev;
781
782         scsidev = scsicmd->device;
783         memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
784
785         /* Do not log errors for disk-not-present inquiries */
786         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
787             (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
788             (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
789                 return;
790         /* Okay see what our error_count is here.... */
791         vdisk = scsidev->hostdata;
792         if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
793                 atomic_inc(&vdisk->error_count);
794                 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
795         }
796 }
797
798 static int set_no_disk_inquiry_result(unsigned char *buf,
799                                       size_t len, bool is_lun0)
800 {
801         if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
802                 return -EINVAL;
803         memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
804         buf[2] = SCSI_SPC2_VER;
805         if (is_lun0) {
806                 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
807                 buf[3] = DEV_HISUPPORT;
808         } else {
809                 buf[0] = DEV_NOT_CAPABLE;
810         }
811         buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
812         strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
813         return 0;
814 }
815
816 /*
817  *      do_scsi_nolinuxstat - scsi command didn't have linuxstat
818  *      @cmdrsp: response from IOVM
819  *      @scsicmd: Command issued.
820  *
821  *      Handle response when no linuxstat was returned
822  *      Returns void
823  */
824 static void
825 do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
826 {
827         struct scsi_device *scsidev;
828         unsigned char *buf;
829         struct scatterlist *sg;
830         unsigned int i;
831         char *this_page;
832         char *this_page_orig;
833         int bufind = 0;
834         struct visordisk_info *vdisk;
835
836         scsidev = scsicmd->device;
837         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
838             (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
839                 if (cmdrsp->scsi.no_disk_result == 0)
840                         return;
841
842                 buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
843                 if (!buf)
844                         return;
845
846                 /* Linux scsi code wants a device at Lun 0
847                  * to issue report luns, but we don't want
848                  * a disk there so we'll present a processor
849                  * there.
850                  */
851                 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
852                                            scsidev->lun == 0);
853
854                 if (scsi_sg_count(scsicmd) == 0) {
855                         memcpy(scsi_sglist(scsicmd), buf,
856                                cmdrsp->scsi.bufflen);
857                         kfree(buf);
858                         return;
859                 }
860
861                 sg = scsi_sglist(scsicmd);
862                 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
863                         this_page_orig = kmap_atomic(sg_page(sg + i));
864                         this_page = (void *)((unsigned long)this_page_orig |
865                                              sg[i].offset);
866                         memcpy(this_page, buf + bufind, sg[i].length);
867                         kunmap_atomic(this_page_orig);
868                 }
869                 kfree(buf);
870         } else {
871                 vdisk = scsidev->hostdata;
872                 if (atomic_read(&vdisk->ios_threshold) > 0) {
873                         atomic_dec(&vdisk->ios_threshold);
874                         if (atomic_read(&vdisk->ios_threshold) == 0)
875                                 atomic_set(&vdisk->error_count, 0);
876                 }
877         }
878 }
879
880 /*
881  *      complete_scsi_command - complete a scsi command
882  *      @uiscmdrsp: Response from Service Partition
883  *      @scsicmd: The scsi command
884  *
885  *      Response returned by the Service Partition, finish it and send
886  *      completion to the scsi midlayer.
887  *      Returns void.
888  */
889 static void
890 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
891 {
892         /* take what we need out of cmdrsp and complete the scsicmd */
893         scsicmd->result = cmdrsp->scsi.linuxstat;
894         if (cmdrsp->scsi.linuxstat)
895                 do_scsi_linuxstat(cmdrsp, scsicmd);
896         else
897                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
898
899         scsicmd->scsi_done(scsicmd);
900 }
901
902 /*
903  *      drain_queue - pull responses out of iochannel
904  *      @cmdrsp: Response from the IOSP
905  *      @devdata: device that owns this iochannel
906  *
907  *      Pulls responses out of the iochannel and process the responses.
908  *      Restuns void
909  */
910 static void
911 drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
912 {
913         struct scsi_cmnd *scsicmd;
914
915         while (1) {
916                 if (visorchannel_signalremove(devdata->dev->visorchannel,
917                                               IOCHAN_FROM_IOPART,
918                                               cmdrsp))
919                         break; /* queue empty */
920
921                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
922                         /* scsicmd location is returned by the
923                          * deletion
924                          */
925                         scsicmd = del_scsipending_ent(devdata,
926                                                       cmdrsp->scsi.handle);
927                         if (!scsicmd)
928                                 break;
929                         /* complete the orig cmd */
930                         complete_scsi_command(cmdrsp, scsicmd);
931                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
932                         if (!del_scsipending_ent(devdata,
933                                                  cmdrsp->scsitaskmgmt.handle))
934                                 break;
935                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
936                                                   cmdrsp->scsitaskmgmt.result);
937                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
938                         dev_err_once(&devdata->dev->device,
939                                      "ignoring unsupported NOTIFYGUEST\n");
940                 /* cmdrsp is now available for re-use */
941         }
942 }
943
944 /*
945  *      process_incoming_rsps - Process responses from IOSP
946  *      @v: void pointer to visorhba_devdata
947  *
948  *      Main function for the thread that processes the responses
949  *      from the IO Service Partition. When the queue is empty, wait
950  *      to check to see if it is full again.
951  */
952 static int process_incoming_rsps(void *v)
953 {
954         struct visorhba_devdata *devdata = v;
955         struct uiscmdrsp *cmdrsp = NULL;
956         const int size = sizeof(*cmdrsp);
957
958         cmdrsp = kmalloc(size, GFP_ATOMIC);
959         if (!cmdrsp)
960                 return -ENOMEM;
961
962         while (1) {
963                 if (kthread_should_stop())
964                         break;
965                 wait_event_interruptible_timeout(
966                         devdata->rsp_queue, (atomic_read(
967                                              &devdata->interrupt_rcvd) == 1),
968                                 msecs_to_jiffies(devdata->thread_wait_ms));
969                 /* drain queue */
970                 drain_queue(cmdrsp, devdata);
971         }
972         kfree(cmdrsp);
973         return 0;
974 }
975
976 /*
977  *      visorhba_pause - function to handle visorbus pause messages
978  *      @dev: device that is pausing.
979  *      @complete_func: function to call when finished
980  *
981  *      Something has happened to the IO Service Partition that is
982  *      handling this device. Quiet this device and reset commands
983  *      so that the Service Partition can be corrected.
984  *      Returns SUCCESS
985  */
986 static int visorhba_pause(struct visor_device *dev,
987                           visorbus_state_complete_func complete_func)
988 {
989         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
990
991         visorhba_serverdown(devdata);
992         complete_func(dev, 0);
993         return 0;
994 }
995
996 /*
997  *      visorhba_resume - function called when the IO Service Partition is back
998  *      @dev: device that is pausing.
999  *      @complete_func: function to call when finished
1000  *
1001  *      Yay! The IO Service Partition is back, the channel has been wiped
1002  *      so lets re-establish connection and start processing responses.
1003  *      Returns 0 on success, error on failure.
1004  */
1005 static int visorhba_resume(struct visor_device *dev,
1006                            visorbus_state_complete_func complete_func)
1007 {
1008         struct visorhba_devdata *devdata;
1009
1010         devdata = dev_get_drvdata(&dev->device);
1011         if (!devdata)
1012                 return -EINVAL;
1013
1014         if (devdata->serverdown && !devdata->serverchangingstate)
1015                 devdata->serverchangingstate = true;
1016
1017         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1018                                              "vhba_incming");
1019
1020         devdata->serverdown = false;
1021         devdata->serverchangingstate = false;
1022
1023         return 0;
1024 }
1025
1026 /*
1027  *      visorhba_probe - device has been discovered, do acquire
1028  *      @dev: visor_device that was discovered
1029  *
1030  *      A new HBA was discovered, do the initial connections of it.
1031  *      Return 0 on success, otherwise error.
1032  */
1033 static int visorhba_probe(struct visor_device *dev)
1034 {
1035         struct Scsi_Host *scsihost;
1036         struct vhba_config_max max;
1037         struct visorhba_devdata *devdata = NULL;
1038         int err, channel_offset;
1039         u64 features;
1040
1041         scsihost = scsi_host_alloc(&visorhba_driver_template,
1042                                    sizeof(*devdata));
1043         if (!scsihost)
1044                 return -ENODEV;
1045
1046         channel_offset = offsetof(struct visor_io_channel, vhba.max);
1047         err = visorbus_read_channel(dev, channel_offset, &max,
1048                                     sizeof(struct vhba_config_max));
1049         if (err < 0)
1050                 goto err_scsi_host_put;
1051
1052         scsihost->max_id = (unsigned int)max.max_id;
1053         scsihost->max_lun = (unsigned int)max.max_lun;
1054         scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
1055         scsihost->max_sectors =
1056             (unsigned short)(max.max_io_size >> 9);
1057         scsihost->sg_tablesize =
1058             (unsigned short)(max.max_io_size / PAGE_SIZE);
1059         if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1060                 scsihost->sg_tablesize = MAX_PHYS_INFO;
1061         err = scsi_add_host(scsihost, &dev->device);
1062         if (err < 0)
1063                 goto err_scsi_host_put;
1064
1065         devdata = (struct visorhba_devdata *)scsihost->hostdata;
1066         devdata->dev = dev;
1067         dev_set_drvdata(&dev->device, devdata);
1068
1069         devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1070                                                   visorhba_debugfs_dir);
1071         if (!devdata->debugfs_dir) {
1072                 err = -ENOMEM;
1073                 goto err_scsi_remove_host;
1074         }
1075         devdata->debugfs_info =
1076                 debugfs_create_file("info", 0440,
1077                                     devdata->debugfs_dir, devdata,
1078                                     &info_debugfs_fops);
1079         if (!devdata->debugfs_info) {
1080                 err = -ENOMEM;
1081                 goto err_debugfs_dir;
1082         }
1083
1084         init_waitqueue_head(&devdata->rsp_queue);
1085         spin_lock_init(&devdata->privlock);
1086         devdata->serverdown = false;
1087         devdata->serverchangingstate = false;
1088         devdata->scsihost = scsihost;
1089
1090         channel_offset = offsetof(struct visor_io_channel,
1091                                   channel_header.features);
1092         err = visorbus_read_channel(dev, channel_offset, &features, 8);
1093         if (err)
1094                 goto err_debugfs_info;
1095         features |= VISOR_CHANNEL_IS_POLLING;
1096         err = visorbus_write_channel(dev, channel_offset, &features, 8);
1097         if (err)
1098                 goto err_debugfs_info;
1099
1100         idr_init(&devdata->idr);
1101
1102         devdata->thread_wait_ms = 2;
1103         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1104                                              "vhba_incoming");
1105
1106         scsi_scan_host(scsihost);
1107
1108         return 0;
1109
1110 err_debugfs_info:
1111         debugfs_remove(devdata->debugfs_info);
1112
1113 err_debugfs_dir:
1114         debugfs_remove_recursive(devdata->debugfs_dir);
1115
1116 err_scsi_remove_host:
1117         scsi_remove_host(scsihost);
1118
1119 err_scsi_host_put:
1120         scsi_host_put(scsihost);
1121         return err;
1122 }
1123
1124 /*
1125  *      visorhba_remove - remove a visorhba device
1126  *      @dev: Device to remove
1127  *
1128  *      Removes the visorhba device.
1129  *      Returns void.
1130  */
1131 static void visorhba_remove(struct visor_device *dev)
1132 {
1133         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1134         struct Scsi_Host *scsihost = NULL;
1135
1136         if (!devdata)
1137                 return;
1138
1139         scsihost = devdata->scsihost;
1140         visor_thread_stop(devdata->thread);
1141         scsi_remove_host(scsihost);
1142         scsi_host_put(scsihost);
1143
1144         idr_destroy(&devdata->idr);
1145
1146         dev_set_drvdata(&dev->device, NULL);
1147         debugfs_remove(devdata->debugfs_info);
1148         debugfs_remove_recursive(devdata->debugfs_dir);
1149 }
1150
/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* no interrupt callback: responses are polled by the
	 * process_incoming_rsps() kthread started in probe
	 */
	.channel_interrupt = NULL,
};
1165
1166 /*
1167  *      visorhba_init           - driver init routine
1168  *
1169  *      Initialize the visorhba driver and register it with visorbus
1170  *      to handle s-Par virtual host bus adapter.
1171  */
1172 static int visorhba_init(void)
1173 {
1174         int rc = -ENOMEM;
1175
1176         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1177         if (!visorhba_debugfs_dir)
1178                 return -ENOMEM;
1179
1180         rc = visorbus_register_visor_driver(&visorhba_driver);
1181         if (rc)
1182                 goto cleanup_debugfs;
1183
1184         return 0;
1185
1186 cleanup_debugfs:
1187         debugfs_remove_recursive(visorhba_debugfs_dir);
1188
1189         return rc;
1190 }
1191
1192 /*
1193  *      visorhba_exit   - driver exit routine
1194  *
1195  *      Unregister driver from the bus and free up memory.
1196  */
1197 static void visorhba_exit(void)
1198 {
1199         visorbus_unregister_visor_driver(&visorhba_driver);
1200         debugfs_remove_recursive(visorhba_debugfs_dir);
1201 }
1202
/* module entry/exit points and metadata */
module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");