drivers/staging/unisys/visorhba/visorhba_main.c
1 /*
2  * Copyright (c) 2012 - 2015 UNISYS CORPORATION
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or (at
8  * your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  * NON INFRINGEMENT.  See the GNU General Public License for more
14  * details.
15  */
16
17 #include <linux/debugfs.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
25
26 #include "visorbus.h"
27 #include "iochannel.h"
28
29 /* The Send and Receive Buffers of the IO Queue may both be full */
30
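/*
 * Error tracking per disk: IO errors bump error_count (capped at
 * VISORHBA_ERROR_COUNT); once ios_threshold has been armed,
 * IOS_ERROR_THRESHOLD error-free completions must be seen before
 * error_count is cleared again.
 */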
31 #define IOS_ERROR_THRESHOLD  1000
32 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
33 #define VISORHBA_ERROR_COUNT 30
34
35 static struct dentry *visorhba_debugfs_dir;
36
37 /* GUIDS for HBA channel type supported by this driver */
38 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
39         /* Note that the only channel type we expect to be reported by the
40          * bus driver is the VISOR_VHBA channel.
41          */
42         { VISOR_VHBA_CHANNEL_GUID, "sparvhba" },
43         {}
44 };
45
46 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
47 MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
48
49 struct visordisk_info {
50         u32 valid;
51         /* Disk Path */
52         u32 channel, id, lun;
53         atomic_t ios_threshold;
54         atomic_t error_count;
55         struct visordisk_info *next;
56 };
57
58 struct scsipending {
59         struct uiscmdrsp cmdrsp;
60         /* The Data being tracked */
61         void *sent;
62         /* Type of pointer that is being stored */
63         char cmdtype;
64 };
65
66 /* Each scsi_host has a host_data area that contains this struct. */
67 struct visorhba_devdata {
68         struct Scsi_Host *scsihost;
69         struct visor_device *dev;
70         struct list_head dev_info_list;
71         /* Tracks the requests that have been forwarded to
72          * the IOVM and haven't returned yet
73          */
74         struct scsipending pending[MAX_PENDING_REQUESTS];
75         /* Start search for next pending free slot here */
76         unsigned int nextinsert;
77         /* lock to protect data in devdata */
78         spinlock_t privlock;
79         bool serverdown;
80         bool serverchangingstate;
81         unsigned long long acquire_failed_cnt;
82         unsigned long long interrupts_rcvd;
83         unsigned long long interrupts_notme;
84         unsigned long long interrupts_disabled;
85         u64 __iomem *flags_addr;
86         atomic_t interrupt_rcvd;
87         wait_queue_head_t rsp_queue;
88         struct visordisk_info head;
89         unsigned int max_buff_len;
90         int devnum;
91         struct task_struct *thread;
92         int thread_wait_ms;
93
94         /*
95          * allows us to pass int handles back-and-forth between us and
96          * iovm, instead of raw pointers
97          */
98         struct idr idr;
99
100         struct dentry *debugfs_dir;
101         struct dentry *debugfs_info;
102 };
103
104 struct visorhba_devices_open {
105         struct visorhba_devdata *devdata;
106 };
107
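/*
 * Walk the visordisk_info list anchored at @list->head, executing the
 * statement that follows the macro for nodes whose channel, id and lun
 * match those of @match.
 */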
108 #define for_each_vdisk_match(iter, list, match) \
109         for (iter = &list->head; iter->next; iter = iter->next) \
110                 if ((iter->channel == match->channel) && \
111                     (iter->id == match->id) && \
112                     (iter->lun == match->lun))
113
114 /*
115  * visor_thread_start - Starts a thread for the device
116  * @threadfn:   Function the thread starts
117  * @thrcontext: Context to pass to the thread, i.e. devdata
118  * @name:       String describing name of thread
119  *
120  * Starts a thread for the device.
121  *
122  * Return: The task_struct * denoting the thread on success,
123  *         or NULL on failure
124  */
125 static struct task_struct *visor_thread_start(int (*threadfn)(void *),
126                                               void *thrcontext, char *name)
127 {
128         struct task_struct *task;
129
130         task = kthread_run(threadfn, thrcontext, "%s", name);
131         if (IS_ERR(task)) {
                pr_err("visorhba: failed to start thread\n");
133                 return NULL;
134         }
135         return task;
136 }
137
138 /*
139  * visor_thread_stop - Stops the thread if it is running
 * @task: task_struct of the thread to stop
141  */
142 static void visor_thread_stop(struct task_struct *task)
143 {
144         kthread_stop(task);
145 }
146
147 /*
148  * add_scsipending_entry - Save off io command that is pending in
149  *                         Service Partition
150  * @devdata: Pointer to devdata
151  * @cmdtype: Specifies the type of command pending
152  * @new:     The command to be saved
153  *
154  * Saves off the io command that is being handled by the Service
155  * Partition so that it can be handled when it completes. If new is
156  * NULL it is assumed the entry refers only to the cmdrsp.
157  *
 * Return: The insert location where the entry was added on success,
 *         -EBUSY if the pending array is full
160  */
161 static int add_scsipending_entry(struct visorhba_devdata *devdata,
162                                  char cmdtype, void *new)
163 {
164         unsigned long flags;
165         struct scsipending *entry;
166         int insert_location;
167
168         spin_lock_irqsave(&devdata->privlock, flags);
169         insert_location = devdata->nextinsert;
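        /* linearly probe the pending array for a free slot */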
170         while (devdata->pending[insert_location].sent) {
171                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
172                 if (insert_location == (int)devdata->nextinsert) {
173                         spin_unlock_irqrestore(&devdata->privlock, flags);
174                         return -EBUSY;
175                 }
176         }
177
178         entry = &devdata->pending[insert_location];
179         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
180         entry->cmdtype = cmdtype;
        /* if no command is supplied, the entry tracks only its own cmdrsp */
        if (new)
                entry->sent = new;
        else
                entry->sent = &entry->cmdrsp;
186         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
187         spin_unlock_irqrestore(&devdata->privlock, flags);
188
189         return insert_location;
190 }
191
192 /*
193  * del_scsipending_ent - Removes an entry from the pending array
194  * @devdata: Device holding the pending array
 * @del:     Index of the entry to remove
 *
 * Removes the entry at index @del and returns the pointer it was tracking.
 *
 * Return: The 'sent' pointer that was being tracked, NULL on failure
200  */
201 static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
202 {
203         unsigned long flags;
204         void *sent;
205
        if (del < 0 || del >= MAX_PENDING_REQUESTS)
                return NULL;
208
209         spin_lock_irqsave(&devdata->privlock, flags);
210         sent = devdata->pending[del].sent;
211         devdata->pending[del].cmdtype = 0;
212         devdata->pending[del].sent = NULL;
213         spin_unlock_irqrestore(&devdata->privlock, flags);
214
215         return sent;
216 }
217
218 /*
219  * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
220  * @ddata: Device holding the pending array
221  * @ent:   Entry that stores the cmdrsp
222  *
223  * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
224  * if the "sent" field is not NULL.
225  *
226  * Return: A pointer to the cmdrsp, NULL on failure
227  */
228 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
229                                                 int ent)
230 {
231         if (ddata->pending[ent].sent)
232                 return &ddata->pending[ent].cmdrsp;
233
234         return NULL;
235 }
236
237 /*
238  * simple_idr_get - Associate a provided pointer with an int value
239  *                  1 <= value <= INT_MAX, and return this int value;
240  *                  the pointer value can be obtained later by passing
241  *                  this int value to idr_find()
242  * @idrtable: The data object maintaining the pointer<-->int mappings
243  * @p:        The pointer value to be remembered
244  * @lock:     A spinlock used when exclusive access to idrtable is needed
245  *
246  * Return: The id number mapped to pointer 'p', 0 on failure
247  */
248 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
249                                    spinlock_t *lock)
250 {
251         int id;
252         unsigned long flags;
253
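        /*
         * preload with GFP_KERNEL outside the lock so that idr_alloc()
         * can use GFP_NOWAIT while the lock is held
         */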
254         idr_preload(GFP_KERNEL);
255         spin_lock_irqsave(lock, flags);
256         id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
257         spin_unlock_irqrestore(lock, flags);
258         idr_preload_end();
259         /* failure */
260         if (id < 0)
261                 return 0;
262         /* idr_alloc() guarantees > 0 */
263         return (unsigned int)(id);
264 }
265
266 /*
267  * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
268  *                              completion processing logic for a taskmgmt
269  *                              cmd will be able to find who to wake up
270  *                              and where to stash the result
271  * @idrtable: The data object maintaining the pointer<-->int mappings
272  * @lock:     A spinlock used when exclusive access to idrtable is needed
273  * @cmdrsp:   Response from the IOVM
274  * @event:    The event handle to associate with an id
 * @result:   Where the result of the taskmgmt command will be stored
276  */
277 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
278                                        struct uiscmdrsp *cmdrsp,
279                                        wait_queue_head_t *event, int *result)
280 {
        /*
         * specify the event that has to be triggered when this cmd is
         * complete
         */
283         cmdrsp->scsitaskmgmt.notify_handle =
284                 simple_idr_get(idrtable, event, lock);
285         cmdrsp->scsitaskmgmt.notifyresult_handle =
286                 simple_idr_get(idrtable, result, lock);
287 }
288
289 /*
290  * cleanup_scsitaskmgmt_handles - Forget handles created by
291  *                                setup_scsitaskmgmt_handles()
292  * @idrtable: The data object maintaining the pointer<-->int mappings
293  * @cmdrsp:   Response from the IOVM
294  */
295 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
296                                          struct uiscmdrsp *cmdrsp)
297 {
298         if (cmdrsp->scsitaskmgmt.notify_handle)
299                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
300         if (cmdrsp->scsitaskmgmt.notifyresult_handle)
301                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
302 }
303
304 /*
 * forward_taskmgmt_command - Send a taskmgmt command to the Service
 *                            Partition
 * @tasktype: Type of taskmgmt command
 * @scsicmd:  The scsi_cmnd for which the taskmgmt command is issued
 *
 * Create a cmdrsp packet and send it to the Service Partition
 * that will service this request.
 *
 * Return: SUCCESS if the taskmgmt command completed, FAILED otherwise
314  */
315 static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
316                                     struct scsi_cmnd *scsicmd)
317 {
318         struct uiscmdrsp *cmdrsp;
319         struct scsi_device *scsidev = scsicmd->device;
320         struct visorhba_devdata *devdata =
321                 (struct visorhba_devdata *)scsidev->host->hostdata;
322         int notifyresult = 0xffff;
323         wait_queue_head_t notifyevent;
324         int scsicmd_id = 0;
325
326         if (devdata->serverdown || devdata->serverchangingstate)
327                 return FAILED;
328
329         scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
330                                            NULL);
331         if (scsicmd_id < 0)
332                 return FAILED;
333
334         cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
335
336         init_waitqueue_head(&notifyevent);
337
        /* build the taskmgmt command */
339         cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
340         setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
341                                    &notifyevent, &notifyresult);
342
343         /* save destination */
344         cmdrsp->scsitaskmgmt.tasktype = tasktype;
345         cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
346         cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
347         cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
348         cmdrsp->scsitaskmgmt.handle = scsicmd_id;
349
350         dev_dbg(&scsidev->sdev_gendev,
351                 "visorhba: initiating type=%d taskmgmt command\n", tasktype);
352         if (visorchannel_signalinsert(devdata->dev->visorchannel,
353                                       IOCHAN_TO_IOPART,
354                                       cmdrsp))
355                 goto err_del_scsipending_ent;
356
357         /* It can take the Service Partition up to 35 seconds to complete
358          * an IO in some cases, so wait 45 seconds and error out
359          */
360         if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
361                                 msecs_to_jiffies(45000)))
362                 goto err_del_scsipending_ent;
363
364         dev_dbg(&scsidev->sdev_gendev,
365                 "visorhba: taskmgmt type=%d success; result=0x%x\n",
366                  tasktype, notifyresult);
367         if (tasktype == TASK_MGMT_ABORT_TASK)
368                 scsicmd->result = DID_ABORT << 16;
369         else
370                 scsicmd->result = DID_RESET << 16;
371
372         scsicmd->scsi_done(scsicmd);
373         cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
374         return SUCCESS;
375
376 err_del_scsipending_ent:
377         dev_dbg(&scsidev->sdev_gendev,
378                 "visorhba: taskmgmt type=%d not executed\n", tasktype);
379         del_scsipending_ent(devdata, scsicmd_id);
380         cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
381         return FAILED;
382 }
383
384 /*
385  * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
 * @scsicmd: The scsicmd that needs to be aborted
387  *
388  * Return: SUCCESS if inserted, FAILED otherwise
389  */
390 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
391 {
392         /* issue TASK_MGMT_ABORT_TASK */
393         struct scsi_device *scsidev;
394         struct visordisk_info *vdisk;
395         struct visorhba_devdata *devdata;
396
397         scsidev = scsicmd->device;
398         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
399         for_each_vdisk_match(vdisk, devdata, scsidev) {
400                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
401                         atomic_inc(&vdisk->error_count);
402                 else
403                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
404         }
405         return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
406 }
407
408 /*
409  * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
 * @scsicmd: The scsicmd identifying the device (LUN) to be reset
411  *
412  * Return: SUCCESS if inserted, FAILED otherwise
413  */
414 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
415 {
416         /* issue TASK_MGMT_LUN_RESET */
417         struct scsi_device *scsidev;
418         struct visordisk_info *vdisk;
419         struct visorhba_devdata *devdata;
420
421         scsidev = scsicmd->device;
422         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
423         for_each_vdisk_match(vdisk, devdata, scsidev) {
424                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
425                         atomic_inc(&vdisk->error_count);
426                 else
427                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
428         }
429         return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
430 }
431
432 /*
433  * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
434  *                              target on the bus
 * @scsicmd: The scsicmd identifying the bus to be reset
436  *
437  * Return: SUCCESS if inserted, FAILED otherwise
438  */
439 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
440 {
441         struct scsi_device *scsidev;
442         struct visordisk_info *vdisk;
443         struct visorhba_devdata *devdata;
444
445         scsidev = scsicmd->device;
446         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
447         for_each_vdisk_match(vdisk, devdata, scsidev) {
448                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
449                         atomic_inc(&vdisk->error_count);
450                 else
451                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
452         }
453         return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
454 }
455
456 /*
457  * visorhba_host_reset_handler - Not supported
458  * @scsicmd: The scsicmd that needs to be aborted
459  *
460  * Return: Not supported, return SUCCESS
461  */
462 static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
463 {
        /*
         * Not supported: this would issue TASK_MGMT_TARGET_RESET for each
         * target on each bus for the host.
         */
465         return SUCCESS;
466 }
467
468 /*
469  * visorhba_get_info - Get information about SCSI device
470  * @shp: Scsi host that is requesting information
471  *
472  * Return: String with visorhba information
473  */
474 static const char *visorhba_get_info(struct Scsi_Host *shp)
475 {
        /* return the driver name */
477         return "visorhba";
478 }
479
480 /*
481  * dma_data_dir_linux_to_spar - convert dma_data_direction value to
482  *                              Unisys-specific equivalent
483  * @d: dma direction value to convert
484  *
485  * Returns the Unisys-specific dma direction value corresponding to @d
486  */
487 static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
488 {
489         switch (d) {
490         case DMA_BIDIRECTIONAL:
491                 return UIS_DMA_BIDIRECTIONAL;
492         case DMA_TO_DEVICE:
493                 return UIS_DMA_TO_DEVICE;
494         case DMA_FROM_DEVICE:
495                 return UIS_DMA_FROM_DEVICE;
496         case DMA_NONE:
497                 return UIS_DMA_NONE;
498         default:
499                 return UIS_DMA_NONE;
500         }
501 }
502
503 /*
504  * visorhba_queue_command_lck - Queues command to the Service Partition
505  * @scsicmd:            Command to be queued
 * @visorhba_cmnd_done: Done function to call when scsicmd is returned
 *
 * Queues the scsicmd to the Service Partition after converting it to a
509  * uiscmdrsp structure.
510  *
511  * Return: 0 if successfully queued to the Service Partition, otherwise
512  *         error code
513  */
514 static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
515                                       void (*visorhba_cmnd_done)
516                                            (struct scsi_cmnd *))
517 {
518         struct uiscmdrsp *cmdrsp;
519         struct scsi_device *scsidev = scsicmd->device;
520         int insert_location;
521         unsigned char *cdb = scsicmd->cmnd;
522         struct Scsi_Host *scsihost = scsidev->host;
523         unsigned int i;
524         struct visorhba_devdata *devdata =
525                 (struct visorhba_devdata *)scsihost->hostdata;
526         struct scatterlist *sg = NULL;
527         struct scatterlist *sglist = NULL;
528
529         if (devdata->serverdown || devdata->serverchangingstate)
530                 return SCSI_MLQUEUE_DEVICE_BUSY;
531
532         insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
533                                                 (void *)scsicmd);
534         if (insert_location < 0)
535                 return SCSI_MLQUEUE_DEVICE_BUSY;
536
537         cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
538         cmdrsp->cmdtype = CMD_SCSI_TYPE;
539         /* save the pending insertion location. Deletion from pending
540          * will return the scsicmd pointer for completion
541          */
542         cmdrsp->scsi.handle = insert_location;
543
        /* save the done function to call when the cmd completes */
545         scsicmd->scsi_done = visorhba_cmnd_done;
546         /* save destination */
547         cmdrsp->scsi.vdest.channel = scsidev->channel;
548         cmdrsp->scsi.vdest.id = scsidev->id;
549         cmdrsp->scsi.vdest.lun = scsidev->lun;
550         /* save datadir */
551         cmdrsp->scsi.data_dir =
552                 dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
553         memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
554         cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
555
556         /* keep track of the max buffer length so far. */
557         if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
558                 devdata->max_buff_len = cmdrsp->scsi.bufflen;
559
560         if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
561                 goto err_del_scsipending_ent;
562
        /*
         * convert the buffer to physical information: the buffer is a
         * scatterlist - copy it out
         */
565         sglist = scsi_sglist(scsicmd);
566
567         for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
568                 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
569                 cmdrsp->scsi.gpi_list[i].length = sg->length;
570         }
571         cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
572
573         if (visorchannel_signalinsert(devdata->dev->visorchannel,
574                                       IOCHAN_TO_IOPART,
575                                       cmdrsp))
576                 /* queue must be full and we aren't going to wait */
577                 goto err_del_scsipending_ent;
578
579         return 0;
580
581 err_del_scsipending_ent:
582         del_scsipending_ent(devdata, insert_location);
583         return SCSI_MLQUEUE_DEVICE_BUSY;
584 }
585
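/*
 * When DEF_SCSI_QCMD is available it generates a visorhba_queue_command()
 * wrapper that takes the host lock and calls visorhba_queue_command_lck();
 * otherwise the _lck variant is used as the queuecommand entry directly.
 */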
586 #ifdef DEF_SCSI_QCMD
587 static DEF_SCSI_QCMD(visorhba_queue_command)
588 #else
589 #define visorhba_queue_command visorhba_queue_command_lck
590 #endif
591
592 /*
593  * visorhba_slave_alloc - Called when new disk is discovered
594  * @scsidev: New disk
595  *
596  * Create a new visordisk_info structure and add it to our
597  * list of vdisks.
598  *
599  * Return: 0 on success, -ENOMEM on failure.
600  */
601 static int visorhba_slave_alloc(struct scsi_device *scsidev)
602 {
        /* this is called by the midlayer before scanning for new devices --
604          * LLD can alloc any struct & do init if needed.
605          */
606         struct visordisk_info *vdisk;
607         struct visordisk_info *tmpvdisk;
608         struct visorhba_devdata *devdata;
609         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
610
        devdata = (struct visorhba_devdata *)scsihost->hostdata;
        /* even though this is an error, treat it as success */
        if (!devdata)
                return 0;
615
        /* already allocated; return success */
617         for_each_vdisk_match(vdisk, devdata, scsidev)
618                 return 0;
619
620         tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
621         if (!tmpvdisk)
622                 return -ENOMEM;
623
624         tmpvdisk->channel = scsidev->channel;
625         tmpvdisk->id = scsidev->id;
626         tmpvdisk->lun = scsidev->lun;
627         vdisk->next = tmpvdisk;
628         return 0;
629 }
630
631 /*
632  * visorhba_slave_destroy - Disk is going away, clean up resources.
633  * @scsidev: Scsi device to destroy
634  */
635 static void visorhba_slave_destroy(struct scsi_device *scsidev)
636 {
637         /* midlevel calls this after device has been quiesced and
638          * before it is to be deleted.
639          */
640         struct visordisk_info *vdisk, *delvdisk;
641         struct visorhba_devdata *devdata;
642         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
643
644         devdata = (struct visorhba_devdata *)scsihost->hostdata;
645         for_each_vdisk_match(vdisk, devdata, scsidev) {
646                 delvdisk = vdisk->next;
647                 vdisk->next = delvdisk->next;
648                 kfree(delvdisk);
649                 return;
650         }
651 }
652
653 static struct scsi_host_template visorhba_driver_template = {
654         .name = "Unisys Visor HBA",
655         .info = visorhba_get_info,
656         .queuecommand = visorhba_queue_command,
657         .eh_abort_handler = visorhba_abort_handler,
658         .eh_device_reset_handler = visorhba_device_reset_handler,
659         .eh_bus_reset_handler = visorhba_bus_reset_handler,
660         .eh_host_reset_handler = visorhba_host_reset_handler,
661         .shost_attrs = NULL,
662 #define visorhba_MAX_CMNDS 128
663         .can_queue = visorhba_MAX_CMNDS,
664         .sg_tablesize = 64,
665         .this_id = -1,
666         .slave_alloc = visorhba_slave_alloc,
667         .slave_destroy = visorhba_slave_destroy,
668         .use_clustering = ENABLE_CLUSTERING,
669 };
670
671 /*
672  * info_debugfs_show - Debugfs interface to dump visorhba states
673  * @seq: The sequence file to write information to
674  * @v:   Unused, but needed for use with seq file single_open invocation
675  *
676  * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
677  *
 * Return: 0
679  */
680 static int info_debugfs_show(struct seq_file *seq, void *v)
681 {
682         struct visorhba_devdata *devdata = seq->private;
683
684         seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
685         seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
686         seq_printf(seq, "interrupts_disabled = %llu\n",
687                    devdata->interrupts_disabled);
688         seq_printf(seq, "interrupts_notme = %llu\n",
689                    devdata->interrupts_notme);
690         seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
691         if (devdata->flags_addr) {
692                 u64 phys_flags_addr =
693                         virt_to_phys((__force  void *)devdata->flags_addr);
694                 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
695                            phys_flags_addr);
696                 seq_printf(seq, "FeatureFlags = %llu\n",
697                            (u64)readq(devdata->flags_addr));
698         }
699         seq_printf(seq, "acquire_failed_cnt = %llu\n",
700                    devdata->acquire_failed_cnt);
701
702         return 0;
703 }
704
705 static int info_debugfs_open(struct inode *inode, struct file *file)
706 {
707         return single_open(file, info_debugfs_show, inode->i_private);
708 }
709
710 static const struct file_operations info_debugfs_fops = {
711         .owner = THIS_MODULE,
712         .open = info_debugfs_open,
713         .read = seq_read,
714         .llseek = seq_lseek,
715         .release = single_release,
716 };
717
718 /*
 * complete_taskmgmt_command - Complete a task management command
720  * @idrtable: The data object maintaining the pointer<-->int mappings
721  * @cmdrsp:   Response from the IOVM
722  * @result:   The result of the task management command
723  *
724  * Service Partition returned the result of the task management
725  * command. Wake up anyone waiting for it.
726  */
727 static void complete_taskmgmt_command(struct idr *idrtable,
728                                       struct uiscmdrsp *cmdrsp, int result)
729 {
730         wait_queue_head_t *wq =
731                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
732         int *scsi_result_ptr =
733                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
734         if (unlikely(!(wq && scsi_result_ptr))) {
735                 pr_err("visorhba: no completion context; cmd will time out\n");
736                 return;
737         }
738
739         /* copy the result of the taskmgmt and
740          * wake up the error handler that is waiting for this
741          */
742         pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
743         *scsi_result_ptr = result;
744         wake_up_all(wq);
745 }
746
747 /*
748  * visorhba_serverdown_complete - Called when we are done cleaning up
749  *                                from serverdown
750  * @devdata: Visorhba instance on which to complete serverdown
751  *
 * Stop processing the response queue and fail any IOs that were still
 * pending when the Service Partition went down.
754  */
755 static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
756 {
757         int i;
758         struct scsipending *pendingdel = NULL;
759         struct scsi_cmnd *scsicmd = NULL;
760         struct uiscmdrsp *cmdrsp;
761         unsigned long flags;
762
763         /* Stop using the IOVM response queue (queue should be drained
764          * by the end)
765          */
766         visor_thread_stop(devdata->thread);
767
768         /* Fail commands that weren't completed */
769         spin_lock_irqsave(&devdata->privlock, flags);
770         for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
771                 pendingdel = &devdata->pending[i];
772                 switch (pendingdel->cmdtype) {
773                 case CMD_SCSI_TYPE:
774                         scsicmd = pendingdel->sent;
775                         scsicmd->result = DID_RESET << 16;
776                         if (scsicmd->scsi_done)
777                                 scsicmd->scsi_done(scsicmd);
778                         break;
779                 case CMD_SCSITASKMGMT_TYPE:
780                         cmdrsp = pendingdel->sent;
781                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
782                                                   TASK_MGMT_FAILED);
783                         break;
784                 default:
785                         break;
786                 }
787                 pendingdel->cmdtype = 0;
788                 pendingdel->sent = NULL;
789         }
790         spin_unlock_irqrestore(&devdata->privlock, flags);
791
792         devdata->serverdown = true;
793         devdata->serverchangingstate = false;
794 }
795
796 /*
797  * visorhba_serverdown - Got notified that the IOVM is down
798  * @devdata: Visorhba that is being serviced by downed IOVM
799  *
 * Something happened to the IOVM; mark the device as changing state and
 * clean up the outstanding requests.
802  *
803  * Return: 0 on success, -EINVAL on failure
804  */
805 static int visorhba_serverdown(struct visorhba_devdata *devdata)
806 {
807         if (!devdata->serverdown && !devdata->serverchangingstate) {
808                 devdata->serverchangingstate = true;
809                 visorhba_serverdown_complete(devdata);
810         } else if (devdata->serverchangingstate) {
811                 return -EINVAL;
812         }
813         return 0;
814 }
815
816 /*
817  * do_scsi_linuxstat - Scsi command returned linuxstat
818  * @cmdrsp:  Response from IOVM
819  * @scsicmd: Command issued
820  *
821  * Don't log errors for disk-not-present inquiries.
822  */
823 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
824                               struct scsi_cmnd *scsicmd)
825 {
826         struct visorhba_devdata *devdata;
827         struct visordisk_info *vdisk;
828         struct scsi_device *scsidev;
829
830         scsidev = scsicmd->device;
831         memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
832
833         /* Do not log errors for disk-not-present inquiries */
834         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
835             (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
836             (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
837                 return;
838         /* Okay see what our error_count is here.... */
839         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
840         for_each_vdisk_match(vdisk, devdata, scsidev) {
841                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
842                         atomic_inc(&vdisk->error_count);
843                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
844                 }
845         }
846 }
847
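/*
 * set_no_disk_inquiry_result - Build an INQUIRY response for an absent disk
 * @buf:     Buffer to fill in (at least NO_DISK_INQUIRY_RESULT_LEN bytes)
 * @len:     Size of @buf
 * @is_lun0: True if the response is for LUN 0, which is presented as
 *           disk-capable but not present so the midlayer will still scan
 *           the target
 *
 * Return: 0 on success, -EINVAL if @buf is too small
 */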
848 static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
849                                       bool is_lun0)
850 {
851         if (len < NO_DISK_INQUIRY_RESULT_LEN)
852                 return -EINVAL;
853         memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
854         buf[2] = SCSI_SPC2_VER;
855         if (is_lun0) {
856                 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
857                 buf[3] = DEV_HISUPPORT;
858         } else {
859                 buf[0] = DEV_NOT_CAPABLE;
860         }
861         buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
862         strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
863         return 0;
864 }
865
866 /*
867  * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
868  * @cmdrsp:  Response from IOVM
869  * @scsicmd: Command issued
870  *
871  * Handle response when no linuxstat was returned.
872  */
873 static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
874                                 struct scsi_cmnd *scsicmd)
875 {
876         struct scsi_device *scsidev;
877         unsigned char *buf;
878         struct scatterlist *sg;
879         unsigned int i;
880         char *this_page;
881         char *this_page_orig;
882         int bufind = 0;
883         struct visordisk_info *vdisk;
884         struct visorhba_devdata *devdata;
885
886         scsidev = scsicmd->device;
887         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
888             (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
889                 if (cmdrsp->scsi.no_disk_result == 0)
890                         return;
891
                buf = kzalloc(NO_DISK_INQUIRY_RESULT_LEN, GFP_KERNEL);
893                 if (!buf)
894                         return;
895
896                 /* Linux scsi code wants a device at Lun 0
897                  * to issue report luns, but we don't want
898                  * a disk there so we'll present a processor
899                  * there.
900                  */
901                 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
902                                            scsidev->lun == 0);
903
904                 if (scsi_sg_count(scsicmd) == 0) {
905                         memcpy(scsi_sglist(scsicmd), buf,
906                                cmdrsp->scsi.bufflen);
907                         kfree(buf);
908                         return;
909                 }
910
                sg = scsi_sglist(scsicmd);
                for (i = 0; i < scsi_sg_count(scsicmd); i++) {
                        unsigned int len;

                        len = min_t(unsigned int, sg[i].length,
                                    NO_DISK_INQUIRY_RESULT_LEN - bufind);
                        if (!len)
                                break;
                        this_page_orig = kmap_atomic(sg_page(sg + i));
                        this_page = (void *)((unsigned long)this_page_orig |
                                             sg[i].offset);
                        memcpy(this_page, buf + bufind, len);
                        kunmap_atomic(this_page_orig);
                        bufind += len;
                }
919                 kfree(buf);
920         } else {
921                 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
922                 for_each_vdisk_match(vdisk, devdata, scsidev) {
923                         if (atomic_read(&vdisk->ios_threshold) > 0) {
924                                 atomic_dec(&vdisk->ios_threshold);
925                                 if (atomic_read(&vdisk->ios_threshold) == 0)
926                                         atomic_set(&vdisk->error_count, 0);
927                         }
928                 }
929         }
930 }
931
932 /*
933  * complete_scsi_command - Complete a scsi command
 * @cmdrsp:    Response from Service Partition
935  * @scsicmd:   The scsi command
936  *
937  * Response was returned by the Service Partition. Finish it and send
938  * completion to the scsi midlayer.
939  */
940 static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
941                                   struct scsi_cmnd *scsicmd)
942 {
943         /* take what we need out of cmdrsp and complete the scsicmd */
944         scsicmd->result = cmdrsp->scsi.linuxstat;
945         if (cmdrsp->scsi.linuxstat)
946                 do_scsi_linuxstat(cmdrsp, scsicmd);
947         else
948                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
949
950         scsicmd->scsi_done(scsicmd);
951 }
952
953 /*
954  * drain_queue - Pull responses out of iochannel
955  * @cmdrsp:  Response from the IOSP
956  * @devdata: Device that owns this iochannel
957  *
 * Pulls responses out of the iochannel and processes them.
959  */
960 static void drain_queue(struct uiscmdrsp *cmdrsp,
961                         struct visorhba_devdata *devdata)
962 {
963         struct scsi_cmnd *scsicmd;
964
965         while (1) {
966                 /* queue empty */
967                 if (visorchannel_signalremove(devdata->dev->visorchannel,
968                                               IOCHAN_FROM_IOPART,
969                                               cmdrsp))
970                         break;
971                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
972                         /* scsicmd location is returned by the
973                          * deletion
974                          */
975                         scsicmd = del_scsipending_ent(devdata,
976                                                       cmdrsp->scsi.handle);
977                         if (!scsicmd)
978                                 break;
979                         /* complete the orig cmd */
980                         complete_scsi_command(cmdrsp, scsicmd);
981                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
982                         if (!del_scsipending_ent(devdata,
983                                                  cmdrsp->scsitaskmgmt.handle))
984                                 break;
985                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
986                                                   cmdrsp->scsitaskmgmt.result);
987                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
988                         dev_err_once(&devdata->dev->device,
989                                      "ignoring unsupported NOTIFYGUEST\n");
990                 /* cmdrsp is now available for re-use */
991         }
992 }
993
994 /*
995  * process_incoming_rsps - Process responses from IOSP
996  * @v:  Void pointer to visorhba_devdata
997  *
998  * Main function for the thread that processes the responses
 * from the IO Service Partition. When the queue is empty, sleep
 * briefly before polling it again.
1001  *
1002  * Return: 0 on success, -ENOMEM on failure
1003  */
1004 static int process_incoming_rsps(void *v)
1005 {
1006         struct visorhba_devdata *devdata = v;
1007         struct uiscmdrsp *cmdrsp = NULL;
1008         const int size = sizeof(*cmdrsp);
1009
1010         cmdrsp = kmalloc(size, GFP_ATOMIC);
1011         if (!cmdrsp)
1012                 return -ENOMEM;
1013
1014         while (1) {
1015                 if (kthread_should_stop())
1016                         break;
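                /*
                 * wake when an interrupt is signaled or after thread_wait_ms
                 * has elapsed, then drain whatever responses have arrived
                 */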
1017                 wait_event_interruptible_timeout(
1018                         devdata->rsp_queue, (atomic_read(
1019                                              &devdata->interrupt_rcvd) == 1),
1020                                 msecs_to_jiffies(devdata->thread_wait_ms));
1021                 /* drain queue */
1022                 drain_queue(cmdrsp, devdata);
1023         }
1024         kfree(cmdrsp);
1025         return 0;
1026 }
1027
1028 /*
1029  * visorhba_pause - Function to handle visorbus pause messages
1030  * @dev:           Device that is pausing
1031  * @complete_func: Function to call when finished
1032  *
1033  * Something has happened to the IO Service Partition that is
1034  * handling this device. Quiet this device and reset commands
1035  * so that the Service Partition can be corrected.
1036  *
 * Return: 0
1038  */
1039 static int visorhba_pause(struct visor_device *dev,
1040                           visorbus_state_complete_func complete_func)
1041 {
1042         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1043
1044         visorhba_serverdown(devdata);
1045         complete_func(dev, 0);
1046         return 0;
1047 }
1048
1049 /*
1050  * visorhba_resume - Function called when the IO Service Partition is back
 * @dev:           Device that is resuming
 * @complete_func: Function to call when finished
 *
 * Yay! The IO Service Partition is back; the channel has been wiped, so
 * let's re-establish the connection and start processing responses.
1056  *
1057  * Return: 0 on success, -EINVAL on failure
1058  */
1059 static int visorhba_resume(struct visor_device *dev,
1060                            visorbus_state_complete_func complete_func)
1061 {
1062         struct visorhba_devdata *devdata;
1063
1064         devdata = dev_get_drvdata(&dev->device);
1065         if (!devdata)
1066                 return -EINVAL;
1067
1068         if (devdata->serverdown && !devdata->serverchangingstate)
1069                 devdata->serverchangingstate = true;
1070
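        /*
         * restart the response-processing thread that was stopped when
         * the IOVM went down
         */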
1071         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
                                             "vhba_incoming");
1073         devdata->serverdown = false;
1074         devdata->serverchangingstate = false;
1075
1076         return 0;
1077 }
1078
1079 /*
 * visorhba_probe - Device has been discovered; do the initial setup
1081  * @dev: visor_device that was discovered
1082  *
1083  * A new HBA was discovered; do the initial connections of it.
1084  *
1085  * Return: 0 on success, otherwise error code
1086  */
1087 static int visorhba_probe(struct visor_device *dev)
1088 {
1089         struct Scsi_Host *scsihost;
1090         struct vhba_config_max max;
1091         struct visorhba_devdata *devdata = NULL;
1092         int err, channel_offset;
1093         u64 features;
1094
1095         scsihost = scsi_host_alloc(&visorhba_driver_template,
1096                                    sizeof(*devdata));
1097         if (!scsihost)
1098                 return -ENODEV;
1099
1100         channel_offset = offsetof(struct visor_io_channel, vhba.max);
1101         err = visorbus_read_channel(dev, channel_offset, &max,
1102                                     sizeof(struct vhba_config_max));
1103         if (err < 0)
1104                 goto err_scsi_host_put;
1105
1106         scsihost->max_id = (unsigned int)max.max_id;
1107         scsihost->max_lun = (unsigned int)max.max_lun;
1108         scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
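        /* max_io_size is in bytes; max_sectors counts 512-byte sectors */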
1109         scsihost->max_sectors =
1110             (unsigned short)(max.max_io_size >> 9);
1111         scsihost->sg_tablesize =
1112             (unsigned short)(max.max_io_size / PAGE_SIZE);
1113         if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1114                 scsihost->sg_tablesize = MAX_PHYS_INFO;
1115         err = scsi_add_host(scsihost, &dev->device);
1116         if (err < 0)
1117                 goto err_scsi_host_put;
1118
1119         devdata = (struct visorhba_devdata *)scsihost->hostdata;
1120         devdata->dev = dev;
1121         dev_set_drvdata(&dev->device, devdata);
1122
1123         devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1124                                                   visorhba_debugfs_dir);
1125         if (!devdata->debugfs_dir) {
1126                 err = -ENOMEM;
1127                 goto err_scsi_remove_host;
1128         }
1129         devdata->debugfs_info =
1130                 debugfs_create_file("info", 0440,
1131                                     devdata->debugfs_dir, devdata,
1132                                     &info_debugfs_fops);
1133         if (!devdata->debugfs_info) {
1134                 err = -ENOMEM;
1135                 goto err_debugfs_dir;
1136         }
1137
1138         init_waitqueue_head(&devdata->rsp_queue);
1139         spin_lock_init(&devdata->privlock);
1140         devdata->serverdown = false;
1141         devdata->serverchangingstate = false;
1142         devdata->scsihost = scsihost;
1143
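        /*
         * Set the polling feature flag in the channel header: responses
         * are picked up by the process_incoming_rsps() thread started
         * below rather than via channel interrupts.
         */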
1144         channel_offset = offsetof(struct visor_io_channel,
1145                                   channel_header.features);
1146         err = visorbus_read_channel(dev, channel_offset, &features, 8);
1147         if (err)
1148                 goto err_debugfs_info;
1149         features |= VISOR_CHANNEL_IS_POLLING;
1150         err = visorbus_write_channel(dev, channel_offset, &features, 8);
1151         if (err)
1152                 goto err_debugfs_info;
1153
1154         idr_init(&devdata->idr);
1155
1156         devdata->thread_wait_ms = 2;
1157         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1158                                              "vhba_incoming");
1159
1160         scsi_scan_host(scsihost);
1161
1162         return 0;
1163
1164 err_debugfs_info:
1165         debugfs_remove(devdata->debugfs_info);
1166
1167 err_debugfs_dir:
1168         debugfs_remove_recursive(devdata->debugfs_dir);
1169
1170 err_scsi_remove_host:
1171         scsi_remove_host(scsihost);
1172
1173 err_scsi_host_put:
1174         scsi_host_put(scsihost);
1175         return err;
1176 }
1177
1178 /*
1179  * visorhba_remove - Remove a visorhba device
1180  * @dev: Device to remove
1181  *
1182  * Removes the visorhba device.
1183  */
1184 static void visorhba_remove(struct visor_device *dev)
1185 {
1186         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1187         struct Scsi_Host *scsihost = NULL;
1188
1189         if (!devdata)
1190                 return;
1191
1192         scsihost = devdata->scsihost;
1193         visor_thread_stop(devdata->thread);
1194         scsi_remove_host(scsihost);
1195         scsi_host_put(scsihost);
1196
1197         idr_destroy(&devdata->idr);
1198
1199         dev_set_drvdata(&dev->device, NULL);
1200         debugfs_remove(devdata->debugfs_info);
1201         debugfs_remove_recursive(devdata->debugfs_dir);
1202 }
1203
1204 /* This is used to tell the visorbus driver which types of visor devices
1205  * we support, and what functions to call when a visor device that we support
1206  * is attached or removed.
1207  */
1208 static struct visor_driver visorhba_driver = {
1209         .name = "visorhba",
1210         .owner = THIS_MODULE,
1211         .channel_types = visorhba_channel_types,
1212         .probe = visorhba_probe,
1213         .remove = visorhba_remove,
1214         .pause = visorhba_pause,
1215         .resume = visorhba_resume,
1216         .channel_interrupt = NULL,
1217 };
1218
1219 /*
1220  * visorhba_init - Driver init routine
1221  *
1222  * Initialize the visorhba driver and register it with visorbus
 * to handle s-Par virtual host bus adapters.
1224  *
1225  * Return: 0 on success, error code otherwise
1226  */
1227 static int visorhba_init(void)
1228 {
1229         int rc = -ENOMEM;
1230
1231         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1232         if (!visorhba_debugfs_dir)
1233                 return -ENOMEM;
1234
1235         rc = visorbus_register_visor_driver(&visorhba_driver);
1236         if (rc)
1237                 goto cleanup_debugfs;
1238
1239         return 0;
1240
1241 cleanup_debugfs:
1242         debugfs_remove_recursive(visorhba_debugfs_dir);
1243
1244         return rc;
1245 }
1246
1247 /*
1248  * visorhba_exit - Driver exit routine
1249  *
1250  * Unregister driver from the bus and free up memory.
1251  */
1252 static void visorhba_exit(void)
1253 {
1254         visorbus_unregister_visor_driver(&visorhba_driver);
1255         debugfs_remove_recursive(visorhba_debugfs_dir);
1256 }
1257
1258 module_init(visorhba_init);
1259 module_exit(visorhba_exit);
1260
1261 MODULE_AUTHOR("Unisys");
1262 MODULE_LICENSE("GPL");
1263 MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");