drivers/scsi/ibmvscsi/ibmvfc.c — IBM Power Virtual Fibre Channel adapter driver
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/kthread.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/pm.h>
20 #include <linux/stringify.h>
21 #include <linux/bsg-lib.h>
22 #include <asm/firmware.h>
23 #include <asm/irq.h>
24 #include <asm/vio.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_cmnd.h>
27 #include <scsi/scsi_host.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include "ibmvfc.h"
33
/* Tunable module parameters; defaults come from ibmvfc.h. */
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;		/* init timeout, seconds */
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;	/* init/EH command timeout, seconds */
static u64 max_lun = IBMVFC_MAX_LUN;				/* highest LUN exposed to the midlayer */
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;	/* discovery concurrency */
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;		/* FC class 3 error recovery enable */
static LIST_HEAD(ibmvfc_head);			/* all ibmvfc hosts; protected by ibmvfc_driver_lock */
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
80
/*
 * Table mapping (status class, error code) pairs reported by the VIOS to
 * Linux SCSI dispositions.  Looked up by ibmvfc_get_err_index(): an entry
 * matches when every bit of its .status mask is set in the reported status
 * and .error is an exact match.
 */
static const struct {
	u16 status;	/* status class mask (IBMVFC_FABRIC_MAPPED, ...) */
	u16 error;	/* class-specific error code */
	u8 result;	/* host byte (DID_*) for the SCSI result */
	u8 retry;	/* non-zero if the command should be retried */
	int log;	/* non-zero if the error should be logged */
	char *name;	/* human-readable description */
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
130
/* Forward declarations for the host/target job-step state machine handlers. */
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

/* Fallback description for status/error pairs not present in cmd_status[]. */
static const char *unknown_error = "unknown error";
140
141 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
142 {
143         u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
144
145         return (host_caps & cap_flags) ? 1 : 0;
146 }
147
148 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
149                                                    struct ibmvfc_cmd *vfc_cmd)
150 {
151         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
152                 return &vfc_cmd->v2.iu;
153         else
154                 return &vfc_cmd->v1.iu;
155 }
156
157 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
158                                                  struct ibmvfc_cmd *vfc_cmd)
159 {
160         if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
161                 return &vfc_cmd->v2.rsp;
162         else
163                 return &vfc_cmd->v1.rsp;
164 }
165
166 #ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt:		ibmvfc event struct
 *
 * Records the event in the host's in-memory trace ring before the request
 * is sent to the VIOS.  NOTE(review): trace_index is incremented without an
 * explicit bound here — presumably it wraps via its declaration in ibmvfc.h;
 * confirm against the header before relying on ring size.
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;

	entry = &vhost->trace[vhost->trace_index++];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	/* Only SCSI commands and MADs carry extra decode data. */
	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		break;
	default:
		break;
	}
}
201
/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt:		ibmvfc event struct
 *
 * Records the completion of an event in the host's trace ring, decoding the
 * status/error fields out of the transferred IU.  See the wrap note on
 * ibmvfc_trc_start() regarding trace_index.
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	/* Completion data lives in xfer_iu (DMA'd back), not evt->iu. */
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];

	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
		entry->u.end.fcp_rsp_flags = rsp->flags;
		entry->u.end.rsp_code = rsp->data.info.rsp_code;
		entry->u.end.scsi_status = rsp->scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		entry->u.end.status = be16_to_cpu(mad->status);
		break;
	default:
		break;

	}
}
242
243 #else
244 #define ibmvfc_trc_start(evt) do { } while (0)
245 #define ibmvfc_trc_end(evt) do { } while (0)
246 #endif
247
248 /**
249  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
250  * @status:             status / error class
251  * @error:              error
252  *
253  * Return value:
254  *      index into cmd_status / -EINVAL on failure
255  **/
256 static int ibmvfc_get_err_index(u16 status, u16 error)
257 {
258         int i;
259
260         for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
261                 if ((cmd_status[i].status & status) == cmd_status[i].status &&
262                     cmd_status[i].error == error)
263                         return i;
264
265         return -EINVAL;
266 }
267
268 /**
269  * ibmvfc_get_cmd_error - Find the error description for the fcp response
270  * @status:             status / error class
271  * @error:              error
272  *
273  * Return value:
274  *      error description string
275  **/
276 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
277 {
278         int rc = ibmvfc_get_err_index(status, error);
279         if (rc >= 0)
280                 return cmd_status[rc].name;
281         return unknown_error;
282 }
283
284 /**
285  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
286  * @vfc_cmd:    ibmvfc command struct
287  *
288  * Return value:
289  *      SCSI result value to return for completed command
290  **/
291 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
292 {
293         int err;
294         struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
295         int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
296
297         if ((rsp->flags & FCP_RSP_LEN_VALID) &&
298             ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
299              rsp->data.info.rsp_code))
300                 return DID_ERROR << 16;
301
302         err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
303         if (err >= 0)
304                 return rsp->scsi_status | (cmd_status[err].result << 16);
305         return rsp->scsi_status | (DID_ERROR << 16);
306 }
307
308 /**
309  * ibmvfc_retry_cmd - Determine if error status is retryable
310  * @status:             status / error class
311  * @error:              error
312  *
313  * Return value:
314  *      1 if error should be retried / 0 if it should not
315  **/
316 static int ibmvfc_retry_cmd(u16 status, u16 error)
317 {
318         int rc = ibmvfc_get_err_index(status, error);
319
320         if (rc >= 0)
321                 return cmd_status[rc].retry;
322         return 1;
323 }
324
/* Fallback string for FC explain codes absent from the tables below. */
static const char *unknown_fc_explain = "unknown fc explain";

/* FC-LS reject "explanation" codes and their descriptions. */
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};
349
/* FC-GS (generic services / name server) reject explanation codes. */
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};
372
373 /**
374  * ibmvfc_get_ls_explain - Return the FC Explain description text
375  * @status:     FC Explain status
376  *
377  * Returns:
378  *      error string
379  **/
380 static const char *ibmvfc_get_ls_explain(u16 status)
381 {
382         int i;
383
384         for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
385                 if (ls_explain[i].fc_explain == status)
386                         return ls_explain[i].name;
387
388         return unknown_fc_explain;
389 }
390
391 /**
392  * ibmvfc_get_gs_explain - Return the FC Explain description text
393  * @status:     FC Explain status
394  *
395  * Returns:
396  *      error string
397  **/
398 static const char *ibmvfc_get_gs_explain(u16 status)
399 {
400         int i;
401
402         for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
403                 if (gs_explain[i].fc_explain == status)
404                         return gs_explain[i].name;
405
406         return unknown_fc_explain;
407 }
408
/* FC reject type codes and their descriptions. */
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

/* Fallback string for FC type codes absent from fc_type[]. */
static const char *unknown_fc_type = "unknown fc type";
422
423 /**
424  * ibmvfc_get_fc_type - Return the FC Type description text
425  * @status:     FC Type error status
426  *
427  * Returns:
428  *      error string
429  **/
430 static const char *ibmvfc_get_fc_type(u16 status)
431 {
432         int i;
433
434         for (i = 0; i < ARRAY_SIZE(fc_type); i++)
435                 if (fc_type[i].fc_type == status)
436                         return fc_type[i].name;
437
438         return unknown_fc_type;
439 }
440
/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:		ibmvfc target struct
 * @action:		action to perform
 *
 * Enforces the target teardown state machine: once a target has entered the
 * logout/delete path, only the transitions listed per-case below are allowed;
 * any other target state accepts any action.  NOTE(review): callers appear to
 * rely on external locking for tgt->action — confirm host_lock is held.
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				  enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		/* logout may progress to its wait state or straight to delete */
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		/* terminal state: no transitions out */
		break;
	default:
		/* states outside the teardown path accept any action */
		tgt->action = action;
		rc = 0;
		break;
	}

	/* Any teardown-path action means this rport must not be (re)added. */
	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}
500
501 /**
502  * ibmvfc_set_host_state - Set the state for the host
503  * @vhost:              ibmvfc host struct
504  * @state:              state to set host to
505  *
506  * Returns:
507  *      0 if state changed / non-zero if not changed
508  **/
509 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
510                                   enum ibmvfc_host_state state)
511 {
512         int rc = 0;
513
514         switch (vhost->state) {
515         case IBMVFC_HOST_OFFLINE:
516                 rc = -EINVAL;
517                 break;
518         default:
519                 vhost->state = state;
520                 break;
521         }
522
523         return rc;
524 }
525
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 * Guards the host action state machine: wait/init style actions are only
 * entered from their expected predecessor, while reset/reenable take
 * precedence over INIT and TGT_DEL.  Unlisted actions are set
 * unconditionally.  NOTE(review): appears to assume host_lock is held by the
 * caller — confirm.
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		/* only after query/init completed its wait */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
		/* a pending reset/reenable must not be overridden */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_RESET:
	case IBMVFC_HOST_ACTION_REENABLE:
	default:
		/* unconditional transitions */
		vhost->action = action;
		break;
	}
}
585
/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost:		ibmvfc host struct
 *
 * If the host is idle and active, block the midlayer and kick the QUERY
 * action; otherwise just flag a reinit for the worker thread to pick up
 * later.  Always wakes the worker.
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
	    vhost->state == IBMVFC_ACTIVE) {
		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
			scsi_block_requests(vhost->host);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		}
	} else
		/* busy or not active: defer the reinit to the work thread */
		vhost->reinit = 1;

	wake_up(&vhost->work_wait_q);
}
606
/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt:		ibmvfc target struct
 *
 * Moves the target into the logout path (if the state machine permits) and
 * points its job step at implicit-logout-and-delete, then wakes the worker.
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
	wake_up(&tgt->vhost->work_wait_q);
}
619
/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 * Blocks the midlayer, schedules every known target for deletion, moves the
 * host into @state with the TGT_DEL action, records the link-down async
 * event, and wakes the worker thread.
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}
641
/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:		ibmvfc host struct
 *
 * Takes the adapter offline if too many consecutive init attempts have
 * failed; otherwise resets the async CRQ bookkeeping, schedules all targets
 * for deletion, and kicks the NPIV login job step.
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	/* Still waiting on a previous init attempt: count it as a retry. */
	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		/* wipe the async event queue before starting over */
		memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_del_tgt(tgt);
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}
674
675 /**
676  * ibmvfc_send_crq - Send a CRQ
677  * @vhost:      ibmvfc host struct
678  * @word1:      the first 64 bits of the data
679  * @word2:      the second 64 bits of the data
680  *
681  * Return value:
682  *      0 on success / other on failure
683  **/
684 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
685 {
686         struct vio_dev *vdev = to_vio_dev(vhost->dev);
687         return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
688 }
689
690 /**
691  * ibmvfc_send_crq_init - Send a CRQ init message
692  * @vhost:      ibmvfc host struct
693  *
694  * Return value:
695  *      0 on success / other on failure
696  **/
697 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
698 {
699         ibmvfc_dbg(vhost, "Sending CRQ init\n");
700         return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
701 }
702
703 /**
704  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
705  * @vhost:      ibmvfc host struct
706  *
707  * Return value:
708  *      0 on success / other on failure
709  **/
710 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
711 {
712         ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
713         return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
714 }
715
/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost:	ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.  Ordering matters: the irq and tasklet are
 * quiesced before the CRQ is freed, and the DMA mapping/page are released
 * only after the hypervisor has let go of the queue.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	/* retry H_FREE_CRQ while the hypervisor reports busy */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}
743
744 /**
745  * ibmvfc_reenable_crq_queue - reenables the CRQ
746  * @vhost:      ibmvfc host struct
747  *
748  * Return value:
749  *      0 on success / other on failure
750  **/
751 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
752 {
753         int rc = 0;
754         struct vio_dev *vdev = to_vio_dev(vhost->dev);
755
756         /* Re-enable the CRQ */
757         do {
758                 if (rc)
759                         msleep(100);
760                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
761         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
762
763         if (rc)
764                 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
765
766         return rc;
767 }
768
/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Closes the CRQ with the hypervisor, clears the message page, and
 * re-registers it.  The host state update and re-registration happen under
 * host_lock; the (potentially long) H_FREE_CRQ retry loop deliberately runs
 * before the lock is taken.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	return rc;
}
811
812 /**
813  * ibmvfc_valid_event - Determines if event is valid.
814  * @pool:       event_pool that contains the event
815  * @evt:        ibmvfc event to be checked for validity
816  *
817  * Return value:
818  *      1 if event is valid / 0 if event is not valid
819  **/
820 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
821                               struct ibmvfc_event *evt)
822 {
823         int index = evt - pool->events;
824         if (index < 0 || index >= pool->size)   /* outside of bounds */
825                 return 0;
826         if (evt != pool->events + index)        /* unaligned */
827                 return 0;
828         return 1;
829 }
830
831 /**
832  * ibmvfc_free_event - Free the specified event
833  * @evt:        ibmvfc_event to be freed
834  *
835  **/
836 static void ibmvfc_free_event(struct ibmvfc_event *evt)
837 {
838         struct ibmvfc_host *vhost = evt->vhost;
839         struct ibmvfc_event_pool *pool = &vhost->pool;
840
841         BUG_ON(!ibmvfc_valid_event(pool, evt));
842         BUG_ON(atomic_inc_return(&evt->free) != 1);
843         list_add_tail(&evt->queue, &vhost->free);
844 }
845
846 /**
847  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
848  * @evt:        ibmvfc event struct
849  *
850  * This function does not setup any error status, that must be done
851  * before this function gets called.
852  **/
853 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
854 {
855         struct scsi_cmnd *cmnd = evt->cmnd;
856
857         if (cmnd) {
858                 scsi_dma_unmap(cmnd);
859                 cmnd->scsi_done(cmnd);
860         }
861
862         if (evt->eh_comp)
863                 complete(evt->eh_comp);
864
865         ibmvfc_free_event(evt);
866 }
867
/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt:		ibmvfc event struct
 * @error_code:	error code to fail request with
 *
 * Fails one outstanding event: a SCSI command gets @error_code as the
 * host byte of its result, a MAD gets IBMVFC_MAD_DRIVER_FAILED status.
 * The event is unlinked from the sent list, its timer cancelled, and
 * its done handler invoked (which frees the event).
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	if (evt->cmnd) {
		/* host byte of the SCSI result carries the error code */
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

	list_del(&evt->queue);
	del_timer(&evt->timer);
	ibmvfc_trc_end(evt);
	evt->done(evt);	/* completes and frees the event */
}
889
890 /**
891  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
892  * @vhost:              ibmvfc host struct
893  * @error_code: error code to fail requests with
894  *
895  * Return value:
896  *      none
897  **/
898 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
899 {
900         struct ibmvfc_event *evt, *pos;
901
902         ibmvfc_dbg(vhost, "Purging all requests\n");
903         list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
904                 ibmvfc_fail_request(evt, error_code);
905 }
906
/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost:	struct ibmvfc host to reset
 *
 * Fails all outstanding requests with DID_ERROR, marks the link down,
 * then schedules a CRQ reset for the host work thread to carry out.
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
917
918 /**
919  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
920  * @vhost:      struct ibmvfc host to reset
921  **/
922 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
923 {
924         if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
925             !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
926                 scsi_block_requests(vhost->host);
927                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
928                 vhost->job_step = ibmvfc_npiv_logout;
929                 wake_up(&vhost->work_wait_q);
930         } else
931                 ibmvfc_hard_reset_host(vhost);
932 }
933
/**
 * ibmvfc_reset_host - Reset the connection to the server
 * @vhost:	ibmvfc host struct
 *
 * Locked wrapper around __ibmvfc_reset_host(): takes the host lock
 * and performs the reset.
 **/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	__ibmvfc_reset_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
946
/**
 * ibmvfc_retry_host_init - Retry host initialization if allowed
 * @vhost:	ibmvfc host struct
 *
 * Only acts while the host is in INIT_WAIT. After the retry budget is
 * exhausted the adapter is taken offline; on the final allowed retry a
 * full host reset is attempted first. The work thread is always woken.
 *
 * Returns: 1 if init will be retried / 0 if not
 *
 **/
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
	int retry = 0;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		vhost->delay_init = 1;
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			/* retry budget exhausted: give up on the adapter */
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
			/* last chance: try a full reset of the connection */
			__ibmvfc_reset_host(vhost);
		else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
			retry = 1;
		}
	}

	wake_up(&vhost->work_wait_q);
	return retry;
}
975
976 /**
977  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
978  * @starget:    scsi target struct
979  *
980  * Return value:
981  *      ibmvfc_target struct / NULL if not found
982  **/
983 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
984 {
985         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
986         struct ibmvfc_host *vhost = shost_priv(shost);
987         struct ibmvfc_target *tgt;
988
989         list_for_each_entry(tgt, &vhost->targets, queue)
990                 if (tgt->target_id == starget->id) {
991                         kref_get(&tgt->kref);
992                         return tgt;
993                 }
994         return NULL;
995 }
996
/**
 * ibmvfc_get_target - Find the specified scsi_target
 * @starget:	scsi target struct
 *
 * Locked wrapper around __ibmvfc_get_target(). On success the returned
 * target holds a reference the caller must drop with kref_put().
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	tgt = __ibmvfc_get_target(starget);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return tgt;
}
1015
1016 /**
1017  * ibmvfc_get_host_speed - Get host port speed
1018  * @shost:              scsi host struct
1019  *
1020  * Return value:
1021  *      none
1022  **/
1023 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1024 {
1025         struct ibmvfc_host *vhost = shost_priv(shost);
1026         unsigned long flags;
1027
1028         spin_lock_irqsave(shost->host_lock, flags);
1029         if (vhost->state == IBMVFC_ACTIVE) {
1030                 switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1031                 case 1:
1032                         fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1033                         break;
1034                 case 2:
1035                         fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1036                         break;
1037                 case 4:
1038                         fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1039                         break;
1040                 case 8:
1041                         fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1042                         break;
1043                 case 10:
1044                         fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1045                         break;
1046                 case 16:
1047                         fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1048                         break;
1049                 default:
1050                         ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1051                                    be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1052                         fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1053                         break;
1054                 }
1055         } else
1056                 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1057         spin_unlock_irqrestore(shost->host_lock, flags);
1058 }
1059
/**
 * ibmvfc_get_host_port_state - Get host port state
 * @shost:		scsi host struct
 *
 * Maps the driver's internal host state onto the FC transport port
 * state attribute, under the host lock.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	switch (vhost->state) {
	case IBMVFC_INITIALIZING:
	case IBMVFC_ACTIVE:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	case IBMVFC_LINK_DOWN:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		break;
	case IBMVFC_HALTED:
		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
		break;
	case IBMVFC_NO_CRQ:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	default:
		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
1098
1099 /**
1100  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1101  * @rport:              rport struct
1102  * @timeout:    timeout value
1103  *
1104  * Return value:
1105  *      none
1106  **/
1107 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1108 {
1109         if (timeout)
1110                 rport->dev_loss_tmo = timeout;
1111         else
1112                 rport->dev_loss_tmo = 1;
1113 }
1114
/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref:		kref struct
 *
 * kref release callback: frees the embedding ibmvfc_target once the
 * last reference is dropped.
 **/
static void ibmvfc_release_tgt(struct kref *kref)
{
	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
	kfree(tgt);
}
1125
1126 /**
1127  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1128  * @starget:    scsi target struct
1129  *
1130  * Return value:
1131  *      none
1132  **/
1133 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1134 {
1135         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1136         fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1137         if (tgt)
1138                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1139 }
1140
1141 /**
1142  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1143  * @starget:    scsi target struct
1144  *
1145  * Return value:
1146  *      none
1147  **/
1148 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1149 {
1150         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1151         fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1152         if (tgt)
1153                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1154 }
1155
1156 /**
1157  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1158  * @starget:    scsi target struct
1159  *
1160  * Return value:
1161  *      none
1162  **/
1163 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1164 {
1165         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1166         fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1167         if (tgt)
1168                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1169 }
1170
/**
 * ibmvfc_wait_while_resetting - Wait while the host resets
 * @vhost:		ibmvfc host struct
 *
 * Sleeps up to init_timeout seconds until the host settles into a
 * terminal state (ACTIVE, HOST_OFFLINE, or LINK_DEAD) with no host
 * action pending.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
	long timeout = wait_event_timeout(vhost->init_wait_q,
					  ((vhost->state == IBMVFC_ACTIVE ||
					    vhost->state == IBMVFC_HOST_OFFLINE ||
					    vhost->state == IBMVFC_LINK_DEAD) &&
					   vhost->action == IBMVFC_HOST_ACTION_NONE),
					  (init_timeout * HZ));

	/* wait_event_timeout() returns 0 only if the timeout elapsed */
	return timeout ? 0 : -EIO;
}
1189
/**
 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
 * @shost:		scsi host struct
 *
 * FC transport "issue LIP" hook: resets the connection to the server
 * and waits for the host to come back to a stable state.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);

	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
	ibmvfc_reset_host(vhost);
	return ibmvfc_wait_while_resetting(vhost);
}
1205
/**
 * ibmvfc_gather_partition_info - Gather info about the LPAR
 * @vhost:	ibmvfc host struct
 *
 * Reads the partition name and number from the device tree root node
 * into @vhost. Fields are left untouched when the root node or the
 * corresponding properties are absent.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
{
	struct device_node *rootdn;
	const char *name;
	const unsigned int *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return;

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		/* NOTE(review): strncpy may leave partition_name without a
		 * NUL terminator if the property fills the buffer — confirm
		 * downstream consumers tolerate that */
		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		vhost->partition_number = *num;
	of_node_put(rootdn);	/* balances of_find_node_by_path() */
}
1230
/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost:	ibmvfc host struct
 *
 * Fills in the NPIV login buffer sent to the VIOS: capabilities,
 * limits, the async CRQ descriptor, and identifying strings. All
 * multi-byte fields are stored big-endian.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
	struct device_node *of_node = vhost->dev->of_node;
	const char *location;

	memset(login_info, 0, sizeof(*login_info));

	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
	/* max DMA length in bytes: sectors << 9 (512-byte sectors) */
	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
	login_info->partition_num = cpu_to_be32(vhost->partition_number);
	login_info->vfc_frame_version = cpu_to_be32(1);
	login_info->fcp_version = cpu_to_be16(3);
	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
	if (vhost->client_migrated)
		/* OR of two be16 values; both flags set in the same field */
		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);

	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
	/* describe the async event CRQ so the VIOS can post to it */
	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
	login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
	/* NOTE(review): strncpy of exactly IBMVFC_MAX_NAME may omit the NUL
	 * terminator — confirm the VIOS treats these as fixed-width fields */
	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
	strncpy(login_info->device_name,
		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);

	/* prefer the firmware location code; fall back to the device name */
	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(vhost->dev);
	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}
1269
/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost:	ibmvfc host who owns the event pool
 *
 * Allocates the event array plus one DMA-coherent IU slot per event,
 * wires each event to its IU and host, and places every event on the
 * host's free list.
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
{
	int i;
	struct ibmvfc_event_pool *pool = &vhost->pool;

	ENTER;
	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	/* NOTE(review): gfp argument of 0 to dma_alloc_coherent — confirm
	 * this is intentional rather than GFP_KERNEL */
	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      pool->size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		/* each event's CRQ entry points at its own IU slot */
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->ext_list = NULL;
		list_add_tail(&evt->queue, &vhost->free);
	}

	LEAVE;
	return 0;
}
1310
/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost:	ibmvfc host who owns the event pool
 *
 * Unlinks every event (BUGs if any is still in flight), releases each
 * event's scatterlist extension back to the sg pool, then frees the
 * event array and the DMA-coherent IU storage.
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
{
	int i;
	struct ibmvfc_event_pool *pool = &vhost->pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue);
		/* every event must be free at teardown */
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}
1337
1338 /**
1339  * ibmvfc_get_event - Gets the next free event in pool
1340  * @vhost:      ibmvfc host struct
1341  *
1342  * Returns a free event from the pool.
1343  **/
1344 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1345 {
1346         struct ibmvfc_event *evt;
1347
1348         BUG_ON(list_empty(&vhost->free));
1349         evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1350         atomic_set(&evt->free, 0);
1351         list_del(&evt->queue);
1352         return evt;
1353 }
1354
1355 /**
1356  * ibmvfc_init_event - Initialize fields in an event struct that are always
1357  *                              required.
1358  * @evt:        The event
1359  * @done:       Routine to call when the event is responded to
1360  * @format:     SRP or MAD format
1361  **/
1362 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1363                               void (*done) (struct ibmvfc_event *), u8 format)
1364 {
1365         evt->cmnd = NULL;
1366         evt->sync_iu = NULL;
1367         evt->crq.format = format;
1368         evt->done = done;
1369         evt->eh_comp = NULL;
1370 }
1371
1372 /**
1373  * ibmvfc_map_sg_list - Initialize scatterlist
1374  * @scmd:       scsi command struct
1375  * @nseg:       number of scatterlist segments
1376  * @md: memory descriptor list to initialize
1377  **/
1378 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1379                                struct srp_direct_buf *md)
1380 {
1381         int i;
1382         struct scatterlist *sg;
1383
1384         scsi_for_each_sg(scmd, sg, nseg, i) {
1385                 md[i].va = cpu_to_be64(sg_dma_address(sg));
1386                 md[i].len = cpu_to_be32(sg_dma_len(sg));
1387                 md[i].key = 0;
1388         }
1389 }
1390
/**
 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
 * @scmd:		struct scsi_cmnd with the scatterlist
 * @evt:		ibmvfc event struct
 * @vfc_cmd:	vfc_cmd that contains the memory descriptor
 * @dev:		device for which to map dma memory
 *
 * DMA-maps the command's scatterlist and fills in the vfc_cmd memory
 * descriptor: inline for zero or one segment, or an indirect list
 * (allocated from the host's sg pool on first use) for more.
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
			      struct ibmvfc_event *evt,
			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{

	int sg_mapped;
	struct srp_direct_buf *data = &vfc_cmd->ioba;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);

	if (cls3_error)
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);

	sg_mapped = scsi_dma_map(scmd);
	if (!sg_mapped) {
		/* no data transfer: flag the absence of a descriptor */
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
		return 0;
	} else if (unlikely(sg_mapped < 0)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
		return sg_mapped;
	}

	/* record the transfer direction in both the vfc_cmd and the IU */
	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
		iu->add_cdb_len |= IBMVFC_WRDATA;
	} else {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
		iu->add_cdb_len |= IBMVFC_RDDATA;
	}

	/* single segment fits directly in the inline descriptor */
	if (sg_mapped == 1) {
		ibmvfc_map_sg_list(scmd, sg_mapped, data);
		return 0;
	}

	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);

	/* lazily allocate the external descriptor list; it is kept on
	 * the event and reused until the pool is torn down */
	if (!evt->ext_list) {
		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
					       &evt->ext_list_token);

		if (!evt->ext_list) {
			scsi_dma_unmap(scmd);
			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
			return -ENOMEM;
		}
	}

	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);

	/* inline descriptor now points at the external list */
	data->va = cpu_to_be64(evt->ext_list_token);
	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
	data->key = 0;
	return 0;
}
1458
/**
 * ibmvfc_timeout - Internal command timeout handler
 * @t:	timer_list embedded in the struct ibmvfc_event that timed out
 *
 * Called when an internally generated command times out. Resets the
 * whole connection to the server rather than failing the one command.
 **/
static void ibmvfc_timeout(struct timer_list *t)
{
	struct ibmvfc_event *evt = from_timer(evt, t, timer);
	struct ibmvfc_host *vhost = evt->vhost;
	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
	ibmvfc_reset_host(vhost);
}
1472
/**
 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
 * @evt:		event to be sent
 * @vhost:		ibmvfc host struct
 * @timeout:	timeout in seconds - 0 means do not time command
 *
 * Copies the prepared IU into the DMA transfer area, tags it so the
 * response can be matched back to @evt, queues the event on the sent
 * list, optionally arms a timeout, and hands the CRQ entry to the
 * hypervisor. On send failure the event is completed with an error
 * (or requeued to the midlayer if the queue was merely closed).
 *
 * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
 **/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
			     struct ibmvfc_host *vhost, unsigned long timeout)
{
	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
	int rc;

	/* Copy the IU into the transfer area */
	*evt->xfer_iu = evt->iu;
	/* the tag carries the event pointer back in the response */
	if (evt->crq.format == IBMVFC_CMD_FORMAT)
		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
	else
		BUG();

	list_add_tail(&evt->queue, &vhost->sent);
	timer_setup(&evt->timer, ibmvfc_timeout, 0);

	if (timeout) {
		evt->timer.expires = jiffies + (timeout * HZ);
		add_timer(&evt->timer);
	}

	/* ensure the IU is visible in memory before the CRQ doorbell */
	mb();

	if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
				  be64_to_cpu(crq_as_u64[1])))) {
		/* send failed: undo the bookkeeping done above */
		list_del(&evt->queue);
		del_timer(&evt->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport. This
		 * will be handled in ibmvfc_handle_crq()
		 */
		if (rc == H_CLOSED) {
			if (printk_ratelimit())
				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
			if (evt->cmnd)
				scsi_dma_unmap(evt->cmnd);
			ibmvfc_free_event(evt);
			return SCSI_MLQUEUE_HOST_BUSY;
		}

		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
		if (evt->cmnd) {
			evt->cmnd->result = DID_ERROR << 16;
			evt->done = ibmvfc_scsi_eh_done;
		} else
			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);

		evt->done(evt);
	} else
		ibmvfc_trc_start(evt);

	return 0;
}
1538
/**
 * ibmvfc_log_error - Log an error for the failed command if appropriate
 * @evt:	ibmvfc event to log
 *
 * Looks up the adapter status/error pair in the cmd_status table and
 * prints a description of the failure. Non-logged errors are skipped
 * unless the log level is raised above the default.
 **/
static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct scsi_cmnd *cmnd = evt->cmnd;
	const char *err = unknown_error;
	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	int logerr = 0;
	int rsp_code = 0;

	if (index >= 0) {
		logerr = cmd_status[index].log;
		err = cmd_status[index].name;
	}

	/* skip quiet errors unless verbose logging was requested */
	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
		return;

	if (rsp->flags & FCP_RSP_LEN_VALID)
		rsp_code = rsp->data.info.rsp_code;

	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
1571
1572 /**
1573  * ibmvfc_relogin - Log back into the specified device
1574  * @sdev:       scsi device struct
1575  *
1576  **/
1577 static void ibmvfc_relogin(struct scsi_device *sdev)
1578 {
1579         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1580         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1581         struct ibmvfc_target *tgt;
1582
1583         list_for_each_entry(tgt, &vhost->targets, queue) {
1584                 if (rport == tgt->rport) {
1585                         ibmvfc_del_tgt(tgt);
1586                         break;
1587                 }
1588         }
1589
1590         ibmvfc_reinit_host(vhost);
1591 }
1592
/**
 * ibmvfc_scsi_done - Handle responses from commands
 * @evt:	ibmvfc event to be handled
 *
 * Used as a callback when sending scsi cmds. Propagates residual
 * counts, error results, and sense data from the FCP response into
 * the scsi_cmnd, triggers a relogin if the VIOS demands a new PLOGI,
 * then completes the command and frees the event.
 **/
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
	struct scsi_cmnd *cmnd = evt->cmnd;
	u32 rsp_len = 0;
	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);

	if (cmnd) {
		/* prefer the adapter's residual count over the FCP one */
		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
		else if (rsp->flags & FCP_RESID_UNDER)
			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
		else
			scsi_set_resid(cmnd, 0);

		if (vfc_cmd->status) {
			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);

			if (rsp->flags & FCP_RSP_LEN_VALID)
				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
			/* sense data follows the rsp info field in the buffer;
			 * NOTE(review): the rsp_len <= 8 bound looks like a
			 * valid-rsp-info-length check — confirm against the
			 * FCP response layout */
			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
			/* VIOS says our login is stale: re-login to the target */
			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
				ibmvfc_relogin(cmnd->device);

			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
				cmnd->result = (DID_ERROR << 16);

			ibmvfc_log_error(evt);
		}

		/* short transfer below the midlayer's underflow limit */
		if (!cmnd->result &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
			cmnd->result = (DID_ERROR << 16);

		scsi_dma_unmap(cmnd);
		cmnd->scsi_done(cmnd);
	}

	if (evt->eh_comp)
		complete(evt->eh_comp);

	ibmvfc_free_event(evt);
}
1647
1648 /**
1649  * ibmvfc_host_chkready - Check if the host can accept commands
1650  * @vhost:       struct ibmvfc host
1651  *
1652  * Returns:
1653  *      1 if host can accept command / 0 if not
1654  **/
1655 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1656 {
1657         int result = 0;
1658
1659         switch (vhost->state) {
1660         case IBMVFC_LINK_DEAD:
1661         case IBMVFC_HOST_OFFLINE:
1662                 result = DID_NO_CONNECT << 16;
1663                 break;
1664         case IBMVFC_NO_CRQ:
1665         case IBMVFC_INITIALIZING:
1666         case IBMVFC_HALTED:
1667         case IBMVFC_LINK_DOWN:
1668                 result = DID_REQUEUE << 16;
1669                 break;
1670         case IBMVFC_ACTIVE:
1671                 result = 0;
1672                 break;
1673         }
1674
1675         return result;
1676 }
1677
/**
 * ibmvfc_init_vfc_cmd - Initialize the FCP command embedded in an event
 * @evt:	ibmvfc event whose iu.cmd is to be initialized
 * @sdev:	scsi device the command will target
 *
 * Zeroes the command and fills in the fields common to all FCP commands:
 * the response buffer descriptor, frame type, payload/response lengths,
 * cancel key, target SCSI ID and LUN.
 *
 * Returns:
 *	pointer to the initialized struct ibmvfc_cmd
 **/
static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	size_t offset;

	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	/*
	 * The v2 command layout (used when the VIOS handles VF WWPNs) places
	 * the response at a different offset and carries the target WWPN.
	 */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
	} else
		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
	/* Response buffer lives inside the event's mapped CRQ area */
	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &iu->lun);

	return vfc_cmd;
}
1704
/**
 * ibmvfc_queuecommand - The queuecommand function of the scsi template
 * @cmnd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmnd is completed
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
			       void (*done) (struct scsi_cmnd *))
{
	struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	struct ibmvfc_cmd *vfc_cmd;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_event *evt;
	int rc;

	/*
	 * If the remote port or the host is not ready, complete the command
	 * immediately with the status those checks supply (e.g. requeue).
	 */
	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	cmnd->result = (DID_OK << 16);
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
	evt->cmnd = cmnd;
	cmnd->scsi_done = done;

	/* Build the FCP command IU: transfer length and CDB */
	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);

	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);

	if (cmnd->flags & SCMD_TAGGED) {
		vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
	}

	/* Correlation lets the response CRQ entry be matched back to evt */
	vfc_cmd->correlation = cpu_to_be64(evt);

	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
		return ibmvfc_send_event(evt, vhost, 0);

	/* DMA mapping failed: release the event and report appropriately */
	ibmvfc_free_event(evt);
	if (rc == -ENOMEM)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
		scmd_printk(KERN_ERR, cmnd,
			    "Failed to map DMA buffer for command. rc=%d\n", rc);

	cmnd->result = DID_ERROR << 16;
	done(cmnd);
	return 0;
}
1764
/* Generate ibmvfc_queuecommand(), wrapping ibmvfc_queuecommand_lck() */
static DEF_SCSI_QCMD(ibmvfc_queuecommand)
1766
1767 /**
1768  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1769  * @evt:        ibmvfc event struct
1770  *
1771  **/
1772 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1773 {
1774         /* copy the response back */
1775         if (evt->sync_iu)
1776                 *evt->sync_iu = *evt->xfer_iu;
1777
1778         complete(&evt->comp);
1779 }
1780
1781 /**
1782  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1783  * @evt:        struct ibmvfc_event
1784  *
1785  **/
1786 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1787 {
1788         struct ibmvfc_host *vhost = evt->vhost;
1789
1790         ibmvfc_free_event(evt);
1791         vhost->aborting_passthru = 0;
1792         dev_info(vhost->dev, "Passthru command cancelled\n");
1793 }
1794
/**
 * ibmvfc_bsg_timeout - Handle a BSG timeout
 * @job:	struct bsg_job that timed out
 *
 * Sends a TMF MAD to cancel the timed-out passthru command, identified by
 * the port ID stashed in @job->dd_data and the passthru cancel key.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	unsigned long port_id = (unsigned long)job->dd_data;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/*
	 * If a passthru abort is already in flight, or the host is not
	 * active, fall back to resetting the whole host.
	 */
	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
		__ibmvfc_reset_host(vhost);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	/* Flag cleared by ibmvfc_bsg_timeout_done() when the cancel completes */
	vhost->aborting_passthru = 1;
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = cpu_to_be32(1);
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(port_id);
	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc != 0) {
		/* Cancel never went out; nothing will clear the flag for us */
		vhost->aborting_passthru = 0;
		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
		rc = -EIO;
	} else
		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
			 port_id);

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	LEAVE;
	return rc;
}
1846
/**
 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
 * @vhost:		struct ibmvfc_host to send command
 * @port_id:	port ID to send command
 *
 * Issues a synchronous port login MAD to @port_id unless the port is
 * already a known target (in which case no login is needed).
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_target *tgt;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags;
	int rc = 0, issue_login = 1;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Already-discovered targets are logged in; skip the PLOGI */
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == port_id) {
			issue_login = 0;
			break;
		}
	}

	if (!issue_login)
		goto unlock_out;
	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
		goto unlock_out;

	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	plogi->common.version = cpu_to_be32(1);
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(port_id);
	/* Response is copied to rsp_iu by ibmvfc_sync_completion() */
	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc)
		return -EIO;

	/* Wait, unlocked, for the sync completion handler to fire */
	wait_for_completion(&evt->comp);

	if (rsp_iu.plogi.common.status)
		rc = -EIO;

	/* Event list manipulation requires the host lock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
	return rc;
}
1907
/**
 * ibmvfc_bsg_request - Handle a BSG request
 * @job:	struct bsg_job to be executed
 *
 * Translates an FC BSG ELS/CT passthru request into an IBMVFC_PASSTHRU
 * MAD: decodes the destination port ID, DMA-maps the request and reply
 * payloads (a single segment each), optionally performs a PLOGI first,
 * and waits synchronously for the MAD to complete.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_request(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags, port_id = -1;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	unsigned int code = bsg_request->msgcode;
	int rc = 0, req_seg, rsp_seg, issue_login = 0;
	u32 fc_flags, rsp_len;

	ENTER;
	bsg_reply->reply_payload_rcv_len = 0;
	if (rport)
		port_id = rport->port_id;

	/*
	 * Host-initiated requests carry the port ID as three bytes in the
	 * request; rport-based requests use the rport's port ID (above).
	 * CT requests to a new port require a PLOGI first.
	 */
	switch (code) {
	case FC_BSG_HST_ELS_NOLOGIN:
		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
			bsg_request->rqst_data.h_els.port_id[2];
		fallthrough;
	case FC_BSG_RPT_ELS:
		fc_flags = IBMVFC_FC_ELS;
		break;
	case FC_BSG_HST_CT:
		issue_login = 1;
		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
			bsg_request->rqst_data.h_ct.port_id[2];
		fallthrough;
	case FC_BSG_RPT_CT:
		fc_flags = IBMVFC_FC_CT_IU;
		break;
	default:
		return -ENOTSUPP;
	}

	if (port_id == -1)
		return -EINVAL;
	/* Only one passthru at a time; don't block the bsg queue waiting */
	if (!mutex_trylock(&vhost->passthru_mutex))
		return -EBUSY;

	/* Stash the port ID for ibmvfc_bsg_timeout() */
	job->dd_data = (void *)port_id;
	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!req_seg) {
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (!rsp_seg) {
		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	/* The passthru MAD carries exactly one descriptor per direction */
	if (req_seg > 1 || rsp_seg > 1) {
		rc = -EINVAL;
		goto out;
	}

	if (issue_login)
		rc = ibmvfc_bsg_plogi(vhost, port_id);

	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		goto out;
	}

	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
	/* MAD length excludes the trailing fc_iu and iu members */
	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));

	/* Point the VIOS at the iu member inside the event's mapped area */
	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, iu));
	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));

	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
	mad->iu.flags = cpu_to_be32(fc_flags);
	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);

	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
	mad->iu.scsi_id = cpu_to_be64(port_id);
	mad->iu.tag = cpu_to_be64((u64)evt);
	rsp_len = be32_to_cpu(mad->iu.rsp.len);

	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, 0);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc) {
		rc = -EIO;
		goto out;
	}

	wait_for_completion(&evt->comp);

	if (rsp_iu.passthru.common.status)
		rc = -EIO;
	else
		bsg_reply->reply_payload_rcv_len = rsp_len;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	bsg_reply->result = rc;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	rc = 0;
out:
	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	mutex_unlock(&vhost->passthru_mutex);
	LEAVE;
	return rc;
}
2055
/**
 * ibmvfc_reset_device - Reset the device with the specified reset type
 * @sdev:	scsi device to reset
 * @type:	reset type
 * @desc:	reset type description for log messages
 *
 * Sends a synchronous task management command (with tmf_flags set to
 * @type) to the device and waits for the response.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt = NULL;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
	int rsp_rc = -EBUSY;
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Only attempt the TMF if the host is fully up */
	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);

		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
			tmf->target_wwpn = cpu_to_be64(rport->port_name);
		iu->tmf_flags = type;
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
			    desc, rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);

	if (rsp_code) {
		/* Prefer the FCP response code if the target supplied one */
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
2126
2127 /**
2128  * ibmvfc_match_rport - Match function for specified remote port
2129  * @evt:        ibmvfc event struct
2130  * @device:     device to match (rport)
2131  *
2132  * Returns:
2133  *      1 if event matches rport / 0 if event does not match rport
2134  **/
2135 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2136 {
2137         struct fc_rport *cmd_rport;
2138
2139         if (evt->cmnd) {
2140                 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2141                 if (cmd_rport == rport)
2142                         return 1;
2143         }
2144         return 0;
2145 }
2146
2147 /**
2148  * ibmvfc_match_target - Match function for specified target
2149  * @evt:        ibmvfc event struct
2150  * @device:     device to match (starget)
2151  *
2152  * Returns:
2153  *      1 if event matches starget / 0 if event does not match starget
2154  **/
2155 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2156 {
2157         if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2158                 return 1;
2159         return 0;
2160 }
2161
2162 /**
2163  * ibmvfc_match_lun - Match function for specified LUN
2164  * @evt:        ibmvfc event struct
2165  * @device:     device to match (sdev)
2166  *
2167  * Returns:
2168  *      1 if event matches sdev / 0 if event does not match sdev
2169  **/
2170 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2171 {
2172         if (evt->cmnd && evt->cmnd->device == device)
2173                 return 1;
2174         return 0;
2175 }
2176
/**
 * ibmvfc_wait_for_ops - Wait for ops to complete
 * @vhost:	ibmvfc host struct
 * @device:	device to match (starget or sdev)
 * @match:	match function
 *
 * Attaches a shared completion to every sent event that @match selects,
 * then waits (up to IBMVFC_ABORT_WAIT_TIMEOUT) for them to drain.  The
 * outer loop re-scans because new matching events may have completed
 * while others were still outstanding.
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
			       int (*match) (struct ibmvfc_event *, void *))
{
	struct ibmvfc_event *evt;
	DECLARE_COMPLETION_ONSTACK(comp);
	int wait;
	unsigned long flags;
	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;

	ENTER;
	do {
		wait = 0;
		spin_lock_irqsave(vhost->host->host_lock, flags);
		/* Tag every outstanding matching event with our completion */
		list_for_each_entry(evt, &vhost->sent, queue) {
			if (match(evt, device)) {
				evt->eh_comp = &comp;
				wait++;
			}
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				/*
				 * Timed out: detach our completion from any
				 * events still outstanding so completions
				 * can't signal a stack variable after we
				 * return, then report FAILED if any remain.
				 */
				wait = 0;
				spin_lock_irqsave(vhost->host->host_lock, flags);
				list_for_each_entry(evt, &vhost->sent, queue) {
					if (match(evt, device)) {
						evt->eh_comp = NULL;
						wait++;
					}
				}
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (wait)
					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}
2231
2232 /**
2233  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2234  * @sdev:       scsi device to cancel commands
2235  * @type:       type of error recovery being performed
2236  *
2237  * This sends a cancel to the VIOS for the specified device. This does
2238  * NOT send any abort to the actual device. That must be done separately.
2239  *
2240  * Returns:
2241  *      0 on success / other on failure
2242  **/
2243 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2244 {
2245         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2246         struct scsi_target *starget = scsi_target(sdev);
2247         struct fc_rport *rport = starget_to_rport(starget);
2248         struct ibmvfc_tmf *tmf;
2249         struct ibmvfc_event *evt, *found_evt;
2250         union ibmvfc_iu rsp;
2251         int rsp_rc = -EBUSY;
2252         unsigned long flags;
2253         u16 status;
2254
2255         ENTER;
2256         spin_lock_irqsave(vhost->host->host_lock, flags);
2257         found_evt = NULL;
2258         list_for_each_entry(evt, &vhost->sent, queue) {
2259                 if (evt->cmnd && evt->cmnd->device == sdev) {
2260                         found_evt = evt;
2261                         break;
2262                 }
2263         }
2264
2265         if (!found_evt) {
2266                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2267                         sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2268                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2269                 return 0;
2270         }
2271
2272         if (vhost->logged_in) {
2273                 evt = ibmvfc_get_event(vhost);
2274                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2275
2276                 tmf = &evt->iu.tmf;
2277                 memset(tmf, 0, sizeof(*tmf));
2278                 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2279                         tmf->common.version = cpu_to_be32(2);
2280                         tmf->target_wwpn = cpu_to_be64(rport->port_name);
2281                 } else {
2282                         tmf->common.version = cpu_to_be32(1);
2283                 }
2284                 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2285                 tmf->common.length = cpu_to_be16(sizeof(*tmf));
2286                 tmf->scsi_id = cpu_to_be64(rport->port_id);
2287                 int_to_scsilun(sdev->lun, &tmf->lun);
2288                 if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2289                         type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2290                 if (vhost->state == IBMVFC_ACTIVE)
2291                         tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2292                 else
2293                         tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2294                 tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2295                 tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2296
2297                 evt->sync_iu = &rsp;
2298                 init_completion(&evt->comp);
2299                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2300         }
2301
2302         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2303
2304         if (rsp_rc != 0) {
2305                 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2306                 /* If failure is received, the host adapter is most likely going
2307                  through reset, return success so the caller will wait for the command
2308                  being cancelled to get returned */
2309                 return 0;
2310         }
2311
2312         sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2313
2314         wait_for_completion(&evt->comp);
2315         status = be16_to_cpu(rsp.mad_common.status);
2316         spin_lock_irqsave(vhost->host->host_lock, flags);
2317         ibmvfc_free_event(evt);
2318         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2319
2320         if (status != IBMVFC_MAD_SUCCESS) {
2321                 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2322                 switch (status) {
2323                 case IBMVFC_MAD_DRIVER_FAILED:
2324                 case IBMVFC_MAD_CRQ_ERROR:
2325                         /* Host adapter most likely going through reset, return success to
2326                          the caller will wait for the command being cancelled to get returned */
2327                         return 0;
2328                 default:
2329                         return -EIO;
2330                 };
2331         }
2332
2333         sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2334         return 0;
2335 }
2336
2337 /**
2338  * ibmvfc_match_key - Match function for specified cancel key
2339  * @evt:        ibmvfc event struct
2340  * @key:        cancel key to match
2341  *
2342  * Returns:
2343  *      1 if event matches key / 0 if event does not match key
2344  **/
2345 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2346 {
2347         unsigned long cancel_key = (unsigned long)key;
2348
2349         if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2350             be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2351                 return 1;
2352         return 0;
2353 }
2354
/**
 * ibmvfc_match_evt - Match function for specified event
 * @evt:	ibmvfc event struct
 * @match:	event to match
 *
 * Returns:
 *	1 if event matches match / 0 if event does not match match
 **/
static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
	return (evt == match) ? 1 : 0;
}
2369
2370 /**
2371  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2372  * @sdev:       scsi device to abort commands
2373  *
2374  * This sends an Abort Task Set to the VIOS for the specified device. This does
2375  * NOT send any cancel to the VIOS. That must be done separately.
2376  *
2377  * Returns:
2378  *      0 on success / other on failure
2379  **/
2380 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2381 {
2382         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2383         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2384         struct ibmvfc_cmd *tmf;
2385         struct ibmvfc_event *evt, *found_evt;
2386         union ibmvfc_iu rsp_iu;
2387         struct ibmvfc_fcp_cmd_iu *iu;
2388         struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2389         int rc, rsp_rc = -EBUSY;
2390         unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2391         int rsp_code = 0;
2392
2393         spin_lock_irqsave(vhost->host->host_lock, flags);
2394         found_evt = NULL;
2395         list_for_each_entry(evt, &vhost->sent, queue) {
2396                 if (evt->cmnd && evt->cmnd->device == sdev) {
2397                         found_evt = evt;
2398                         break;
2399                 }
2400         }
2401
2402         if (!found_evt) {
2403                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2404                         sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2405                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2406                 return 0;
2407         }
2408
2409         if (vhost->state == IBMVFC_ACTIVE) {
2410                 evt = ibmvfc_get_event(vhost);
2411                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2412                 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2413                 iu = ibmvfc_get_fcp_iu(vhost, tmf);
2414
2415                 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2416                         tmf->target_wwpn = cpu_to_be64(rport->port_name);
2417                 iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2418                 tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2419                 evt->sync_iu = &rsp_iu;
2420
2421                 tmf->correlation = cpu_to_be64(evt);
2422
2423                 init_completion(&evt->comp);
2424                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2425         }
2426
2427         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2428
2429         if (rsp_rc != 0) {
2430                 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2431                 return -EIO;
2432         }
2433
2434         sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2435         timeout = wait_for_completion_timeout(&evt->comp, timeout);
2436
2437         if (!timeout) {
2438                 rc = ibmvfc_cancel_all(sdev, 0);
2439                 if (!rc) {
2440                         rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2441                         if (rc == SUCCESS)
2442                                 rc = 0;
2443                 }
2444
2445                 if (rc) {
2446                         sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2447                         ibmvfc_reset_host(vhost);
2448                         rsp_rc = -EIO;
2449                         rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2450
2451                         if (rc == SUCCESS)
2452                                 rsp_rc = 0;
2453
2454                         rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2455                         if (rc != SUCCESS) {
2456                                 spin_lock_irqsave(vhost->host->host_lock, flags);
2457                                 ibmvfc_hard_reset_host(vhost);
2458                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2459                                 rsp_rc = 0;
2460                         }
2461
2462                         goto out;
2463                 }
2464         }
2465
2466         if (rsp_iu.cmd.status)
2467                 rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2468
2469         if (rsp_code) {
2470                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2471                         rsp_code = fc_rsp->data.info.rsp_code;
2472
2473                 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2474                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2475                             ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2476                             be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2477                             fc_rsp->scsi_status);
2478                 rsp_rc = -EIO;
2479         } else
2480                 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2481
2482 out:
2483         spin_lock_irqsave(vhost->host->host_lock, flags);
2484         ibmvfc_free_event(evt);
2485         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2486         return rsp_rc;
2487 }
2488
2489 /**
2490  * ibmvfc_eh_abort_handler - Abort a command
2491  * @cmd:        scsi command to abort
2492  *
2493  * Returns:
2494  *      SUCCESS / FAST_IO_FAIL / FAILED
2495  **/
2496 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2497 {
2498         struct scsi_device *sdev = cmd->device;
2499         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2500         int cancel_rc, block_rc;
2501         int rc = FAILED;
2502
2503         ENTER;
2504         block_rc = fc_block_scsi_eh(cmd);
2505         ibmvfc_wait_while_resetting(vhost);
2506         if (block_rc != FAST_IO_FAIL) {
2507                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2508                 ibmvfc_abort_task_set(sdev);
2509         } else
2510                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2511
2512         if (!cancel_rc)
2513                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2514
2515         if (block_rc == FAST_IO_FAIL && rc != FAILED)
2516                 rc = FAST_IO_FAIL;
2517
2518         LEAVE;
2519         return rc;
2520 }
2521
2522 /**
2523  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2524  * @cmd:        scsi command struct
2525  *
2526  * Returns:
2527  *      SUCCESS / FAST_IO_FAIL / FAILED
2528  **/
2529 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2530 {
2531         struct scsi_device *sdev = cmd->device;
2532         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2533         int cancel_rc, block_rc, reset_rc = 0;
2534         int rc = FAILED;
2535
2536         ENTER;
2537         block_rc = fc_block_scsi_eh(cmd);
2538         ibmvfc_wait_while_resetting(vhost);
2539         if (block_rc != FAST_IO_FAIL) {
2540                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2541                 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2542         } else
2543                 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2544
2545         if (!cancel_rc && !reset_rc)
2546                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2547
2548         if (block_rc == FAST_IO_FAIL && rc != FAILED)
2549                 rc = FAST_IO_FAIL;
2550
2551         LEAVE;
2552         return rc;
2553 }
2554
2555 /**
2556  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2557  * @sdev:       scsi device struct
2558  * @data:       return code
2559  *
2560  **/
2561 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2562 {
2563         unsigned long *rc = data;
2564         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2565 }
2566
2567 /**
2568  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2569  * @sdev:       scsi device struct
2570  * @data:       return code
2571  *
2572  **/
2573 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2574 {
2575         unsigned long *rc = data;
2576         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2577 }
2578
2579 /**
2580  * ibmvfc_eh_target_reset_handler - Reset the target
2581  * @cmd:        scsi command struct
2582  *
2583  * Returns:
2584  *      SUCCESS / FAST_IO_FAIL / FAILED
2585  **/
2586 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2587 {
2588         struct scsi_device *sdev = cmd->device;
2589         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2590         struct scsi_target *starget = scsi_target(sdev);
2591         int block_rc;
2592         int reset_rc = 0;
2593         int rc = FAILED;
2594         unsigned long cancel_rc = 0;
2595
2596         ENTER;
2597         block_rc = fc_block_scsi_eh(cmd);
2598         ibmvfc_wait_while_resetting(vhost);
2599         if (block_rc != FAST_IO_FAIL) {
2600                 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2601                 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2602         } else
2603                 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2604
2605         if (!cancel_rc && !reset_rc)
2606                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2607
2608         if (block_rc == FAST_IO_FAIL && rc != FAILED)
2609                 rc = FAST_IO_FAIL;
2610
2611         LEAVE;
2612         return rc;
2613 }
2614
2615 /**
2616  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2617  * @cmd:        struct scsi_cmnd having problems
2618  *
2619  **/
2620 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2621 {
2622         int rc;
2623         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2624
2625         dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2626         rc = ibmvfc_issue_fc_host_lip(vhost->host);
2627
2628         return rc ? FAILED : SUCCESS;
2629 }
2630
/**
 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
 * @rport:		rport struct
 *
 * Cancels outstanding commands on every scsi_device attached to @rport,
 * waits for the affected operations to drain, and falls back to a host
 * LIP if the wait fails.  If the matching target previously failed an
 * implicit logout, it is marked for deletion and the host re-initialized
 * so the logout is retried.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *dev_rport;
	struct scsi_device *sdev;
	struct ibmvfc_target *tgt;
	unsigned long rc, flags;
	unsigned int found;

	ENTER;
	/* Cancel outstanding commands on every device behind this rport */
	shost_for_each_device(sdev, shost) {
		dev_rport = starget_to_rport(scsi_target(sdev));
		if (dev_rport != rport)
			continue;
		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
	}

	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);

	/* Could not drain the rport's operations: force a LIP to recover */
	if (rc == FAILED)
		ibmvfc_issue_fc_host_lip(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	found = 0;
	/* Find the target matching this rport's port ID; on a hit, tgt is
	 * left pointing at the matching entry for the check below.
	 */
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == rport->port_id) {
			found++;
			break;
		}
	}

	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
		/*
		 * If we get here, that means we previously attempted to send
		 * an implicit logout to the target but it failed, most likely
		 * due to I/O being pending, so we need to send it again
		 */
		ibmvfc_del_tgt(tgt);
		ibmvfc_reinit_host(vhost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
	LEAVE;
}
2683
/* Async event descriptors: name, event code, and logging level.
 * Per-target ELS and N-Port/Group SCN events log one level above the
 * default (i.e. less verbose) than fabric/link/adapter-wide events.
 */
static const struct ibmvfc_async_desc ae_desc [] = {
	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
2699
/* Fallback descriptor for async events not present in ae_desc */
static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};
2703
2704 /**
2705  * ibmvfc_get_ae_desc - Get text description for async event
2706  * @ae: async event
2707  *
2708  **/
2709 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
2710 {
2711         int i;
2712
2713         for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2714                 if (ae_desc[i].ae == ae)
2715                         return &ae_desc[i];
2716
2717         return &unknown_ae;
2718 }
2719
/* Text suffixes for async link state change events */
static const struct {
	enum ibmvfc_ae_link_state state;
	const char *desc;
} link_desc [] = {
	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
};
2729
2730 /**
2731  * ibmvfc_get_link_state - Get text description for link state
2732  * @state:      link state
2733  *
2734  **/
2735 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
2736 {
2737         int i;
2738
2739         for (i = 0; i < ARRAY_SIZE(link_desc); i++)
2740                 if (link_desc[i].state == state)
2741                         return link_desc[i].desc;
2742
2743         return "";
2744 }
2745
2746 /**
2747  * ibmvfc_handle_async - Handle an async event from the adapter
2748  * @crq:        crq to process
2749  * @vhost:      ibmvfc host struct
2750  *
2751  **/
2752 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2753                                 struct ibmvfc_host *vhost)
2754 {
2755         const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
2756         struct ibmvfc_target *tgt;
2757
2758         ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
2759                    " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
2760                    be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
2761                    ibmvfc_get_link_state(crq->link_state));
2762
2763         switch (be64_to_cpu(crq->event)) {
2764         case IBMVFC_AE_RESUME:
2765                 switch (crq->link_state) {
2766                 case IBMVFC_AE_LS_LINK_DOWN:
2767                         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2768                         break;
2769                 case IBMVFC_AE_LS_LINK_DEAD:
2770                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2771                         break;
2772                 case IBMVFC_AE_LS_LINK_UP:
2773                 case IBMVFC_AE_LS_LINK_BOUNCED:
2774                 default:
2775                         vhost->events_to_log |= IBMVFC_AE_LINKUP;
2776                         vhost->delay_init = 1;
2777                         __ibmvfc_reset_host(vhost);
2778                         break;
2779                 }
2780
2781                 break;
2782         case IBMVFC_AE_LINK_UP:
2783                 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2784                 vhost->delay_init = 1;
2785                 __ibmvfc_reset_host(vhost);
2786                 break;
2787         case IBMVFC_AE_SCN_FABRIC:
2788         case IBMVFC_AE_SCN_DOMAIN:
2789                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2790                 if (vhost->state < IBMVFC_HALTED) {
2791                         vhost->delay_init = 1;
2792                         __ibmvfc_reset_host(vhost);
2793                 }
2794                 break;
2795         case IBMVFC_AE_SCN_NPORT:
2796         case IBMVFC_AE_SCN_GROUP:
2797                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2798                 ibmvfc_reinit_host(vhost);
2799                 break;
2800         case IBMVFC_AE_ELS_LOGO:
2801         case IBMVFC_AE_ELS_PRLO:
2802         case IBMVFC_AE_ELS_PLOGI:
2803                 list_for_each_entry(tgt, &vhost->targets, queue) {
2804                         if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2805                                 break;
2806                         if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
2807                                 continue;
2808                         if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
2809                                 continue;
2810                         if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
2811                                 continue;
2812                         if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
2813                                 tgt->logo_rcvd = 1;
2814                         if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
2815                                 ibmvfc_del_tgt(tgt);
2816                                 ibmvfc_reinit_host(vhost);
2817                         }
2818                 }
2819                 break;
2820         case IBMVFC_AE_LINK_DOWN:
2821         case IBMVFC_AE_ADAPTER_FAILED:
2822                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2823                 break;
2824         case IBMVFC_AE_LINK_DEAD:
2825                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2826                 break;
2827         case IBMVFC_AE_HALT:
2828                 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2829                 break;
2830         default:
2831                 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
2832                 break;
2833         }
2834 }
2835
/**
 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @vhost:	ibmvfc host struct
 *
 * Dispatches one CRQ entry: initialization handshake messages, transport
 * events (partition migration / partner failure), and command responses.
 * For command responses, the ioba field echoes back the address of the
 * originating ibmvfc_event, which is validated before its completion
 * handler is run.
 **/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
{
	long rc;
	/* ioba carries the event pointer we handed to the adapter at send time */
	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);

	switch (crq->valid) {
	case IBMVFC_CRQ_INIT_RSP:
		switch (crq->format) {
		case IBMVFC_CRQ_INIT:
			dev_info(vhost->dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvfc_send_crq_init_complete(vhost);
			if (rc == 0)
				ibmvfc_init_host(vhost);
			else
				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
			break;
		case IBMVFC_CRQ_INIT_COMPLETE:
			dev_info(vhost->dev, "Partner initialization complete\n");
			ibmvfc_init_host(vhost);
			break;
		default:
			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_XPORT_EVENT:
		/* Transport-level event: connection to the partner is gone */
		vhost->state = IBMVFC_NO_CRQ;
		vhost->logged_in = 0;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
			/* We need to re-setup the interpartition connection */
			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
			vhost->client_migrated = 1;
			ibmvfc_purge_requests(vhost, DID_REQUEUE);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
			ibmvfc_purge_requests(vhost, DID_ERROR);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
		} else {
			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	if (crq->format == IBMVFC_ASYNC_EVENT)
		return;

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
			crq->ioba);
		return;
	}

	if (unlikely(atomic_read(&evt->free))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
			crq->ioba);
		return;
	}

	/* Valid, in-flight event: stop its timeout and complete it */
	del_timer(&evt->timer);
	list_del(&evt->queue);
	ibmvfc_trc_end(evt);
	evt->done(evt);
}
2918
2919 /**
2920  * ibmvfc_scan_finished - Check if the device scan is done.
2921  * @shost:      scsi host struct
2922  * @time:       current elapsed time
2923  *
2924  * Returns:
2925  *      0 if scan is not done / 1 if scan is done
2926  **/
2927 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2928 {
2929         unsigned long flags;
2930         struct ibmvfc_host *vhost = shost_priv(shost);
2931         int done = 0;
2932
2933         spin_lock_irqsave(shost->host_lock, flags);
2934         if (time >= (init_timeout * HZ)) {
2935                 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2936                          "continuing initialization\n", init_timeout);
2937                 done = 1;
2938         }
2939
2940         if (vhost->scan_complete)
2941                 done = 1;
2942         spin_unlock_irqrestore(shost->host_lock, flags);
2943         return done;
2944 }
2945
2946 /**
2947  * ibmvfc_slave_alloc - Setup the device's task set value
2948  * @sdev:       struct scsi_device device to configure
2949  *
2950  * Set the device's task set value so that error handling works as
2951  * expected.
2952  *
2953  * Returns:
2954  *      0 on success / -ENXIO if device does not exist
2955  **/
2956 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2957 {
2958         struct Scsi_Host *shost = sdev->host;
2959         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2960         struct ibmvfc_host *vhost = shost_priv(shost);
2961         unsigned long flags = 0;
2962
2963         if (!rport || fc_remote_port_chkready(rport))
2964                 return -ENXIO;
2965
2966         spin_lock_irqsave(shost->host_lock, flags);
2967         sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2968         spin_unlock_irqrestore(shost->host_lock, flags);
2969         return 0;
2970 }
2971
2972 /**
2973  * ibmvfc_target_alloc - Setup the target's task set value
2974  * @starget:    struct scsi_target
2975  *
2976  * Set the target's task set value so that error handling works as
2977  * expected.
2978  *
2979  * Returns:
2980  *      0 on success / -ENXIO if device does not exist
2981  **/
2982 static int ibmvfc_target_alloc(struct scsi_target *starget)
2983 {
2984         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2985         struct ibmvfc_host *vhost = shost_priv(shost);
2986         unsigned long flags = 0;
2987
2988         spin_lock_irqsave(shost->host_lock, flags);
2989         starget->hostdata = (void *)(unsigned long)vhost->task_set++;
2990         spin_unlock_irqrestore(shost->host_lock, flags);
2991         return 0;
2992 }
2993
2994 /**
2995  * ibmvfc_slave_configure - Configure the device
2996  * @sdev:       struct scsi_device device to configure
2997  *
2998  * Enable allow_restart for a device if it is a disk. Adjust the
2999  * queue_depth here also.
3000  *
3001  * Returns:
3002  *      0
3003  **/
3004 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3005 {
3006         struct Scsi_Host *shost = sdev->host;
3007         unsigned long flags = 0;
3008
3009         spin_lock_irqsave(shost->host_lock, flags);
3010         if (sdev->type == TYPE_DISK)
3011                 sdev->allow_restart = 1;
3012         spin_unlock_irqrestore(shost->host_lock, flags);
3013         return 0;
3014 }
3015
/**
 * ibmvfc_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Clamps the requested depth to IBMVFC_MAX_CMDS_PER_LUN before handing
 * it to the midlayer.
 *
 * Return value:
 *	actual depth set
 **/
static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
		qdepth = IBMVFC_MAX_CMDS_PER_LUN;

	return scsi_change_queue_depth(sdev, qdepth);
}
3032
3033 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3034                                                  struct device_attribute *attr, char *buf)
3035 {
3036         struct Scsi_Host *shost = class_to_shost(dev);
3037         struct ibmvfc_host *vhost = shost_priv(shost);
3038
3039         return snprintf(buf, PAGE_SIZE, "%s\n",
3040                         vhost->login_buf->resp.partition_name);
3041 }
3042
3043 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3044                                             struct device_attribute *attr, char *buf)
3045 {
3046         struct Scsi_Host *shost = class_to_shost(dev);
3047         struct ibmvfc_host *vhost = shost_priv(shost);
3048
3049         return snprintf(buf, PAGE_SIZE, "%s\n",
3050                         vhost->login_buf->resp.device_name);
3051 }
3052
3053 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3054                                          struct device_attribute *attr, char *buf)
3055 {
3056         struct Scsi_Host *shost = class_to_shost(dev);
3057         struct ibmvfc_host *vhost = shost_priv(shost);
3058
3059         return snprintf(buf, PAGE_SIZE, "%s\n",
3060                         vhost->login_buf->resp.port_loc_code);
3061 }
3062
3063 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3064                                          struct device_attribute *attr, char *buf)
3065 {
3066         struct Scsi_Host *shost = class_to_shost(dev);
3067         struct ibmvfc_host *vhost = shost_priv(shost);
3068
3069         return snprintf(buf, PAGE_SIZE, "%s\n",
3070                         vhost->login_buf->resp.drc_name);
3071 }
3072
3073 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3074                                              struct device_attribute *attr, char *buf)
3075 {
3076         struct Scsi_Host *shost = class_to_shost(dev);
3077         struct ibmvfc_host *vhost = shost_priv(shost);
3078         return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3079 }
3080
3081 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3082                                              struct device_attribute *attr, char *buf)
3083 {
3084         struct Scsi_Host *shost = class_to_shost(dev);
3085         struct ibmvfc_host *vhost = shost_priv(shost);
3086         return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3087 }
3088
3089 /**
3090  * ibmvfc_show_log_level - Show the adapter's error logging level
3091  * @dev:        class device struct
3092  * @buf:        buffer
3093  *
3094  * Return value:
3095  *      number of bytes printed to buffer
3096  **/
3097 static ssize_t ibmvfc_show_log_level(struct device *dev,
3098                                      struct device_attribute *attr, char *buf)
3099 {
3100         struct Scsi_Host *shost = class_to_shost(dev);
3101         struct ibmvfc_host *vhost = shost_priv(shost);
3102         unsigned long flags = 0;
3103         int len;
3104
3105         spin_lock_irqsave(shost->host_lock, flags);
3106         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3107         spin_unlock_irqrestore(shost->host_lock, flags);
3108         return len;
3109 }
3110
/**
 * ibmvfc_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * NOTE(review): returns strlen(buf) rather than @count, and uses
 * simple_strtoul so invalid input silently parses as 0 — both look like
 * long-standing behavior userspace may rely on; confirm before changing.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_store_log_level(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	vhost->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return strlen(buf);
}
3132
/* Host sysfs attributes: read-only NPIV login response fields plus the
 * writable error logging level.
 */
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
		   ibmvfc_show_log_level, ibmvfc_store_log_level);
3141
3142 #ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_read_trace - Dump the adapter trace
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Copies up to @count bytes of the in-memory trace buffer starting at
 * @off, clamped to IBMVFC_TRACE_SIZE.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;
	int size = IBMVFC_TRACE_SIZE;
	char *src = (char *)vhost->trace;

	/* Clamp the requested window to the trace buffer bounds
	 * (off == size falls through and yields a zero-length copy)
	 */
	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	}

	/* Copy under the host lock so the trace isn't updated mid-read */
	spin_lock_irqsave(shost->host_lock, flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}
3178
/* Read-only sysfs binary attribute exposing the adapter trace buffer */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ibmvfc_read_trace,
};
3187 #endif
3188
/* NULL-terminated list of host attributes registered via the host template */
static struct device_attribute *ibmvfc_attrs[] = {
	&dev_attr_partition_name,
	&dev_attr_device_name,
	&dev_attr_port_loc_code,
	&dev_attr_drc_name,
	&dev_attr_npiv_version,
	&dev_attr_capabilities,
	&dev_attr_log_level,
	NULL
};
3199
/* SCSI midlayer host template: queuecommand entry point, error handler
 * callbacks, device setup hooks, and queue/sg limits for the adapter.
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.target_alloc = ibmvfc_target_alloc,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.shost_attrs = ibmvfc_attrs,
	.track_queue_depth = 1,
};
3223
/**
 * ibmvfc_next_async_crq - Returns the next entry in async queue
 * @vhost:	ibmvfc host struct
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
	struct ibmvfc_async_crq *crq;

	crq = &async_crq->msgs[async_crq->cur];
	/* Top bit of the valid byte is set by the adapter when the entry
	 * is populated; if set, consume it by advancing the cursor
	 * (wrapping at queue size).  The rmb() keeps the caller's reads
	 * of the entry payload from being ordered before the valid check.
	 */
	if (crq->valid & 0x80) {
		if (++async_crq->cur == async_crq->size)
			async_crq->cur = 0;
		rmb();
	} else
		crq = NULL;

	return crq;
}
3246
/**
 * ibmvfc_next_crq - Returns the next entry in message queue
 * @vhost:	ibmvfc host struct
 *
 * Same consume-and-wrap protocol as ibmvfc_next_async_crq(), but for the
 * command/response CRQ.
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_crq_queue *queue = &vhost->crq;
	struct ibmvfc_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {	/* 0x80: entry marked valid by the partner */
		if (++queue->cur == queue->size)
			queue->cur = 0;	/* wrap the ring cursor */
		/* Read barrier: don't read the payload before the valid bit */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3269
3270 /**
3271  * ibmvfc_interrupt - Interrupt handler
3272  * @irq:                number of irq to handle, not used
3273  * @dev_instance: ibmvfc_host that received interrupt
3274  *
3275  * Returns:
3276  *      IRQ_HANDLED
3277  **/
3278 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3279 {
3280         struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3281         unsigned long flags;
3282
3283         spin_lock_irqsave(vhost->host->host_lock, flags);
3284         vio_disable_interrupts(to_vio_dev(vhost->dev));
3285         tasklet_schedule(&vhost->tasklet);
3286         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3287         return IRQ_HANDLED;
3288 }
3289
/**
 * ibmvfc_tasklet - Interrupt handler tasklet
 * @data:		ibmvfc host struct
 *
 * Drains the async event CRQ and the command CRQ under the host lock,
 * then re-enables the VIO interrupt and peeks both queues once more to
 * close the race with an entry that arrived after the final drain but
 * before interrupts were re-enabled.
 *
 * Returns:
 *	Nothing
 **/
static void ibmvfc_tasklet(void *data)
{
	struct ibmvfc_host *vhost = data;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq *crq;
	struct ibmvfc_async_crq *async;
	unsigned long flags;
	int done = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	while (!done) {
		/* Pull all the valid messages off the async CRQ */
		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			/* order the valid-bit clear before the entry is reused */
			wmb();
		}

		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;
			wmb();
		}

		/*
		 * Re-enable interrupts, then re-check: an entry that slipped
		 * in just before enabling would otherwise be stranded until
		 * the next interrupt fires.
		 */
		vio_enable_interrupts(vdev);
		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			wmb();
		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
3339
3340 /**
3341  * ibmvfc_init_tgt - Set the next init job step for the target
3342  * @tgt:                ibmvfc target struct
3343  * @job_step:   job step to perform
3344  *
3345  **/
3346 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3347                             void (*job_step) (struct ibmvfc_target *))
3348 {
3349         if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3350                 tgt->job_step = job_step;
3351         wake_up(&tgt->vhost->work_wait_q);
3352 }
3353
3354 /**
3355  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3356  * @tgt:                ibmvfc target struct
3357  * @job_step:   initialization job step
3358  *
3359  * Returns: 1 if step will be retried / 0 if not
3360  *
3361  **/
3362 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3363                                   void (*job_step) (struct ibmvfc_target *))
3364 {
3365         if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3366                 ibmvfc_del_tgt(tgt);
3367                 wake_up(&tgt->vhost->work_wait_q);
3368                 return 0;
3369         } else
3370                 ibmvfc_init_tgt(tgt, job_step);
3371         return 1;
3372 }
3373
/*
 * PRLI accept response code table, as defined in FC-LS.
 * @code: response code from the PRLI service parameter page;
 * @retry: whether the PRLI should be retried for this code;
 * @logged_in: whether this code indicates a successful process login.
 */
static const struct {
	int code;
	int retry;
	int logged_in;
} prli_rsp [] = {
	{ 0, 1, 0 },
	{ 1, 0, 1 },
	{ 2, 1, 0 },
	{ 3, 1, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
	{ 6, 0, 1 },
	{ 7, 0, 0 },
	{ 8, 1, 0 },
};
3390
3391 /**
3392  * ibmvfc_get_prli_rsp - Find PRLI response index
3393  * @flags:      PRLI response flags
3394  *
3395  **/
3396 static int ibmvfc_get_prli_rsp(u16 flags)
3397 {
3398         int i;
3399         int code = (flags & 0x0f00) >> 8;
3400
3401         for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3402                 if (prli_rsp[i].code == code)
3403                         return i;
3404
3405         return 0;
3406 }
3407
/**
 * ibmvfc_tgt_prli_done - Completion handler for Process Login
 * @evt:	ibmvfc event struct
 *
 * On success, records the target's FCP roles from the PRLI service
 * parameters; on failure, retries the PRLI/PLOGI or deletes the target.
 * Always releases the discovery thread slot, the event, and the target
 * reference taken when the PRLI was sent, and wakes the work thread.
 **/
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
	u32 status = be16_to_cpu(rsp->common.status);
	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
			parms->type, parms->flags, parms->service_parms);

		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
			if (prli_rsp[index].logged_in) {
				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
					/* Image pair established: derive the FC
					 * roles from the service parameters */
					tgt->need_login = 0;
					tgt->ids.roles = 0;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
					tgt->add_rport = 1;
				} else
					ibmvfc_del_tgt(tgt);
			} else if (prli_rsp[index].retry)
				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
			else
				ibmvfc_del_tgt(tgt);
		} else
			ibmvfc_del_tgt(tgt);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* PLOGI_REQUIRED or a received LOGO forces a new port login;
		 * other retryable errors just retry the PRLI */
		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (tgt->logo_rcvd)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3476
/**
 * ibmvfc_tgt_send_prli - Send a process login
 * @tgt:	ibmvfc target struct
 *
 * Builds and sends a PRLI MAD for @tgt. Silently returns if the
 * discovery thread limit is reached. Takes a target reference which is
 * dropped on send failure or in the completion handler.
 **/
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
{
	struct ibmvfc_process_login *prli;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	prli = &evt->iu.prli;
	memset(prli, 0, sizeof(*prli));
	/* Version 2 of the MAD additionally carries the target WWPN */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		prli->common.version = cpu_to_be32(2);
		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		prli->common.version = cpu_to_be32(1);
	}
	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
	prli->common.length = cpu_to_be16(sizeof(*prli));
	prli->scsi_id = cpu_to_be64(tgt->scsi_id);

	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);

	if (cls3_error)
		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo thread count, action and target reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent process login\n");
}
3524
/**
 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
 * @evt:	ibmvfc event struct
 *
 * On success, records the target's node/port names and service
 * parameters and schedules a PRLI; if the port name changed since the
 * last login, flags the host for re-initialization instead. On failure,
 * retries the PLOGI or deletes the target. Always releases the
 * discovery thread slot, the event, and the target reference.
 **/
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Port Login succeeded\n");
		if (tgt->ids.port_name &&
		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
			/* The remote port's WWPN changed: full re-discovery needed */
			vhost->reinit = 1;
			tgt_dbg(tgt, "Port re-init required\n");
			break;
		}
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		/* Port login done: proceed to process login */
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3582
/**
 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
 * @tgt:	ibmvfc target struct
 *
 * Builds and sends a Port Login MAD for @tgt, clearing any previously
 * recorded LOGO. Silently returns if the discovery thread limit is
 * reached. Takes a target reference which is dropped on send failure or
 * in the completion handler.
 **/
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	tgt->logo_rcvd = 0;	/* a fresh PLOGI supersedes any prior LOGO */
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	/* Version 2 of the MAD additionally carries the target WWPN */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		plogi->common.version = cpu_to_be32(2);
		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		plogi->common.version = cpu_to_be32(1);
	}
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);

	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo thread count, action and target reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent port login\n");
}
3623
/**
 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Regardless of logout status (except a driver failure, which aborts
 * discovery for this target), proceeds to re-login the target with a
 * PLOGI. Releases the discovery thread slot, the event, and the target
 * reference taken when the logout was sent.
 **/
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
	u32 status = be16_to_cpu(rsp->common.status);

	vhost->discovery_threads--;
	ibmvfc_free_event(evt);
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Implicit Logout succeeded\n");
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Driver failure: don't re-login, just drop the reference */
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		wake_up(&vhost->work_wait_q);
		return;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
		break;
	}

	/* Logout complete (or failed non-fatally): start a fresh port login */
	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
3658
3659 /**
3660  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
3661  * @tgt:                ibmvfc target struct
3662  *
3663  * Returns:
3664  *      Allocated and initialized ibmvfc_event struct
3665  **/
3666 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
3667                                                                  void (*done) (struct ibmvfc_event *))
3668 {
3669         struct ibmvfc_implicit_logout *mad;
3670         struct ibmvfc_host *vhost = tgt->vhost;
3671         struct ibmvfc_event *evt;
3672
3673         kref_get(&tgt->kref);
3674         evt = ibmvfc_get_event(vhost);
3675         ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
3676         evt->tgt = tgt;
3677         mad = &evt->iu.implicit_logout;
3678         memset(mad, 0, sizeof(*mad));
3679         mad->common.version = cpu_to_be32(1);
3680         mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
3681         mad->common.length = cpu_to_be16(sizeof(*mad));
3682         mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
3683         return evt;
3684 }
3685
/**
 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
 * @tgt:		ibmvfc target struct
 *
 * Sends an Implicit Logout MAD; the completion handler re-logins the
 * target via PLOGI. Silently returns if the discovery thread limit is
 * reached. The target reference taken by the event helper is dropped on
 * send failure or by the completion handler.
 **/
static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	vhost->discovery_threads++;
	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
						   ibmvfc_tgt_implicit_logout_done);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo thread count, action and target reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Implicit Logout\n");
}
3711
/**
 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Moves the target to a delete action based on the logout outcome and
 * host state, releases the discovery thread slot, the event, and the
 * target reference, then wakes the work thread to perform the deletion.
 **/
static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);

	vhost->discovery_threads--;
	ibmvfc_free_event(evt);

	/*
	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
	 * driver in which case we need to free up all the targets. If we are
	 * not unloading, we will still go through a hard reset to get out of
	 * offline state, so there is no need to track the old targets in that
	 * case.
	 */
	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
	else
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);

	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
3743
/**
 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
 * @tgt:		ibmvfc target struct
 *
 * Sends an Implicit Logout whose completion deletes the target's rport.
 * If the host is no longer logged in, skips the logout and marks the
 * target for rport deletion directly. Silently returns if the discovery
 * thread limit is reached.
 **/
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (!vhost->logged_in) {
		/* No session to log out of: go straight to rport deletion */
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		return;
	}

	if (vhost->discovery_threads >= disc_threads)
		return;

	vhost->discovery_threads++;
	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
						   ibmvfc_tgt_implicit_logout_and_del_done);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: fall back to deleting the rport directly */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Implicit Logout\n");
}
3774
/**
 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
 * @evt:	ibmvfc event struct
 *
 * On success, records the target's identity and service parameters at
 * its new SCSI ID and proceeds to PRLI; on failure, retries the move
 * login. Releases the discovery thread slot, the event, and the target
 * reference.
 **/
static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		/* Login moved: continue with process login at the new ID */
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);

		tgt_log(tgt, level,
			"Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
			tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
			status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3822
3823
/**
 * ibmvfc_tgt_move_login - Initiate a move login for specified target
 * @tgt:		ibmvfc target struct
 *
 * Builds and sends a Move Login MAD to transfer the existing login from
 * the target's old SCSI ID to its new one, keyed by WWPN. Silently
 * returns if the discovery thread limit is reached. Takes a target
 * reference which is dropped on send failure or in the completion
 * handler.
 **/
static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_move_login *move;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	move = &evt->iu.move_login;
	memset(move, 0, sizeof(*move));
	move->common.version = cpu_to_be32(1);
	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
	move->common.length = cpu_to_be16(sizeof(*move));

	move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
	move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
	move->wwpn = cpu_to_be64(tgt->wwpn);
	move->node_name = cpu_to_be64(tgt->ids.node_name);

	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: give up on this target and drop the reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
}
3862
3863 /**
3864  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
3865  * @mad:        ibmvfc passthru mad struct
3866  * @tgt:        ibmvfc target struct
3867  *
3868  * Returns:
3869  *      1 if PLOGI needed / 0 if PLOGI not needed
3870  **/
3871 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3872                                     struct ibmvfc_target *tgt)
3873 {
3874         if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
3875                 return 1;
3876         if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
3877                 return 1;
3878         if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
3879                 return 1;
3880         return 0;
3881 }
3882
/**
 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
 * @evt:	ibmvfc event struct
 *
 * Stops the ADISC watchdog timer, then either keeps the target (ADISC
 * succeeded and identity matches) or deletes it so it will be
 * rediscovered. Releases the discovery thread slot, the event, and the
 * target reference.
 **/
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);
	u8 fc_reason, fc_explain;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	del_timer(&tgt->timer);	/* ADISC completed: cancel the timeout */

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "ADISC succeeded\n");
		if (ibmvfc_adisc_needs_plogi(mad, tgt))
			ibmvfc_del_tgt(tgt);	/* identity changed: re-login */
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_FAILED:
	default:
		ibmvfc_del_tgt(tgt);
		/* Reason and explanation are packed into response word 1 */
		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
			 ibmvfc_get_fc_type(fc_reason), fc_reason,
			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3925
3926 /**
3927  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
3928  * @evt:                ibmvfc event struct
3929  *
3930  **/
3931 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
3932 {
3933         struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
3934
3935         memset(mad, 0, sizeof(*mad));
3936         mad->common.version = cpu_to_be32(1);
3937         mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
3938         mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
3939         mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3940                 offsetof(struct ibmvfc_passthru_mad, iu));
3941         mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
3942         mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
3943         mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
3944         mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3945                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3946                 offsetof(struct ibmvfc_passthru_fc_iu, payload));
3947         mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
3948         mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3949                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3950                 offsetof(struct ibmvfc_passthru_fc_iu, response));
3951         mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
3952 }
3953
3954 /**
3955  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
3956  * @evt:                ibmvfc event struct
3957  *
3958  * Just cleanup this event struct. Everything else is handled by
3959  * the ADISC completion handler. If the ADISC never actually comes
3960  * back, we still have the timer running on the ADISC event struct
3961  * which will fire and cause the CRQ to get reset.
3962  *
3963  **/
3964 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
3965 {
3966         struct ibmvfc_host *vhost = evt->vhost;
3967         struct ibmvfc_target *tgt = evt->tgt;
3968
3969         tgt_dbg(tgt, "ADISC cancel complete\n");
3970         vhost->abort_threads--;
3971         ibmvfc_free_event(evt);
3972         kref_put(&tgt->kref, ibmvfc_release_tgt);
3973         wake_up(&vhost->work_wait_q);
3974 }
3975
/**
 * ibmvfc_adisc_timeout - Handle an ADISC timeout
 * @t:		timer embedded in the ibmvfc target struct
 *
 * If an ADISC times out, send a cancel. If the cancel times
 * out, reset the CRQ. When the ADISC comes back as cancelled,
 * log back into the target.
 **/
static void ibmvfc_adisc_timeout(struct timer_list *t)
{
	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	tgt_dbg(tgt, "ADISC timeout\n");
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Bail if the abort budget is spent or the target/host has already
	 * moved on from the state in which the ADISC was issued */
	if (vhost->abort_threads >= disc_threads ||
	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
	    vhost->state != IBMVFC_INITIALIZING ||
	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	vhost->abort_threads++;
	kref_get(&tgt->kref);	/* held until the cancel completes */
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);

	evt->tgt = tgt;
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	/* Version 2 of the MAD additionally carries the target WWPN */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc) {
		/* Could not even send the cancel: reset the CRQ to recover */
		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
		vhost->abort_threads--;
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
	} else
		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
4033
4034 /**
4035  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4036  * @tgt:                ibmvfc target struct
4037  *
4038  * When sending an ADISC we end up with two timers running. The
4039  * first timer is the timer in the ibmvfc target struct. If this
4040  * fires, we send a cancel to the target. The second timer is the
4041  * timer on the ibmvfc event for the ADISC, which is longer. If that
4042  * fires, it means the ADISC timed out and our attempt to cancel it
4043  * also failed, so we need to reset the CRQ.
4044  **/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrent discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);	/* dropped in ibmvfc_tgt_adisc_done() */
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;

	ibmvfc_init_passthru(evt);
	mad = &evt->iu.passthru;
	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);

	/* Build the ADISC ELS payload from our NPIV login response */
	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
	       sizeof(vhost->login_buf->resp.port_name));
	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
	       sizeof(vhost->login_buf->resp.node_name));
	/* Only the low 24 bits of our SCSI ID go into the payload */
	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);

	/* (Re)arm the short per-target timer; fires ibmvfc_adisc_timeout() */
	if (timer_pending(&tgt->timer))
		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
	else {
		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
		add_timer(&tgt->timer);
	}

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	/* Event timeout is long enough to cover the ADISC plus its cancel */
	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
		vhost->discovery_threads--;
		del_timer(&tgt->timer);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent ADISC\n");
}
4089
4090 /**
4091  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4092  * @evt:        ibmvfc event struct
4093  *
4094  **/
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Query Target succeeded\n");
		/* SCSI ID changed since discovery: delete; otherwise ADISC it */
		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
			ibmvfc_del_tgt(tgt);
		else
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Port name not registered with the fabric: target is gone */
		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
			ibmvfc_del_tgt(tgt);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
			status);
		break;
	}

	/* Drop the reference taken in ibmvfc_tgt_query_target() */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4142
4143 /**
4144  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4145  * @tgt:        ibmvfc target struct
4146  *
4147  **/
4148 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4149 {
4150         struct ibmvfc_query_tgt *query_tgt;
4151         struct ibmvfc_host *vhost = tgt->vhost;
4152         struct ibmvfc_event *evt;
4153
4154         if (vhost->discovery_threads >= disc_threads)
4155                 return;
4156
4157         kref_get(&tgt->kref);
4158         evt = ibmvfc_get_event(vhost);
4159         vhost->discovery_threads++;
4160         evt->tgt = tgt;
4161         ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4162         query_tgt = &evt->iu.query_tgt;
4163         memset(query_tgt, 0, sizeof(*query_tgt));
4164         query_tgt->common.version = cpu_to_be32(1);
4165         query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4166         query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4167         query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4168
4169         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4170         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4171                 vhost->discovery_threads--;
4172                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4173                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4174         } else
4175                 tgt_dbg(tgt, "Sent Query Target\n");
4176 }
4177
4178 /**
4179  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4180  * @vhost:              ibmvfc host struct
 * @target:     discovered target entry to allocate an ibmvfc target for
4182  *
4183  * Returns:
4184  *      0 on success / other on failure
4185  **/
4186 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4187                                struct ibmvfc_discover_targets_entry *target)
4188 {
4189         struct ibmvfc_target *stgt = NULL;
4190         struct ibmvfc_target *wtgt = NULL;
4191         struct ibmvfc_target *tgt;
4192         unsigned long flags;
4193         u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4194         u64 wwpn = be64_to_cpu(target->wwpn);
4195
4196         /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4197         spin_lock_irqsave(vhost->host->host_lock, flags);
4198         list_for_each_entry(tgt, &vhost->targets, queue) {
4199                 if (tgt->wwpn == wwpn) {
4200                         wtgt = tgt;
4201                         break;
4202                 }
4203         }
4204
4205         list_for_each_entry(tgt, &vhost->targets, queue) {
4206                 if (tgt->scsi_id == scsi_id) {
4207                         stgt = tgt;
4208                         break;
4209                 }
4210         }
4211
4212         if (wtgt && !stgt) {
4213                 /*
4214                  * A WWPN target has moved and we still are tracking the old
4215                  * SCSI ID.  The only way we should be able to get here is if
4216                  * we attempted to send an implicit logout for the old SCSI ID
4217                  * and it failed for some reason, such as there being I/O
4218                  * pending to the target. In this case, we will have already
4219                  * deleted the rport from the FC transport so we do a move
4220                  * login, which works even with I/O pending, as it will cancel
4221                  * any active commands.
4222                  */
4223                 if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4224                         /*
4225                          * Do a move login here. The old target is no longer
4226                          * known to the transport layer We don't use the
4227                          * normal ibmvfc_set_tgt_action to set this, as we
4228                          * don't normally want to allow this state change.
4229                          */
4230                         wtgt->old_scsi_id = wtgt->scsi_id;
4231                         wtgt->scsi_id = scsi_id;
4232                         wtgt->action = IBMVFC_TGT_ACTION_INIT;
4233                         ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4234                         goto unlock_out;
4235                 } else {
4236                         tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4237                                 wtgt->action, wtgt->rport);
4238                 }
4239         } else if (stgt) {
4240                 if (tgt->need_login)
4241                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4242                 goto unlock_out;
4243         }
4244         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4245
4246         tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4247         memset(tgt, 0, sizeof(*tgt));
4248         tgt->scsi_id = scsi_id;
4249         tgt->wwpn = wwpn;
4250         tgt->vhost = vhost;
4251         tgt->need_login = 1;
4252         timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4253         kref_init(&tgt->kref);
4254         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4255         spin_lock_irqsave(vhost->host->host_lock, flags);
4256         tgt->cancel_key = vhost->task_set++;
4257         list_add_tail(&tgt->queue, &vhost->targets);
4258
4259 unlock_out:
4260         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4261         return 0;
4262 }
4263
4264 /**
4265  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4266  * @vhost:              ibmvfc host struct
4267  *
4268  * Returns:
4269  *      0 on success / other on failure
4270  **/
4271 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4272 {
4273         int i, rc;
4274
4275         for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4276                 rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4277
4278         return rc;
4279 }
4280
4281 /**
4282  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4283  * @evt:        ibmvfc event struct
4284  *
4285  **/
static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
	u32 mad_status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
		/* num_written is the number of entries placed in disc_buf */
		vhost->num_targets = be32_to_cpu(rsp->num_written);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
		break;
	case IBMVFC_MAD_FAILED:
		/* Retry host init; log louder the more we have retried */
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	default:
		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	}

	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4316
4317 /**
4318  * ibmvfc_discover_targets - Send Discover Targets MAD
4319  * @vhost:      ibmvfc host struct
4320  *
4321  **/
4322 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4323 {
4324         struct ibmvfc_discover_targets *mad;
4325         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4326
4327         ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4328         mad = &evt->iu.discover_targets;
4329         memset(mad, 0, sizeof(*mad));
4330         mad->common.version = cpu_to_be32(1);
4331         mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4332         mad->common.length = cpu_to_be16(sizeof(*mad));
4333         mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4334         mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4335         mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4336         mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4337         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4338
4339         if (!ibmvfc_send_event(evt, vhost, default_timeout))
4340                 ibmvfc_dbg(vhost, "Sent discover targets\n");
4341         else
4342                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4343 }
4344
4345 /**
4346  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
4347  * @evt:        ibmvfc event struct
4348  *
4349  **/
4350 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4351 {
4352         struct ibmvfc_host *vhost = evt->vhost;
4353         u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
4354         struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
4355         unsigned int npiv_max_sectors;
4356         int level = IBMVFC_DEFAULT_LOG_LEVEL;
4357
4358         switch (mad_status) {
4359         case IBMVFC_MAD_SUCCESS:
4360                 ibmvfc_free_event(evt);
4361                 break;
4362         case IBMVFC_MAD_FAILED:
4363                 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4364                         level += ibmvfc_retry_host_init(vhost);
4365                 else
4366                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4367                 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4368                            ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4369                                                 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4370                 ibmvfc_free_event(evt);
4371                 return;
4372         case IBMVFC_MAD_CRQ_ERROR:
4373                 ibmvfc_retry_host_init(vhost);
4374                 fallthrough;
4375         case IBMVFC_MAD_DRIVER_FAILED:
4376                 ibmvfc_free_event(evt);
4377                 return;
4378         default:
4379                 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
4380                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4381                 ibmvfc_free_event(evt);
4382                 return;
4383         }
4384
4385         vhost->client_migrated = 0;
4386
4387         if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
4388                 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
4389                         rsp->flags);
4390                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4391                 wake_up(&vhost->work_wait_q);
4392                 return;
4393         }
4394
4395         if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
4396                 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
4397                         rsp->max_cmds);
4398                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4399                 wake_up(&vhost->work_wait_q);
4400                 return;
4401         }
4402
4403         vhost->logged_in = 1;
4404         npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
4405         dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
4406                  rsp->partition_name, rsp->device_name, rsp->port_loc_code,
4407                  rsp->drc_name, npiv_max_sectors);
4408
4409         fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
4410         fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
4411         fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
4412         fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
4413         fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
4414         fc_host_supported_classes(vhost->host) = 0;
4415         if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
4416                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
4417         if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
4418                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
4419         if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
4420                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
4421         fc_host_maxframe_size(vhost->host) =
4422                 be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
4423
4424         vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
4425         vhost->host->max_sectors = npiv_max_sectors;
4426         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4427         wake_up(&vhost->work_wait_q);
4428 }
4429
4430 /**
4431  * ibmvfc_npiv_login - Sends NPIV login
4432  * @vhost:      ibmvfc host struct
4433  *
4434  **/
4435 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
4436 {
4437         struct ibmvfc_npiv_login_mad *mad;
4438         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4439
4440         ibmvfc_gather_partition_info(vhost);
4441         ibmvfc_set_login_info(vhost);
4442         ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
4443
4444         memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
4445         mad = &evt->iu.npiv_login;
4446         memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
4447         mad->common.version = cpu_to_be32(1);
4448         mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
4449         mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
4450         mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
4451         mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
4452
4453         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4454
4455         if (!ibmvfc_send_event(evt, vhost, default_timeout))
4456                 ibmvfc_dbg(vhost, "Sent NPIV login\n");
4457         else
4458                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4459 }
4460
4461 /**
4462  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
 * @evt:                ibmvfc event struct
4464  *
4465  **/
4466 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
4467 {
4468         struct ibmvfc_host *vhost = evt->vhost;
4469         u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
4470
4471         ibmvfc_free_event(evt);
4472
4473         switch (mad_status) {
4474         case IBMVFC_MAD_SUCCESS:
4475                 if (list_empty(&vhost->sent) &&
4476                     vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
4477                         ibmvfc_init_host(vhost);
4478                         return;
4479                 }
4480                 break;
4481         case IBMVFC_MAD_FAILED:
4482         case IBMVFC_MAD_NOT_SUPPORTED:
4483         case IBMVFC_MAD_CRQ_ERROR:
4484         case IBMVFC_MAD_DRIVER_FAILED:
4485         default:
4486                 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
4487                 break;
4488         }
4489
4490         ibmvfc_hard_reset_host(vhost);
4491 }
4492
4493 /**
4494  * ibmvfc_npiv_logout - Issue an NPIV Logout
4495  * @vhost:              ibmvfc host struct
4496  *
4497  **/
4498 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
4499 {
4500         struct ibmvfc_npiv_logout_mad *mad;
4501         struct ibmvfc_event *evt;
4502
4503         evt = ibmvfc_get_event(vhost);
4504         ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
4505
4506         mad = &evt->iu.npiv_logout;
4507         memset(mad, 0, sizeof(*mad));
4508         mad->common.version = cpu_to_be32(1);
4509         mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
4510         mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
4511
4512         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
4513
4514         if (!ibmvfc_send_event(evt, vhost, default_timeout))
4515                 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
4516         else
4517                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4518 }
4519
4520 /**
4521  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
4522  * @vhost:              ibmvfc host struct
4523  *
4524  * Returns:
4525  *      1 if work to do / 0 if not
4526  **/
4527 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
4528 {
4529         struct ibmvfc_target *tgt;
4530
4531         list_for_each_entry(tgt, &vhost->targets, queue) {
4532                 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
4533                     tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4534                         return 1;
4535         }
4536
4537         return 0;
4538 }
4539
4540 /**
4541  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
4542  * @vhost:              ibmvfc host struct
4543  *
4544  * Returns:
4545  *      1 if work to do / 0 if not
4546  **/
4547 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
4548 {
4549         struct ibmvfc_target *tgt;
4550
4551         list_for_each_entry(tgt, &vhost->targets, queue) {
4552                 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
4553                     tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
4554                         return 1;
4555         }
4556         return 0;
4557 }
4558
4559 /**
4560  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
4561  * @vhost:              ibmvfc host struct
4562  *
4563  * Returns:
4564  *      1 if work to do / 0 if not
4565  **/
static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (kthread_should_stop())
		return 1;
	switch (vhost->action) {
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_INIT_WAIT:
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		/* Idle, or waiting on a completion to advance the state */
		return 0;
	case IBMVFC_HOST_ACTION_TGT_INIT:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
		/* All discovery slots in use: nothing can be started now */
		if (vhost->discovery_threads == disc_threads)
			return 0;
		/* A target still needs its init command sent */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
				return 1;
		/* Init commands outstanding: wait for them before advancing */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
				return 0;
		return 1;
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
		/* Same pattern as above, for the rport logout phase */
		if (vhost->discovery_threads == disc_threads)
			return 0;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
				return 1;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
				return 0;
		return 1;
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
	case IBMVFC_HOST_ACTION_QUERY:
	case IBMVFC_HOST_ACTION_RESET:
	case IBMVFC_HOST_ACTION_REENABLE:
	default:
		/* These actions are always runnable by the work thread */
		break;
	}

	return 1;
}
4611
4612 /**
4613  * ibmvfc_work_to_do - Is there task level work to do?
4614  * @vhost:              ibmvfc host struct
4615  *
4616  * Returns:
4617  *      1 if work to do / 0 if not
4618  **/
4619 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4620 {
4621         unsigned long flags;
4622         int rc;
4623
4624         spin_lock_irqsave(vhost->host->host_lock, flags);
4625         rc = __ibmvfc_work_to_do(vhost);
4626         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4627         return rc;
4628 }
4629
4630 /**
4631  * ibmvfc_log_ae - Log async events if necessary
4632  * @vhost:              ibmvfc host struct
4633  * @events:             events to log
4634  *
4635  **/
4636 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
4637 {
4638         if (events & IBMVFC_AE_RSCN)
4639                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
4640         if ((events & IBMVFC_AE_LINKDOWN) &&
4641             vhost->state >= IBMVFC_HALTED)
4642                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
4643         if ((events & IBMVFC_AE_LINKUP) &&
4644             vhost->state == IBMVFC_INITIALIZING)
4645                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
4646 }
4647
4648 /**
4649  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
4650  * @tgt:                ibmvfc target struct
4651  *
4652  **/
static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct fc_rport *rport;
	unsigned long flags;

	tgt_dbg(tgt, "Adding rport\n");
	/* Register with the FC transport before taking the host lock */
	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
	spin_lock_irqsave(vhost->host->host_lock, flags);

	/*
	 * The target may have been marked for deletion while the lock was
	 * not held; if so, undo the add we just did.
	 */
	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
		tgt_dbg(tgt, "Deleting rport\n");
		list_del(&tgt->queue);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		fc_remote_port_delete(rport);
		del_timer_sync(&tgt->timer);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		return;
	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
		tgt->rport = NULL;
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		fc_remote_port_delete(rport);
		return;
	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
		/* Deletion already handled elsewhere; nothing more to do */
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	if (rport) {
		tgt_dbg(tgt, "rport add succeeded\n");
		tgt->rport = rport;
		/* bb_rcv_sz: low 12 bits carry the frame size */
		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
		rport->supported_classes = 0;
		tgt->target_id = rport->scsi_target_id;
		/* Top bit of each class parms word marks the class as valid */
		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS1;
		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS2;
		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS3;
		if (rport->rqst_q)
			blk_queue_max_segments(rport->rqst_q, 1);
	} else
		tgt_dbg(tgt, "rport add failed\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
4702
/**
 * ibmvfc_do_work - Do task level work
 * @vhost:		ibmvfc host struct
 *
 * Executes one step of the host state machine based on vhost->action.
 * Runs with the host lock held; the lock is dropped around operations
 * that can sleep (CRQ reset/reenable, target allocation, rport removal)
 * and the function returns early in those cases so the work thread can
 * re-evaluate host state on its next pass.
 **/
static void ibmvfc_do_work(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;
	unsigned long flags;
	struct fc_rport *rport;
	int rc;

	ibmvfc_log_ae(vhost, vhost->events_to_log);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	vhost->events_to_log = 0;
	switch (vhost->action) {
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		break;
	case IBMVFC_HOST_ACTION_RESET:
		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
		/* Drop the lock: resetting the CRQ can sleep */
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		rc = ibmvfc_reset_crq(vhost);
		spin_lock_irqsave(vhost->host->host_lock, flags);
		if (rc == H_CLOSED)
			vio_enable_interrupts(to_vio_dev(vhost->dev));
		if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
		    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
			dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
		}
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
		/* Drop the lock: reenabling the CRQ can sleep */
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		rc = ibmvfc_reenable_crq_queue(vhost);
		spin_lock_irqsave(vhost->host->host_lock, flags);
		if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
			dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
		}
		break;
	case IBMVFC_HOST_ACTION_LOGO:
		vhost->job_step(vhost);
		break;
	case IBMVFC_HOST_ACTION_INIT:
		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
		if (vhost->delay_init) {
			/* One-shot 15 second delay before (re)initializing */
			vhost->delay_init = 0;
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			ssleep(15);
			return;
		} else
			vhost->job_step(vhost);
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		/* Queue a query for every known target, then wait on them */
		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
		break;
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
		/* Step at most one target through its init job per pass */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (!ibmvfc_dev_init_to_do(vhost))
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
		break;
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
		/* First drive any pending rport logouts to completion */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (ibmvfc_dev_logo_to_do(vhost)) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return;
		}

		/*
		 * Delete at most one rport per pass and return early:
		 * fc_remote_port_delete() is called without the host lock.
		 */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
				tgt_dbg(tgt, "Deleting rport\n");
				rport = tgt->rport;
				tgt->rport = NULL;
				list_del(&tgt->queue);
				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (rport)
					fc_remote_port_delete(rport);
				del_timer_sync(&tgt->timer);
				kref_put(&tgt->kref, ibmvfc_release_tgt);
				return;
			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
				rport = tgt->rport;
				tgt->rport = NULL;
				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (rport)
					fc_remote_port_delete(rport);
				return;
			}
		}

		if (vhost->state == IBMVFC_INITIALIZING) {
			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
				if (vhost->reinit) {
					/* Something changed mid-discovery: restart it */
					vhost->reinit = 0;
					scsi_block_requests(vhost->host);
					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
				} else {
					/* Discovery done: go active and schedule rport adds */
					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
					wake_up(&vhost->init_wait_q);
					schedule_work(&vhost->rport_add_work_q);
					vhost->init_retries = 0;
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					scsi_unblock_requests(vhost->host);
				}

				return;
			} else {
				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
				vhost->job_step = ibmvfc_discover_targets;
			}
		} else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			scsi_unblock_requests(vhost->host);
			wake_up(&vhost->init_wait_q);
			return;
		}
		break;
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
		/* Drop the lock: target allocation can sleep */
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		ibmvfc_alloc_targets(vhost);
		spin_lock_irqsave(vhost->host->host_lock, flags);
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		/* Step at most one target through its init job per pass */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (!ibmvfc_dev_init_to_do(vhost))
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
4867
4868 /**
4869  * ibmvfc_work - Do task level work
4870  * @data:               ibmvfc host struct
4871  *
4872  * Returns:
4873  *      zero
4874  **/
4875 static int ibmvfc_work(void *data)
4876 {
4877         struct ibmvfc_host *vhost = data;
4878         int rc;
4879
4880         set_user_nice(current, MIN_NICE);
4881
4882         while (1) {
4883                 rc = wait_event_interruptible(vhost->work_wait_q,
4884                                               ibmvfc_work_to_do(vhost));
4885
4886                 BUG_ON(rc);
4887
4888                 if (kthread_should_stop())
4889                         break;
4890
4891                 ibmvfc_do_work(vhost);
4892         }
4893
4894         ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
4895         return 0;
4896 }
4897
4898 /**
4899  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
4900  * @vhost:      ibmvfc host struct
4901  *
4902  * Allocates a page for messages, maps it for dma, and registers
4903  * the crq with the hypervisor.
4904  *
4905  * Return value:
4906  *      zero on success / other on failure
4907  **/
4908 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
4909 {
4910         int rc, retrc = -ENOMEM;
4911         struct device *dev = vhost->dev;
4912         struct vio_dev *vdev = to_vio_dev(dev);
4913         struct ibmvfc_crq_queue *crq = &vhost->crq;
4914
4915         ENTER;
4916         crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
4917
4918         if (!crq->msgs)
4919                 return -ENOMEM;
4920
4921         crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4922         crq->msg_token = dma_map_single(dev, crq->msgs,
4923                                         PAGE_SIZE, DMA_BIDIRECTIONAL);
4924
4925         if (dma_mapping_error(dev, crq->msg_token))
4926                 goto map_failed;
4927
4928         retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4929                                         crq->msg_token, PAGE_SIZE);
4930
4931         if (rc == H_RESOURCE)
4932                 /* maybe kexecing and resource is busy. try a reset */
4933                 retrc = rc = ibmvfc_reset_crq(vhost);
4934
4935         if (rc == H_CLOSED)
4936                 dev_warn(dev, "Partner adapter not ready\n");
4937         else if (rc) {
4938                 dev_warn(dev, "Error %d opening adapter\n", rc);
4939                 goto reg_crq_failed;
4940         }
4941
4942         retrc = 0;
4943
4944         tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
4945
4946         if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
4947                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
4948                 goto req_irq_failed;
4949         }
4950
4951         if ((rc = vio_enable_interrupts(vdev))) {
4952                 dev_err(dev, "Error %d enabling interrupts\n", rc);
4953                 goto req_irq_failed;
4954         }
4955
4956         crq->cur = 0;
4957         LEAVE;
4958         return retrc;
4959
4960 req_irq_failed:
4961         tasklet_kill(&vhost->tasklet);
4962         do {
4963                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4964         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4965 reg_crq_failed:
4966         dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4967 map_failed:
4968         free_page((unsigned long)crq->msgs);
4969         return retrc;
4970 }
4971
4972 /**
4973  * ibmvfc_free_mem - Free memory for vhost
4974  * @vhost:      ibmvfc host struct
4975  *
4976  * Return value:
4977  *      none
4978  **/
4979 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
4980 {
4981         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
4982
4983         ENTER;
4984         mempool_destroy(vhost->tgt_pool);
4985         kfree(vhost->trace);
4986         dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
4987                           vhost->disc_buf_dma);
4988         dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
4989                           vhost->login_buf, vhost->login_buf_dma);
4990         dma_pool_destroy(vhost->sg_pool);
4991         dma_unmap_single(vhost->dev, async_q->msg_token,
4992                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
4993         free_page((unsigned long)async_q->msgs);
4994         LEAVE;
4995 }
4996
/**
 * ibmvfc_alloc_mem - Allocate memory for vhost
 * @vhost:	ibmvfc host struct
 *
 * Allocates the async CRQ page (and DMA-maps it), the scatter/gather
 * pool, the NPIV login and discovery DMA buffers, the trace buffer,
 * and the target mempool. On failure, everything allocated so far is
 * unwound via the goto chain at the bottom.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
	struct device *dev = vhost->dev;

	ENTER;
	/* One zeroed page holds the entire async event queue */
	async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
	if (!async_q->msgs) {
		dev_err(dev, "Couldn't allocate async queue.\n");
		goto nomem;
	}

	async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
	async_q->msg_token = dma_map_single(dev, async_q->msgs,
					    async_q->size * sizeof(*async_q->msgs),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, async_q->msg_token)) {
		dev_err(dev, "Failed to map async queue\n");
		goto free_async_crq;
	}

	/* Pool of SG descriptor lists, SG_ALL srp_direct_buf entries each */
	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
					 SG_ALL * sizeof(struct srp_direct_buf),
					 sizeof(struct srp_direct_buf), 0);

	if (!vhost->sg_pool) {
		dev_err(dev, "Failed to allocate sg pool\n");
		goto unmap_async_crq;
	}

	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
					      &vhost->login_buf_dma, GFP_KERNEL);

	if (!vhost->login_buf) {
		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
		goto free_sg_pool;
	}

	/* Discovery buffer is sized by the max_targets module parameter */
	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
					     &vhost->disc_buf_dma, GFP_KERNEL);

	if (!vhost->disc_buf) {
		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
		goto free_login_buffer;
	}

	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);

	if (!vhost->trace)
		goto free_disc_buffer;

	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
						      sizeof(struct ibmvfc_target));

	if (!vhost->tgt_pool) {
		dev_err(dev, "Couldn't allocate target memory pool\n");
		goto free_trace;
	}

	LEAVE;
	return 0;

	/* Unwind in reverse allocation order */
free_trace:
	kfree(vhost->trace);
free_disc_buffer:
	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
			  vhost->disc_buf_dma);
free_login_buffer:
	dma_free_coherent(dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
free_sg_pool:
	dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
	dma_unmap_single(dev, async_q->msg_token,
			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
free_async_crq:
	free_page((unsigned long)async_q->msgs);
nomem:
	LEAVE;
	return -ENOMEM;
}
5088
/**
 * ibmvfc_rport_add_thread - Worker thread for rport adds
 * @work:	work struct
 *
 * Scans the target list for targets flagged add_rport and either
 * creates a new FC rport for them or updates the roles of the one
 * that already exists. The host lock is dropped around the FC
 * transport calls, so the scan restarts from the list head after
 * each piece of work until none remains.
 **/
static void ibmvfc_rport_add_thread(struct work_struct *work)
{
	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
						 rport_add_work_q);
	struct ibmvfc_target *tgt;
	struct fc_rport *rport;
	unsigned long flags;
	int did_work;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	do {
		did_work = 0;
		/* Stop if the host left the ACTIVE state under us */
		if (vhost->state != IBMVFC_ACTIVE)
			break;

		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->add_rport) {
				did_work = 1;
				tgt->add_rport = 0;
				/* Hold a target ref across the unlocked region */
				kref_get(&tgt->kref);
				rport = tgt->rport;
				if (!rport) {
					/* No rport yet: create one */
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					ibmvfc_tgt_add_rport(tgt);
				} else if (get_device(&rport->dev)) {
					/* Existing rport: refresh its roles */
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					tgt_dbg(tgt, "Setting rport roles\n");
					fc_remote_port_rolechg(rport, tgt->ids.roles);
					put_device(&rport->dev);
				} else {
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
				}

				kref_put(&tgt->kref, ibmvfc_release_tgt);
				spin_lock_irqsave(vhost->host->host_lock, flags);
				break;
			}
		}
	} while(did_work);

	if (vhost->state == IBMVFC_ACTIVE)
		vhost->scan_complete = 1;
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
}
5140
/**
 * ibmvfc_probe - Adapter hot plug add entry point
 * @vdev:	vio device struct
 * @id:	vio device id struct
 *
 * Allocates the Scsi_Host and ibmvfc host structures, sets up driver
 * memory, the work thread, the CRQ and the event pool, registers the
 * host with the SCSI midlayer, links the host into the driver list,
 * and kicks off CRQ initialization and the initial SCSI scan.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvfc_host *vhost;
	struct Scsi_Host *shost;
	struct device *dev = &vdev->dev;
	int rc = -ENOMEM;

	ENTER;
	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
	if (!shost) {
		dev_err(dev, "Couldn't allocate host data\n");
		goto out;
	}

	/* Host limits come from the module parameters at the top of the file */
	shost->transportt = ibmvfc_transport_template;
	shost->can_queue = max_requests;
	shost->max_lun = max_lun;
	shost->max_id = max_targets;
	shost->max_sectors = IBMVFC_MAX_SECTORS;
	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
	shost->unique_id = shost->host_no;

	vhost = shost_priv(shost);
	INIT_LIST_HEAD(&vhost->sent);
	INIT_LIST_HEAD(&vhost->free);
	INIT_LIST_HEAD(&vhost->targets);
	sprintf(vhost->name, IBMVFC_NAME);
	vhost->host = shost;
	vhost->dev = dev;
	vhost->partition_number = -1;
	vhost->log_level = log_level;
	vhost->task_set = 1;
	strcpy(vhost->partition_name, "UNKNOWN");
	init_waitqueue_head(&vhost->work_wait_q);
	init_waitqueue_head(&vhost->init_wait_q);
	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
	mutex_init(&vhost->passthru_mutex);

	if ((rc = ibmvfc_alloc_mem(vhost)))
		goto free_scsi_host;

	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
					 shost->host_no);

	if (IS_ERR(vhost->work_thread)) {
		dev_err(dev, "Couldn't create kernel thread: %ld\n",
			PTR_ERR(vhost->work_thread));
		rc = PTR_ERR(vhost->work_thread);
		goto free_host_mem;
	}

	if ((rc = ibmvfc_init_crq(vhost))) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}

	if ((rc = ibmvfc_init_event_pool(vhost))) {
		dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
		goto release_crq;
	}

	if ((rc = scsi_add_host(shost, dev)))
		goto release_event_pool;

	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;

	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
					   &ibmvfc_trace_attr))) {
		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
		goto remove_shost;
	}

	/* Limit the BSG request queue to single-segment requests */
	if (shost_to_fc_host(shost)->rqst_q)
		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
	dev_set_drvdata(dev, vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_add_tail(&vhost->queue, &ibmvfc_head);
	spin_unlock(&ibmvfc_driver_lock);

	ibmvfc_send_crq_init(vhost);
	scsi_scan_host(shost);
	return 0;

	/* Unwind in reverse setup order */
remove_shost:
	scsi_remove_host(shost);
release_event_pool:
	ibmvfc_free_event_pool(vhost);
release_crq:
	ibmvfc_release_crq_queue(vhost);
kill_kthread:
	kthread_stop(vhost->work_thread);
free_host_mem:
	ibmvfc_free_mem(vhost);
free_scsi_host:
	scsi_host_put(shost);
out:
	LEAVE;
	return rc;
}
5248
/**
 * ibmvfc_remove - Adapter hot plug remove entry point
 * @vdev:	vio device struct
 *
 * Takes the host offline, tears down the CRQ, work thread and SCSI
 * host registration, fails back any outstanding requests, and frees
 * driver memory before dropping the final host reference.
 *
 * Return value:
 *	0
 **/
static int ibmvfc_remove(struct vio_dev *vdev)
{
	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
	unsigned long flags;

	ENTER;
	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_wait_while_resetting(vhost);
	ibmvfc_release_crq_queue(vhost);
	kthread_stop(vhost->work_thread);
	fc_remove_host(vhost->host);
	scsi_remove_host(vhost->host);

	/* Fail back any commands still outstanding */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_purge_requests(vhost, DID_ERROR);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	ibmvfc_free_event_pool(vhost);

	ibmvfc_free_mem(vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_del(&vhost->queue);
	spin_unlock(&ibmvfc_driver_lock);
	scsi_host_put(vhost->host);
	LEAVE;
	return 0;
}
5287
5288 /**
5289  * ibmvfc_resume - Resume from suspend
5290  * @dev:        device struct
5291  *
5292  * We may have lost an interrupt across suspend/resume, so kick the
5293  * interrupt handler
5294  *
5295  */
5296 static int ibmvfc_resume(struct device *dev)
5297 {
5298         unsigned long flags;
5299         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
5300         struct vio_dev *vdev = to_vio_dev(dev);
5301
5302         spin_lock_irqsave(vhost->host->host_lock, flags);
5303         vio_disable_interrupts(vdev);
5304         tasklet_schedule(&vhost->tasklet);
5305         spin_unlock_irqrestore(vhost->host->host_lock, flags);
5306         return 0;
5307 }
5308
5309 /**
5310  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
5311  * @vdev:       vio device struct
5312  *
5313  * Return value:
5314  *      Number of bytes the driver will need to DMA map at the same time in
5315  *      order to perform well.
5316  */
5317 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
5318 {
5319         unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
5320         return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
5321 }
5322
/* VIO devices this driver binds to; terminated by an empty entry */
static const struct vio_device_id ibmvfc_device_table[] = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
5328
/* Power-management callbacks; only resume needs special handling */
static const struct dev_pm_ops ibmvfc_pm_ops = {
	.resume = ibmvfc_resume
};
5332
/* VIO bus driver registration for the ibmvfc adapter */
static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.get_desired_dma = ibmvfc_get_desired_dma,
	.name = IBMVFC_NAME,
	.pm = &ibmvfc_pm_ops,
};
5341
/* FC transport attributes and callbacks exported via scsi_transport_fc */
static struct fc_function_template ibmvfc_transport_functions = {
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,

	/* BSG passthru support */
	.bsg_request = ibmvfc_bsg_request,
	.bsg_timeout = ibmvfc_bsg_timeout,
};
5378
5379 /**
5380  * ibmvfc_module_init - Initialize the ibmvfc module
5381  *
5382  * Return value:
5383  *      0 on success / other on failure
5384  **/
5385 static int __init ibmvfc_module_init(void)
5386 {
5387         int rc;
5388
5389         if (!firmware_has_feature(FW_FEATURE_VIO))
5390                 return -ENODEV;
5391
5392         printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
5393                IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
5394
5395         ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
5396         if (!ibmvfc_transport_template)
5397                 return -ENOMEM;
5398
5399         rc = vio_register_driver(&ibmvfc_driver);
5400         if (rc)
5401                 fc_release_transport(ibmvfc_transport_template);
5402         return rc;
5403 }
5404
/**
 * ibmvfc_module_exit - Teardown the ibmvfc module
 *
 * Unregisters the vio driver, then releases the FC transport template
 * (reverse of ibmvfc_module_init()).
 *
 * Return value:
 *	nothing
 **/
static void __exit ibmvfc_module_exit(void)
{
	vio_unregister_driver(&ibmvfc_driver);
	fc_release_transport(ibmvfc_transport_template);
}

module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);