/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
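/*
 * Note: the tag lookup above masks with (num_itnims - 1), which maps a
 * tag to an array slot cheaply but is only a true modulo if num_itnims
 * is a power of two.
 */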

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);      \
        list_del(&(__itnim)->qe);      \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

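/*
 * The three callback macros below follow one pattern: when invoked on
 * behalf of the FCS (bfa->fcs set), the driver callback runs inline;
 * otherwise it is deferred through the hcb completion queue and runs
 * later via the matching __bfa_cb_itnim_* wrapper.
 */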
#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));      \
        }                                                               \
} while (0)

enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};

/*
 *  itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};
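/*
 * Normal itnim lifecycle, as implemented by the state machine below:
 * uninit -> created -> fwcreate -> online -> cleanup_offline ->
 * fwdelete -> offline -> uninit. Each firmware send has a matching
 * *_qfull state that parks the itnim until BFA_ITNIM_SM_QRESUME
 * signals request-queue space.
 */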

/*
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)
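/*
 * (The list_del + list_add_tail pair above is the open-coded
 * equivalent of list_move_tail().)
 */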


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp, resource is free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};


/*
 *  BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);      \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        struct scsi_lun lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
/*
 *  BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}


static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources back to tskim_free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}

#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)
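/*
 * (__stats above is a struct field name; the macro pastes it textually
 * into both the left and right operands.)
 */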

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
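        /* avg[] accumulates a running latency sum; the mean is
         * presumably derived later as avg[idx] / count[idx]. */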
        io_lat->avg[idx] += val;
}

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* clear accumulated IO stats on all itnims before profiling */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 *  BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests in the pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

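        /*
         * Walking-counter pattern: bfa_wc_init() takes an initial
         * reference and registers bfa_itnim_cleanp_comp() as the resume
         * callback; each in-flight IO/TM takes a reference via
         * bfa_wc_up() and drops it on completion (bfa_itnim_iodone /
         * bfa_itnim_tskdone). bfa_wc_wait() below drops the initial
         * reference, so the CLEANUP event fires once everything drains.
         */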
        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pick up this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}

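/*
 * Deferred completion callbacks queued by the bfa_itnim_*_cb macros.
 * The 'complete' flag is BFA_TRUE on normal delivery; when the
 * completion queue is flushed (e.g. on IOC disable) the callbacks are
 * presumably invoked with BFA_FALSE and the notification is dropped.
 */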
static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 *  bfa_itnim_public
 */

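/*
 * Completion notifications from IO/TM cleanup; each drops one
 * reference on the walking counter started in bfa_itnim_cleanup().
 */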
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
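                /* Prime per-bucket latency minimums with all-ones so
                 * the first recorded sample always becomes the min. */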
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
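                /*
                 * Queue full: park on the request-queue wait list; once
                 * space frees up, bfa_itnim_qresume() raises
                 * BFA_ITNIM_SM_QRESUME and the *_qfull state retries
                 * this send.
                 */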
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Complete all failed inflight requests held on the delayed completion queue.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
 * Stop IO TOV timer and fail back pending IO requests (itnim delete path).
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

#define bfa_io_lat_clock_res_div        HZ
#define bfa_io_lat_clock_res_mul        1000
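/*
 * Latency samples are recorded in jiffies; a consumer can convert a
 * sample to milliseconds as val * clock_res_mul / clock_res_div,
 * i.e. val * 1000 / HZ.
 */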
1465 bfa_status_t
1466 bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1467                         struct bfa_itnim_ioprofile_s *ioprofile)
1468 {
1469         struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
1470         if (!fcpim->io_profile)
1471                 return BFA_STATUS_IOPROFILE_OFF;
1472
1473         itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1474         itnim->ioprofile.io_profile_start_time =
1475                                 bfa_io_profile_start_time(itnim->bfa);
1476         itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1477         itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1478         *ioprofile = itnim->ioprofile;
1479
1480         return BFA_STATUS_OK;
1481 }
1482
1483 void
1484 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1485 {
1486         int j;
1487         memset(&itnim->stats, 0, sizeof(itnim->stats));
1488         memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1489         for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1490                 itnim->ioprofile.io_latency.min[j] = ~0;
1491 }
1492
1493 /*
1494  *  BFA IO module state machine functions
1495  */
1496
1497 /*
1498  * IO is not started (unallocated).
1499  */
1500 static void
1501 bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1502 {
1503         switch (event) {
1504         case BFA_IOIM_SM_START:
1505                 if (!bfa_itnim_is_online(ioim->itnim)) {
1506                         if (!bfa_itnim_hold_io(ioim->itnim)) {
1507                                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1508                                 list_del(&ioim->qe);
1509                                 list_add_tail(&ioim->qe,
1510                                         &ioim->fcpim->ioim_comp_q);
1511                                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1512                                                 __bfa_cb_ioim_pathtov, ioim);
1513                         } else {
1514                                 list_del(&ioim->qe);
1515                                 list_add_tail(&ioim->qe,
1516                                         &ioim->itnim->pending_q);
1517                         }
1518                         break;
1519                 }
1520
1521                 if (ioim->nsges > BFI_SGE_INLINE) {
1522                         if (!bfa_ioim_sgpg_alloc(ioim)) {
1523                                 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
1524                                 return;
1525                         }
1526                 }
1527
1528                 if (!bfa_ioim_send_ioreq(ioim)) {
1529                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1530                         break;
1531                 }
1532
1533                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1534                 break;
1535
1536         case BFA_IOIM_SM_IOTOV:
1537                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1538                 bfa_ioim_move_to_comp_q(ioim);
1539                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1540                                 __bfa_cb_ioim_pathtov, ioim);
1541                 break;
1542
1543         case BFA_IOIM_SM_ABORT:
1544                 /*
1545                  * An IO in the pending queue can receive abort requests.
1546                  * Complete such abort requests immediately.
1547                  */
1548                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1549                 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1550                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1551                         __bfa_cb_ioim_abort, ioim);
1552                 break;
1553
1554         default:
1555                 bfa_sm_fault(ioim->bfa, event);
1556         }
1557 }
1558
1559 /*
1560  * IO is waiting for SG pages.
1561  */
1562 static void
1563 bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1564 {
1565         bfa_trc(ioim->bfa, ioim->iotag);
1566         bfa_trc(ioim->bfa, event);
1567
1568         switch (event) {
1569         case BFA_IOIM_SM_SGALLOCED:
1570                 if (!bfa_ioim_send_ioreq(ioim)) {
1571                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1572                         break;
1573                 }
1574                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1575                 break;
1576
1577         case BFA_IOIM_SM_CLEANUP:
1578                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1579                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1580                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1581                               ioim);
1582                 bfa_ioim_notify_cleanup(ioim);
1583                 break;
1584
1585         case BFA_IOIM_SM_ABORT:
1586                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1587                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1588                 bfa_ioim_move_to_comp_q(ioim);
1589                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1590                               ioim);
1591                 break;
1592
1593         case BFA_IOIM_SM_HWFAIL:
1594                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1595                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1596                 bfa_ioim_move_to_comp_q(ioim);
1597                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1598                               ioim);
1599                 break;
1600
1601         default:
1602                 bfa_sm_fault(ioim->bfa, event);
1603         }
1604 }
1605
1606 /*
1607  * IO is active.
1608  */
1609 static void
1610 bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1611 {
1612         switch (event) {
1613         case BFA_IOIM_SM_COMP_GOOD:
1614                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1615                 bfa_ioim_move_to_comp_q(ioim);
1616                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1617                               __bfa_cb_ioim_good_comp, ioim);
1618                 break;
1619
1620         case BFA_IOIM_SM_COMP:
1621                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1622                 bfa_ioim_move_to_comp_q(ioim);
1623                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1624                               ioim);
1625                 break;
1626
1627         case BFA_IOIM_SM_DONE:
1628                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1629                 bfa_ioim_move_to_comp_q(ioim);
1630                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1631                               ioim);
1632                 break;
1633
1634         case BFA_IOIM_SM_ABORT:
1635                 ioim->iosp->abort_explicit = BFA_TRUE;
1636                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1637
1638                 if (bfa_ioim_send_abort(ioim))
1639                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1640                 else {
1641                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1642                         bfa_stats(ioim->itnim, qwait);
1643                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1644                                           &ioim->iosp->reqq_wait);
1645                 }
1646                 break;
1647
1648         case BFA_IOIM_SM_CLEANUP:
1649                 ioim->iosp->abort_explicit = BFA_FALSE;
1650                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1651
1652                 if (bfa_ioim_send_abort(ioim))
1653                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1654                 else {
1655                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1656                         bfa_stats(ioim->itnim, qwait);
1657                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1658                                           &ioim->iosp->reqq_wait);
1659                 }
1660                 break;
1661
1662         case BFA_IOIM_SM_HWFAIL:
1663                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1664                 bfa_ioim_move_to_comp_q(ioim);
1665                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1666                               ioim);
1667                 break;
1668
1669         case BFA_IOIM_SM_SQRETRY:
1670                 if (bfa_ioim_maxretry_reached(ioim)) {
1671                         /* max retries reached, free the IO */
1672                         bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1673                         bfa_ioim_move_to_comp_q(ioim);
1674                         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1675                                         __bfa_cb_ioim_failed, ioim);
1676                         break;
1677                 }
1678                 /* waiting for the IO tag resource to be freed */
1679                 bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1680                 break;
1681
1682         default:
1683                 bfa_sm_fault(ioim->bfa, event);
1684         }
1685 }
1686
1687 /*
1688  * IO is retried with new tag.
1689  */
1690 static void
1691 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1692 {
1693         switch (event) {
1694         case BFA_IOIM_SM_FREE:
1695                 /* ABTS and RRQ are done; now retry the IO with a new tag */
1696                 bfa_ioim_update_iotag(ioim);
1697                 if (!bfa_ioim_send_ioreq(ioim)) {
1698                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1699                         break;
1700                 }
1701                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1702                 break;
1703
1704         case BFA_IOIM_SM_CLEANUP:
1705                 ioim->iosp->abort_explicit = BFA_FALSE;
1706                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1707
1708                 if (bfa_ioim_send_abort(ioim))
1709                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1710                 else {
1711                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1712                         bfa_stats(ioim->itnim, qwait);
1713                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1714                                           &ioim->iosp->reqq_wait);
1715                 }
1716                 break;
1717
1718         case BFA_IOIM_SM_HWFAIL:
1719                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1720                 bfa_ioim_move_to_comp_q(ioim);
1721                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1722                          __bfa_cb_ioim_failed, ioim);
1723                 break;
1724
1725         case BFA_IOIM_SM_ABORT:
1726                 /* In this state the IO abort is already done;
1727                  * waiting for the IO tag resource to be freed.
1728                  */
1729                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1730                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1731                               ioim);
1732                 break;
1733
1734         default:
1735                 bfa_sm_fault(ioim->bfa, event);
1736         }
1737 }
1738
1739 /*
1740  * IO is being aborted, waiting for completion from firmware.
1741  */
1742 static void
1743 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1744 {
1745         bfa_trc(ioim->bfa, ioim->iotag);
1746         bfa_trc(ioim->bfa, event);
1747
1748         switch (event) {
1749         case BFA_IOIM_SM_COMP_GOOD:
1750         case BFA_IOIM_SM_COMP:
1751         case BFA_IOIM_SM_DONE:
1752         case BFA_IOIM_SM_FREE:
1753                 break;
1754
1755         case BFA_IOIM_SM_ABORT_DONE:
1756                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1757                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1758                               ioim);
1759                 break;
1760
1761         case BFA_IOIM_SM_ABORT_COMP:
1762                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1763                 bfa_ioim_move_to_comp_q(ioim);
1764                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1765                               ioim);
1766                 break;
1767
1768         case BFA_IOIM_SM_COMP_UTAG:
1769                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1770                 bfa_ioim_move_to_comp_q(ioim);
1771                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1772                               ioim);
1773                 break;
1774
1775         case BFA_IOIM_SM_CLEANUP:
1776                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1777                 ioim->iosp->abort_explicit = BFA_FALSE;
1778
1779                 if (bfa_ioim_send_abort(ioim))
1780                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1781                 else {
1782                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1783                         bfa_stats(ioim->itnim, qwait);
1784                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1785                                           &ioim->iosp->reqq_wait);
1786                 }
1787                 break;
1788
1789         case BFA_IOIM_SM_HWFAIL:
1790                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1791                 bfa_ioim_move_to_comp_q(ioim);
1792                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1793                               ioim);
1794                 break;
1795
1796         default:
1797                 bfa_sm_fault(ioim->bfa, event);
1798         }
1799 }
1800
1801 /*
1802  * IO is being cleaned up (implicit abort), waiting for completion from
1803  * firmware.
1804  */
1805 static void
1806 bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1807 {
1808         bfa_trc(ioim->bfa, ioim->iotag);
1809         bfa_trc(ioim->bfa, event);
1810
1811         switch (event) {
1812         case BFA_IOIM_SM_COMP_GOOD:
1813         case BFA_IOIM_SM_COMP:
1814         case BFA_IOIM_SM_DONE:
1815         case BFA_IOIM_SM_FREE:
1816                 break;
1817
1818         case BFA_IOIM_SM_ABORT:
1819                 /*
1820                  * IO is already being aborted implicitly
1821                  */
1822                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1823                 break;
1824
1825         case BFA_IOIM_SM_ABORT_DONE:
1826                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1827                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1828                 bfa_ioim_notify_cleanup(ioim);
1829                 break;
1830
1831         case BFA_IOIM_SM_ABORT_COMP:
1832                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1833                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1834                 bfa_ioim_notify_cleanup(ioim);
1835                 break;
1836
1837         case BFA_IOIM_SM_COMP_UTAG:
1838                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1839                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1840                 bfa_ioim_notify_cleanup(ioim);
1841                 break;
1842
1843         case BFA_IOIM_SM_HWFAIL:
1844                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1845                 bfa_ioim_move_to_comp_q(ioim);
1846                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1847                               ioim);
1848                 break;
1849
1850         case BFA_IOIM_SM_CLEANUP:
1851                 /*
1852                  * The IO can already be in cleanup state due to a TM command.
1853                  * A second cleanup request comes from the ITN offline event.
1854                  */
1855                 break;
1856
1857         default:
1858                 bfa_sm_fault(ioim->bfa, event);
1859         }
1860 }
1861
1862 /*
1863  * IO is waiting for room in the request CQ.
1864  */
1865 static void
1866 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1867 {
1868         bfa_trc(ioim->bfa, ioim->iotag);
1869         bfa_trc(ioim->bfa, event);
1870
1871         switch (event) {
1872         case BFA_IOIM_SM_QRESUME:
1873                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1874                 bfa_ioim_send_ioreq(ioim);
1875                 break;
1876
1877         case BFA_IOIM_SM_ABORT:
1878                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1879                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1880                 bfa_ioim_move_to_comp_q(ioim);
1881                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1882                               ioim);
1883                 break;
1884
1885         case BFA_IOIM_SM_CLEANUP:
1886                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1887                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1888                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1889                               ioim);
1890                 bfa_ioim_notify_cleanup(ioim);
1891                 break;
1892
1893         case BFA_IOIM_SM_HWFAIL:
1894                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1895                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1896                 bfa_ioim_move_to_comp_q(ioim);
1897                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1898                               ioim);
1899                 break;
1900
1901         default:
1902                 bfa_sm_fault(ioim->bfa, event);
1903         }
1904 }
1905
1906 /*
1907  * Active IO is being aborted, waiting for room in request CQ.
1908  */
1909 static void
1910 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1911 {
1912         bfa_trc(ioim->bfa, ioim->iotag);
1913         bfa_trc(ioim->bfa, event);
1914
1915         switch (event) {
1916         case BFA_IOIM_SM_QRESUME:
1917                 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1918                 bfa_ioim_send_abort(ioim);
1919                 break;
1920
1921         case BFA_IOIM_SM_CLEANUP:
1922                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1923                 ioim->iosp->abort_explicit = BFA_FALSE;
1924                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1925                 break;
1926
1927         case BFA_IOIM_SM_COMP_GOOD:
1928         case BFA_IOIM_SM_COMP:
1929                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1930                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1931                 bfa_ioim_move_to_comp_q(ioim);
1932                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1933                               ioim);
1934                 break;
1935
1936         case BFA_IOIM_SM_DONE:
1937                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1938                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1939                 bfa_ioim_move_to_comp_q(ioim);
1940                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1941                               ioim);
1942                 break;
1943
1944         case BFA_IOIM_SM_HWFAIL:
1945                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1946                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1947                 bfa_ioim_move_to_comp_q(ioim);
1948                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1949                               ioim);
1950                 break;
1951
1952         default:
1953                 bfa_sm_fault(ioim->bfa, event);
1954         }
1955 }
1956
1957 /*
1958  * Active IO is being cleaned up, waiting for room in request CQ.
1959  */
1960 static void
1961 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1962 {
1963         bfa_trc(ioim->bfa, ioim->iotag);
1964         bfa_trc(ioim->bfa, event);
1965
1966         switch (event) {
1967         case BFA_IOIM_SM_QRESUME:
1968                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1969                 bfa_ioim_send_abort(ioim);
1970                 break;
1971
1972         case BFA_IOIM_SM_ABORT:
1973                 /*
1974                  * IO is already being cleaned up implicitly
1975                  */
1976                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1977                 break;
1978
1979         case BFA_IOIM_SM_COMP_GOOD:
1980         case BFA_IOIM_SM_COMP:
1981                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1982                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1983                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1984                 bfa_ioim_notify_cleanup(ioim);
1985                 break;
1986
1987         case BFA_IOIM_SM_DONE:
1988                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1989                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1990                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1991                 bfa_ioim_notify_cleanup(ioim);
1992                 break;
1993
1994         case BFA_IOIM_SM_HWFAIL:
1995                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1996                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1997                 bfa_ioim_move_to_comp_q(ioim);
1998                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1999                               ioim);
2000                 break;
2001
2002         default:
2003                 bfa_sm_fault(ioim->bfa, event);
2004         }
2005 }
2006
2007 /*
2008  * IO bfa callback is pending.
2009  */
2010 static void
2011 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2012 {
2013         switch (event) {
2014         case BFA_IOIM_SM_HCB:
2015                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2016                 bfa_ioim_free(ioim);
2017                 break;
2018
2019         case BFA_IOIM_SM_CLEANUP:
2020                 bfa_ioim_notify_cleanup(ioim);
2021                 break;
2022
2023         case BFA_IOIM_SM_HWFAIL:
2024                 break;
2025
2026         default:
2027                 bfa_sm_fault(ioim->bfa, event);
2028         }
2029 }
2030
2031 /*
2032  * IO bfa callback is pending. IO resource cannot be freed.
2033  */
2034 static void
2035 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2036 {
2037         bfa_trc(ioim->bfa, ioim->iotag);
2038         bfa_trc(ioim->bfa, event);
2039
2040         switch (event) {
2041         case BFA_IOIM_SM_HCB:
2042                 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2043                 list_del(&ioim->qe);
2044                 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2045                 break;
2046
2047         case BFA_IOIM_SM_FREE:
2048                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2049                 break;
2050
2051         case BFA_IOIM_SM_CLEANUP:
2052                 bfa_ioim_notify_cleanup(ioim);
2053                 break;
2054
2055         case BFA_IOIM_SM_HWFAIL:
2056                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2057                 break;
2058
2059         default:
2060                 bfa_sm_fault(ioim->bfa, event);
2061         }
2062 }
2063
2064 /*
2065  * IO is completed, waiting resource free from firmware.
2066  */
2067 static void
2068 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2069 {
2070         bfa_trc(ioim->bfa, ioim->iotag);
2071         bfa_trc(ioim->bfa, event);
2072
2073         switch (event) {
2074         case BFA_IOIM_SM_FREE:
2075                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2076                 bfa_ioim_free(ioim);
2077                 break;
2078
2079         case BFA_IOIM_SM_CLEANUP:
2080                 bfa_ioim_notify_cleanup(ioim);
2081                 break;
2082
2083         case BFA_IOIM_SM_HWFAIL:
2084                 break;
2085
2086         default:
2087                 bfa_sm_fault(ioim->bfa, event);
2088         }
2089 }
2090
2091 /*
2092  * This is called from bfa_fcpim_start after the driver has completed
2093  * bfa_init() and the flash read. Now invalidate the stale contents of the
2094  * lun mask, such as the unit attention, rp tag and lp tag.
2095  */
2096 static void
2097 bfa_ioim_lm_init(struct bfa_s *bfa)
2098 {
2099         struct bfa_lun_mask_s *lunm_list;
2100         int     i;
2101
2102         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2103                 return;
2104
2105         lunm_list = bfa_get_lun_mask_list(bfa);
2106         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2107                 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2108                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2109                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2110         }
2111 }
2112
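/*
 * The __bfa_cb_* completion handlers below are scheduled through
 * bfa_cb_queue() and normally run with complete == BFA_TRUE. When the
 * queued callbacks are flushed rather than executed (assumption: e.g.
 * during IOC failure handling), they run with complete == BFA_FALSE and
 * only feed BFA_IOIM_SM_HCB back into the state machine, without
 * notifying the OS driver.
 */
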
2113 static void
2114 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2115 {
2116         struct bfa_ioim_s *ioim = cbarg;
2117
2118         if (!complete) {
2119                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2120                 return;
2121         }
2122
2123         bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2124 }
2125
2126 static void
2127 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2128 {
2129         struct bfa_ioim_s       *ioim = cbarg;
2130         struct bfi_ioim_rsp_s *m;
2131         u8      *snsinfo = NULL;
2132         u8      sns_len = 0;
2133         s32     residue = 0;
2134
2135         if (!complete) {
2136                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2137                 return;
2138         }
2139
2140         m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2141         if (m->io_status == BFI_IOIM_STS_OK) {
2142                 /*
2143                  * set up sense information, if present
2144                  */
2145                 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2146                                         m->sns_len) {
2147                         sns_len = m->sns_len;
2148                         snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2149                                                 ioim->iotag);
2150                 }
2151
2152                 /*
2153                  * set up the residue value correctly for normal completions
2154                  */
2155                 if (m->resid_flags == FCP_RESID_UNDER) {
2156                         residue = be32_to_cpu(m->residue);
2157                         bfa_stats(ioim->itnim, iocomp_underrun);
2158                 }
2159                 if (m->resid_flags == FCP_RESID_OVER) {
2160                         residue = be32_to_cpu(m->residue);
2161                         residue = -residue;
2162                         bfa_stats(ioim->itnim, iocomp_overrun);
2163                 }
2164         }
2165
2166         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2167                           m->scsi_status, sns_len, snsinfo, residue);
2168 }
2169
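/*
 * Illustrative numbers for the residue sign convention applied above:
 * an 8K read that transferred only 6K completes with FCP_RESID_UNDER
 * and residue = +2048, while a 512-byte overrun completes with
 * FCP_RESID_OVER and residue = -512.
 */
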
2170 void
2171 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2172                         u16 rp_tag, u8 lp_tag)
2173 {
2174         struct bfa_lun_mask_s *lun_list;
2175         u8      i;
2176
2177         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2178                 return;
2179
2180         lun_list = bfa_get_lun_mask_list(bfa);
2181         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2182                 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2183                         if ((lun_list[i].lp_wwn == lp_wwn) &&
2184                             (lun_list[i].rp_wwn == rp_wwn)) {
2185                                 lun_list[i].rp_tag = rp_tag;
2186                                 lun_list[i].lp_tag = lp_tag;
2187                         }
2188                 }
2189         }
2190 }
2191
2192 /*
2193  * Set UA for all active LUNs in the LM DB.
2194  */
2195 static void
2196 bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2197 {
2198         struct bfa_lun_mask_s   *lunm_list;
2199         int     i;
2200
2201         lunm_list = bfa_get_lun_mask_list(bfa);
2202         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2203                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2204                         continue;
2205                 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2206         }
2207 }
2208
2209 bfa_status_t
2210 bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2211 {
2212         struct bfa_lunmask_cfg_s        *lun_mask;
2213
2214         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2215         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2216                 return BFA_STATUS_FAILED;
2217
2218         if (bfa_get_lun_mask_status(bfa) == update)
2219                 return BFA_STATUS_NO_CHANGE;
2220
2221         lun_mask = bfa_get_lun_mask(bfa);
2222         lun_mask->status = update;
2223
2224         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2225                 bfa_ioim_lm_set_ua(bfa);
2226
2227         return bfa_dconf_update(bfa);
2228 }
2229
2230 bfa_status_t
2231 bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2232 {
2233         int i;
2234         struct bfa_lun_mask_s   *lunm_list;
2235
2236         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2237         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2238                 return BFA_STATUS_FAILED;
2239
2240         lunm_list = bfa_get_lun_mask_list(bfa);
2241         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2242                 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2243                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2244                                 bfa_rport_unset_lunmask(bfa,
2245                                   BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2246                 }
2247         }
2248
2249         memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2250         return bfa_dconf_update(bfa);
2251 }
2252
2253 bfa_status_t
2254 bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2255 {
2256         struct bfa_lunmask_cfg_s *lun_mask;
2257
2258         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2259         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2260                 return BFA_STATUS_FAILED;
2261
2262         lun_mask = bfa_get_lun_mask(bfa);
2263         memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2264         return BFA_STATUS_OK;
2265 }
2266
2267 bfa_status_t
2268 bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2269                       wwn_t rpwwn, struct scsi_lun lun)
2270 {
2271         struct bfa_lun_mask_s *lunm_list;
2272         struct bfa_rport_s *rp = NULL;
2273         int i, free_index = MAX_LUN_MASK_CFG + 1;
2274         struct bfa_fcs_lport_s *port = NULL;
2275         struct bfa_fcs_rport_s *rp_fcs;
2276
2277         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2278         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2279                 return BFA_STATUS_FAILED;
2280
2281         port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2282                                    vf_id, *pwwn);
2283         if (port) {
2284                 *pwwn = port->port_cfg.pwwn;
2285                 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2286                 if (rp_fcs)
2287                         rp = rp_fcs->bfa_rport;
2288         }
2289
2290         lunm_list = bfa_get_lun_mask_list(bfa);
2291         /* check if the entry already exists; also remember a free slot */
2292         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2293                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2294                         free_index = i;
2295                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2296                     (lunm_list[i].rp_wwn == rpwwn) &&
2297                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2298                      scsilun_to_int((struct scsi_lun *)&lun)))
2299                         return BFA_STATUS_ENTRY_EXISTS;
2300         }
2301
2302         if (free_index > MAX_LUN_MASK_CFG)
2303                 return BFA_STATUS_MAX_ENTRY_REACHED;
2304
2305         if (rp) {
2306                 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2307                                                    rp->rport_info.local_pid);
2308                 lunm_list[free_index].rp_tag = rp->rport_tag;
2309         } else {
2310                 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2311                 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2312         }
2313
2314         lunm_list[free_index].lp_wwn = *pwwn;
2315         lunm_list[free_index].rp_wwn = rpwwn;
2316         lunm_list[free_index].lun = lun;
2317         lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2318
2319         /* set for all luns in this rp */
2320         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2321                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2322                     (lunm_list[i].rp_wwn == rpwwn))
2323                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2324         }
2325
2326         return bfa_dconf_update(bfa);
2327 }
2328
2329 bfa_status_t
2330 bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2331                          wwn_t rpwwn, struct scsi_lun lun)
2332 {
2333         struct bfa_lun_mask_s   *lunm_list;
2334         struct bfa_rport_s      *rp = NULL;
2335         struct bfa_fcs_lport_s *port = NULL;
2336         struct bfa_fcs_rport_s *rp_fcs;
2337         int     i;
2338
2339         /* In min cfg, lunm_list could be NULL, but no commands should run. */
2340         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2341                 return BFA_STATUS_FAILED;
2342
2343         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2344         bfa_trc(bfa, *pwwn);
2345         bfa_trc(bfa, rpwwn);
2346         bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2347
2348         if (*pwwn == 0) {
2349                 port = bfa_fcs_lookup_port(
2350                                 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2351                                 vf_id, *pwwn);
2352                 if (port) {
2353                         *pwwn = port->port_cfg.pwwn;
2354                         rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2355                         if (rp_fcs)
2356                                 rp = rp_fcs->bfa_rport;
2357                 }
2358         }
2359
2360         lunm_list = bfa_get_lun_mask_list(bfa);
2361         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2362                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2363                     (lunm_list[i].rp_wwn == rpwwn) &&
2364                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2365                      scsilun_to_int((struct scsi_lun *)&lun))) {
2366                         lunm_list[i].lp_wwn = 0;
2367                         lunm_list[i].rp_wwn = 0;
2368                         int_to_scsilun(0, &lunm_list[i].lun);
2369                         lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2370                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2371                                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2372                                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2373                         }
2374                         return bfa_dconf_update(bfa);
2375                 }
2376         }
2377
2378         /* set for all luns in this rp */
2379         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2380                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2381                     (lunm_list[i].rp_wwn == rpwwn))
2382                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2383         }
2384
2385         return BFA_STATUS_ENTRY_NOT_EXISTS;
2386 }
2387
2388 static void
2389 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2390 {
2391         struct bfa_ioim_s *ioim = cbarg;
2392
2393         if (!complete) {
2394                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2395                 return;
2396         }
2397
2398         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2399                           0, 0, NULL, 0);
2400 }
2401
2402 static void
2403 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2404 {
2405         struct bfa_ioim_s *ioim = cbarg;
2406
2407         bfa_stats(ioim->itnim, path_tov_expired);
2408         if (!complete) {
2409                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2410                 return;
2411         }
2412
2413         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2414                           0, 0, NULL, 0);
2415 }
2416
2417 static void
2418 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2419 {
2420         struct bfa_ioim_s *ioim = cbarg;
2421
2422         if (!complete) {
2423                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2424                 return;
2425         }
2426
2427         bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2428 }
2429
2430 static void
2431 bfa_ioim_sgpg_alloced(void *cbarg)
2432 {
2433         struct bfa_ioim_s *ioim = cbarg;
2434
2435         ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2436         list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2437         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2438         bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2439 }
2440
2441 /*
2442  * Send I/O request to firmware.
2443  */
2444 static  bfa_boolean_t
2445 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2446 {
2447         struct bfa_itnim_s *itnim = ioim->itnim;
2448         struct bfi_ioim_req_s *m;
2449         static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2450         struct bfi_sge_s *sge, *sgpge;
2451         u32     pgdlen = 0;
2452         u32     fcp_dl;
2453         u64 addr;
2454         struct scatterlist *sg;
2455         struct bfa_sgpg_s *sgpg;
2456         struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2457         u32 i, sge_id, pgcumsz;
2458         enum dma_data_direction dmadir;
2459
2460         /*
2461          * check for room in queue to send request now
2462          */
2463         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2464         if (!m) {
2465                 bfa_stats(ioim->itnim, qwait);
2466                 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2467                                   &ioim->iosp->reqq_wait);
2468                 return BFA_FALSE;
2469         }
2470
2471         /*
2472          * build i/o request message next
2473          */
2474         m->io_tag = cpu_to_be16(ioim->iotag);
2475         m->rport_hdl = ioim->itnim->rport->fw_handle;
2476         m->io_timeout = 0;
2477
2478         sge = &m->sges[0];
2479         sgpg = ioim->sgpg;
2480         sge_id = 0;
2481         sgpge = NULL;
2482         pgcumsz = 0;
2483         scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2484                 if (i == 0) {
2485                         /* build inline IO SG element */
2486                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2487                         sge->sga = *(union bfi_addr_u *) &addr;
2488                         pgdlen = sg_dma_len(sg);
2489                         sge->sg_len = pgdlen;
2490                         sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2491                                         BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2492                         bfa_sge_to_be(sge);
2493                         sge++;
2494                 } else {
2495                         if (sge_id == 0)
2496                                 sgpge = sgpg->sgpg->sges;
2497
2498                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2499                         sgpge->sga = *(union bfi_addr_u *) &addr;
2500                         sgpge->sg_len = sg_dma_len(sg);
2501                         pgcumsz += sgpge->sg_len;
2502
2503                         /* set flags */
2504                         if (i < (ioim->nsges - 1) &&
2505                                         sge_id < (BFI_SGPG_DATA_SGES - 1))
2506                                 sgpge->flags = BFI_SGE_DATA;
2507                         else if (i < (ioim->nsges - 1))
2508                                 sgpge->flags = BFI_SGE_DATA_CPL;
2509                         else
2510                                 sgpge->flags = BFI_SGE_DATA_LAST;
2511
2512                         bfa_sge_to_le(sgpge);
2513
2514                         sgpge++;
2515                         if (i == (ioim->nsges - 1)) {
2516                                 sgpge->flags = BFI_SGE_PGDLEN;
2517                                 sgpge->sga.a32.addr_lo = 0;
2518                                 sgpge->sga.a32.addr_hi = 0;
2519                                 sgpge->sg_len = pgcumsz;
2520                                 bfa_sge_to_le(sgpge);
2521                         } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2522                                 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2523                                 sgpge->flags = BFI_SGE_LINK;
2524                                 sgpge->sga = sgpg->sgpg_pa;
2525                                 sgpge->sg_len = pgcumsz;
2526                                 bfa_sge_to_le(sgpge);
2527                                 sge_id = 0;
2528                                 pgcumsz = 0;
2529                         }
2530                 }
2531         }
2532
2533         if (ioim->nsges > BFI_SGE_INLINE) {
2534                 sge->sga = ioim->sgpg->sgpg_pa;
2535         } else {
2536                 sge->sga.a32.addr_lo = 0;
2537                 sge->sga.a32.addr_hi = 0;
2538         }
2539         sge->sg_len = pgdlen;
2540         sge->flags = BFI_SGE_PGDLEN;
2541         bfa_sge_to_be(sge);
2542
2543         /*
2544          * set up I/O command parameters
2545          */
2546         m->cmnd = cmnd_z0;
2547         int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2548         dmadir = cmnd->sc_data_direction;
2549         if (dmadir == DMA_TO_DEVICE)
2550                 m->cmnd.iodir = FCP_IODIR_WRITE;
2551         else if (dmadir == DMA_FROM_DEVICE)
2552                 m->cmnd.iodir = FCP_IODIR_READ;
2553         else
2554                 m->cmnd.iodir = FCP_IODIR_NONE;
2555
2556         m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2557         fcp_dl = scsi_bufflen(cmnd);
2558         m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2559
2560         /*
2561          * set up I/O message header
2562          */
2563         switch (m->cmnd.iodir) {
2564         case FCP_IODIR_READ:
2565                 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2566                 bfa_stats(itnim, input_reqs);
2567                 ioim->itnim->stats.rd_throughput += fcp_dl;
2568                 break;
2569         case FCP_IODIR_WRITE:
2570                 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2571                 bfa_stats(itnim, output_reqs);
2572                 ioim->itnim->stats.wr_throughput += fcp_dl;
2573                 break;
2574         case FCP_IODIR_RW:
2575                 bfa_stats(itnim, input_reqs);
2576                 bfa_stats(itnim, output_reqs);
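                /* fall through - RW requests also use the generic IO opcode */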
2577         default:
2578                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2579         }
2580         if (itnim->seq_rec ||
2581             (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2582                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2583
2584         /*
2585          * queue I/O message to firmware
2586          */
2587         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2588         return BFA_TRUE;
2589 }
2590
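/*
 * Layout produced by bfa_ioim_send_ioreq() above for a request with
 * more than BFI_SGE_INLINE scatter-gather entries (illustrative):
 *
 *   request msg: [ inline SGE ][ PGDLEN SGE -> first SG page ]
 *   full page:   [ DATA ] ... [ DATA_CPL ][ LINK -> next SG page ]
 *   last page:   [ DATA ] ... [ DATA_LAST ][ PGDLEN trailer ]
 *
 * Each SG page carries up to BFI_SGPG_DATA_SGES data elements; a full
 * page is closed with a LINK element to the next page, and the final
 * page with a PGDLEN element holding the cumulative length.
 */
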
2591 /*
2592  * Set up any additional SG pages needed. The inline SG element is set up
2593  * at queuing time.
2594  */
2595 static bfa_boolean_t
2596 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2597 {
2598         u16     nsgpgs;
2599
2600         WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2601
2602         /*
2603          * allocate SG pages needed
2604          */
2605         nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2606         if (!nsgpgs)
2607                 return BFA_TRUE;
2608
2609         if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2610             != BFA_STATUS_OK) {
2611                 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2612                 return BFA_FALSE;
2613         }
2614
2615         ioim->nsgpgs = nsgpgs;
2616         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2617
2618         return BFA_TRUE;
2619 }
2620
2621 /*
2622  * Send I/O abort request to firmware.
2623  */
2624 static  bfa_boolean_t
2625 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2626 {
2627         struct bfi_ioim_abort_req_s *m;
2628         enum bfi_ioim_h2i       msgop;
2629
2630         /*
2631          * check for room in queue to send request now
2632          */
2633         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2634         if (!m)
2635                 return BFA_FALSE;
2636
2637         /*
2638          * build the IO abort message next
2639          */
2640         if (ioim->iosp->abort_explicit)
2641                 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2642         else
2643                 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2644
2645         bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2646         m->io_tag    = cpu_to_be16(ioim->iotag);
2647         m->abort_tag = ++ioim->abort_tag;
2648
2649         /*
2650          * queue I/O message to firmware
2651          */
2652         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2653         return BFA_TRUE;
2654 }
2655
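/*
 * Note: abort_tag is bumped on every abort request sent, so that
 * bfa_ioim_isr() can discard completions belonging to stale aborts (see
 * the BFI_IOIM_STS_HOST_ABORTED handling there, which compares the
 * response abort_tag against ioim->abort_tag).
 */
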
2656 /*
2657  * Call to resume any I/O requests waiting for room in request queue.
2658  */
2659 static void
2660 bfa_ioim_qresume(void *cbarg)
2661 {
2662         struct bfa_ioim_s *ioim = cbarg;
2663
2664         bfa_stats(ioim->itnim, qresumes);
2665         bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2666 }
2667
2668
2669 static void
2670 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2671 {
2672         /*
2673          * Move IO from itnim queue to fcpim global queue since itnim will be
2674          * freed.
2675          */
2676         list_del(&ioim->qe);
2677         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2678
2679         if (!ioim->iosp->tskim) {
2680                 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2681                         bfa_cb_dequeue(&ioim->hcb_qe);
2682                         list_del(&ioim->qe);
2683                         list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2684                 }
2685                 bfa_itnim_iodone(ioim->itnim);
2686         } else
2687                 bfa_wc_down(&ioim->iosp->tskim->wc);
2688 }
2689
2690 static bfa_boolean_t
2691 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2692 {
2693         if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2694             (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2695             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2696             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2697             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2698             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2699             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2700                 return BFA_FALSE;
2701
2702         return BFA_TRUE;
2703 }
2704
2705 void
2706 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2707 {
2708         /*
2709          * If the path TOV timer expired, fail back with PATHTOV status;
2710          * these IO requests are not normally retried by the IO stack.
2711          *
2712          * Otherwise the device came back online; fail the IO with normal
2713          * failed status so that the IO stack retries it.
2714          */
2715         if (iotov)
2716                 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2717         else {
2718                 ioim->io_cbfn = __bfa_cb_ioim_failed;
2719                 bfa_stats(ioim->itnim, iocom_nexus_abort);
2720         }
2721         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2722
2723         /*
2724          * Move IO to fcpim global queue since itnim will be
2725          * freed.
2726          */
2727         list_del(&ioim->qe);
2728         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2729 }
2730
2731
2732 /*
2733  * Memory allocation and initialization.
2734  */
2735 void
2736 bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2737 {
2738         struct bfa_ioim_s               *ioim;
2739         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
2740         struct bfa_ioim_sp_s    *iosp;
2741         u16             i;
2742
2743         /*
2744          * claim memory first
2745          */
2746         ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2747         fcpim->ioim_arr = ioim;
2748         bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2749
2750         iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2751         fcpim->ioim_sp_arr = iosp;
2752         bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2753
2754         /*
2755          * Initialize ioim free queues
2756          */
2757         INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2758         INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2759
2760         for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2761              i++, ioim++, iosp++) {
2762                 /*
2763                  * initialize IOIM
2764                  */
2765                 memset(ioim, 0, sizeof(struct bfa_ioim_s));
2766                 ioim->iotag   = i;
2767                 ioim->bfa     = fcpim->bfa;
2768                 ioim->fcpim   = fcpim;
2769                 ioim->iosp    = iosp;
2770                 INIT_LIST_HEAD(&ioim->sgpg_q);
2771                 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2772                                    bfa_ioim_qresume, ioim);
2773                 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2774                                    bfa_ioim_sgpg_alloced, ioim);
2775                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2776         }
2777 }
2778
2779 void
2780 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2781 {
2782         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2783         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2784         struct bfa_ioim_s *ioim;
2785         u16     iotag;
2786         enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2787
2788         iotag = be16_to_cpu(rsp->io_tag);
2789
2790         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2791         WARN_ON(ioim->iotag != iotag);
2792
2793         bfa_trc(ioim->bfa, ioim->iotag);
2794         bfa_trc(ioim->bfa, rsp->io_status);
2795         bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2796
2797         if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2798                 ioim->iosp->comp_rspmsg = *m;
2799
2800         switch (rsp->io_status) {
2801         case BFI_IOIM_STS_OK:
2802                 bfa_stats(ioim->itnim, iocomp_ok);
2803                 if (rsp->reuse_io_tag == 0)
2804                         evt = BFA_IOIM_SM_DONE;
2805                 else
2806                         evt = BFA_IOIM_SM_COMP;
2807                 break;
2808
2809         case BFI_IOIM_STS_TIMEDOUT:
2810                 bfa_stats(ioim->itnim, iocomp_timedout);
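                /* fall through - timed-out IOs are reported as aborted */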
2811         case BFI_IOIM_STS_ABORTED:
2812                 rsp->io_status = BFI_IOIM_STS_ABORTED;
2813                 bfa_stats(ioim->itnim, iocomp_aborted);
2814                 if (rsp->reuse_io_tag == 0)
2815                         evt = BFA_IOIM_SM_DONE;
2816                 else
2817                         evt = BFA_IOIM_SM_COMP;
2818                 break;
2819
2820         case BFI_IOIM_STS_PROTO_ERR:
2821                 bfa_stats(ioim->itnim, iocom_proto_err);
2822                 WARN_ON(!rsp->reuse_io_tag);
2823                 evt = BFA_IOIM_SM_COMP;
2824                 break;
2825
2826         case BFI_IOIM_STS_SQER_NEEDED:
2827                 bfa_stats(ioim->itnim, iocom_sqer_needed);
2828                 WARN_ON(rsp->reuse_io_tag != 0);
2829                 evt = BFA_IOIM_SM_SQRETRY;
2830                 break;
2831
2832         case BFI_IOIM_STS_RES_FREE:
2833                 bfa_stats(ioim->itnim, iocom_res_free);
2834                 evt = BFA_IOIM_SM_FREE;
2835                 break;
2836
2837         case BFI_IOIM_STS_HOST_ABORTED:
2838                 bfa_stats(ioim->itnim, iocom_hostabrts);
2839                 if (rsp->abort_tag != ioim->abort_tag) {
2840                         bfa_trc(ioim->bfa, rsp->abort_tag);
2841                         bfa_trc(ioim->bfa, ioim->abort_tag);
2842                         return;
2843                 }
2844
2845                 if (rsp->reuse_io_tag)
2846                         evt = BFA_IOIM_SM_ABORT_COMP;
2847                 else
2848                         evt = BFA_IOIM_SM_ABORT_DONE;
2849                 break;
2850
2851         case BFI_IOIM_STS_UTAG:
2852                 bfa_stats(ioim->itnim, iocom_utags);
2853                 evt = BFA_IOIM_SM_COMP_UTAG;
2854                 break;
2855
2856         default:
2857                 WARN_ON(1);
2858         }
2859
2860         bfa_sm_send_event(ioim, evt);
2861 }
2862
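/*
 * Summary of the io_status to state machine event mapping above:
 *
 *   OK / ABORTED / TIMEDOUT -> SM_COMP if firmware marked the IO tag
 *                              reusable, SM_DONE if the host must wait
 *                              for a later RES_FREE
 *   PROTO_ERR               -> SM_COMP (tag is always reusable here)
 *   SQER_NEEDED             -> SM_SQRETRY (retry the IO with a new tag)
 *   RES_FREE                -> SM_FREE
 *   HOST_ABORTED            -> SM_ABORT_COMP or SM_ABORT_DONE
 *   UTAG                    -> SM_COMP_UTAG
 */
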
2863 void
2864 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2865 {
2866         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2867         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2868         struct bfa_ioim_s *ioim;
2869         u16     iotag;
2870
2871         iotag = be16_to_cpu(rsp->io_tag);
2872
2873         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2874         WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2875
2876         bfa_ioim_cb_profile_comp(fcpim, ioim);
2877
2878         bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2879 }
2880
2881 /*
2882  * Called by itnim to clean up IO while going offline.
2883  */
2884 void
2885 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2886 {
2887         bfa_trc(ioim->bfa, ioim->iotag);
2888         bfa_stats(ioim->itnim, io_cleanups);
2889
2890         ioim->iosp->tskim = NULL;
2891         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2892 }
2893
2894 void
2895 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2896 {
2897         bfa_trc(ioim->bfa, ioim->iotag);
2898         bfa_stats(ioim->itnim, io_tmaborts);
2899
2900         ioim->iosp->tskim = tskim;
2901         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2902 }
2903
2904 /*
2905  * IOC failure handling.
2906  */
2907 void
2908 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2909 {
2910         bfa_trc(ioim->bfa, ioim->iotag);
2911         bfa_stats(ioim->itnim, io_iocdowns);
2912         bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2913 }
2914
2915 /*
2916  * IO offline TOV popped. Fail the pending IO.
2917  */
2918 void
2919 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2920 {
2921         bfa_trc(ioim->bfa, ioim->iotag);
2922         bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2923 }
2924
2925
2926 /*
2927  * Allocate IOIM resource for initiator mode I/O request.
2928  */
2929 struct bfa_ioim_s *
2930 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2931                 struct bfa_itnim_s *itnim, u16 nsges)
2932 {
2933         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2934         struct bfa_ioim_s *ioim;
2935         struct bfa_iotag_s *iotag = NULL;
2936
2937         /*
2938          * allocate IOIM resource
2939          */
2940         bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2941         if (!iotag) {
2942                 bfa_stats(itnim, no_iotags);
2943                 return NULL;
2944         }
2945
2946         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2947
2948         ioim->dio = dio;
2949         ioim->itnim = itnim;
2950         ioim->nsges = nsges;
2951         ioim->nsgpgs = 0;
2952
2953         bfa_stats(itnim, total_ios);
2954         fcpim->ios_active++;
2955
2956         list_add_tail(&ioim->qe, &itnim->io_q);
2957
2958         return ioim;
2959 }
2960
2961 void
2962 bfa_ioim_free(struct bfa_ioim_s *ioim)
2963 {
2964         struct bfa_fcpim_s *fcpim = ioim->fcpim;
2965         struct bfa_iotag_s *iotag;
2966
2967         if (ioim->nsgpgs > 0)
2968                 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2969
2970         bfa_stats(ioim->itnim, io_comps);
2971         fcpim->ios_active--;
2972
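        /*
         * Strip the retry count carried in the upper tag bits (assumption:
         * bumped by bfa_ioim_update_iotag() on SQ error retries) to recover
         * the base tag before returning it to a free list.
         */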
2973         ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2974
2975         WARN_ON(!(ioim->iotag <
2976                 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2977         iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2978
2979         if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2980                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2981         else
2982                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2983
2984         list_del(&ioim->qe);
2985 }
2986
2987 void
2988 bfa_ioim_start(struct bfa_ioim_s *ioim)
2989 {
2990         bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2991
2992         /*
2993          * Obtain the queue over which this request has to be issued
2994          */
2995         ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2996                         BFA_FALSE : bfa_itnim_get_reqq(ioim);
2997
2998         bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2999 }
3000
3001 /*
3002  * Driver I/O abort request.
3003  */
3004 bfa_status_t
3005 bfa_ioim_abort(struct bfa_ioim_s *ioim)
3006 {
3008         bfa_trc(ioim->bfa, ioim->iotag);
3009
3010         if (!bfa_ioim_is_abortable(ioim))
3011                 return BFA_STATUS_FAILED;
3012
3013         bfa_stats(ioim->itnim, io_aborts);
3014         bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3015
3016         return BFA_STATUS_OK;
3017 }
3018
3019 /*
3020  *  BFA TSKIM state machine functions
3021  */
3022
3023 /*
3024  * Task management command beginning state.
3025  */
3026 static void
3027 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3028 {
3029         bfa_trc(tskim->bfa, event);
3030
3031         switch (event) {
3032         case BFA_TSKIM_SM_START:
3033                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3034                 bfa_tskim_gather_ios(tskim);
3035
3036                 /*
3037                  * If the device is offline, do not send the TM on the wire.
3038                  * Just clean up any pending IO requests and complete the TM.
3039                  */
3040                 if (!bfa_itnim_is_online(tskim->itnim)) {
3041                         bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3042                         tskim->tsk_status = BFI_TSKIM_STS_OK;
3043                         bfa_tskim_cleanup_ios(tskim);
3044                         return;
3045                 }
3046
3047                 if (!bfa_tskim_send(tskim)) {
3048                         bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3049                         bfa_stats(tskim->itnim, tm_qwait);
3050                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3051                                           &tskim->reqq_wait);
3052                 }
3053                 break;
3054
3055         default:
3056                 bfa_sm_fault(tskim->bfa, event);
3057         }
3058 }
3059
3060 /*
3061  * TM command is active, awaiting completion from firmware to
3062  * clean up IO requests in TM scope.
3063  */
3064 static void
3065 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3066 {
3067         bfa_trc(tskim->bfa, event);
3068
3069         switch (event) {
3070         case BFA_TSKIM_SM_DONE:
3071                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3072                 bfa_tskim_cleanup_ios(tskim);
3073                 break;
3074
3075         case BFA_TSKIM_SM_CLEANUP:
3076                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3077                 if (!bfa_tskim_send_abort(tskim)) {
3078                         bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3079                         bfa_stats(tskim->itnim, tm_qwait);
3080                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3081                                 &tskim->reqq_wait);
3082                 }
3083                 break;
3084
3085         case BFA_TSKIM_SM_HWFAIL:
3086                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3087                 bfa_tskim_iocdisable_ios(tskim);
3088                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3089                 break;
3090
3091         default:
3092                 bfa_sm_fault(tskim->bfa, event);
3093         }
3094 }
3095
3096 /*
3097  * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3098  * completion event from firmware.
3099  */
3100 static void
3101 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3102 {
3103         bfa_trc(tskim->bfa, event);
3104
3105         switch (event) {
3106         case BFA_TSKIM_SM_DONE:
3107                 /*
3108                  * Ignore and wait for ABORT completion from firmware.
3109                  */
3110                 break;
3111
3112         case BFA_TSKIM_SM_CLEANUP_DONE:
3113                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3114                 bfa_tskim_cleanup_ios(tskim);
3115                 break;
3116
3117         case BFA_TSKIM_SM_HWFAIL:
3118                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3119                 bfa_tskim_iocdisable_ios(tskim);
3120                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3121                 break;
3122
3123         default:
3124                 bfa_sm_fault(tskim->bfa, event);
3125         }
3126 }
3127
3128 static void
3129 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3130 {
3131         bfa_trc(tskim->bfa, event);
3132
3133         switch (event) {
3134         case BFA_TSKIM_SM_IOS_DONE:
3135                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3136                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3137                 break;
3138
3139         case BFA_TSKIM_SM_CLEANUP:
3140                 /*
3141                  * Ignore, TM command completed on wire.
3142                  * Notify TM completion on IO cleanup completion.
3143                  */
3144                 break;
3145
3146         case BFA_TSKIM_SM_HWFAIL:
3147                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3148                 bfa_tskim_iocdisable_ios(tskim);
3149                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3150                 break;
3151
3152         default:
3153                 bfa_sm_fault(tskim->bfa, event);
3154         }
3155 }
3156
3157 /*
3158  * Task management command is waiting for room in request CQ
3159  */
3160 static void
3161 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3162 {
3163         bfa_trc(tskim->bfa, event);
3164
3165         switch (event) {
3166         case BFA_TSKIM_SM_QRESUME:
3167                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3168                 bfa_tskim_send(tskim);
3169                 break;
3170
3171         case BFA_TSKIM_SM_CLEANUP:
3172                 /*
3173                  * No need to send TM on wire since ITN is offline.
3174                  */
3175                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3176                 bfa_reqq_wcancel(&tskim->reqq_wait);
3177                 bfa_tskim_cleanup_ios(tskim);
3178                 break;
3179
3180         case BFA_TSKIM_SM_HWFAIL:
3181                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3182                 bfa_reqq_wcancel(&tskim->reqq_wait);
3183                 bfa_tskim_iocdisable_ios(tskim);
3184                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3185                 break;
3186
3187         default:
3188                 bfa_sm_fault(tskim->bfa, event);
3189         }
3190 }
3191
3192 /*
3193  * Task management command is active, awaiting room in the request CQ
3194  * to send the cleanup request.
3195  */
3196 static void
3197 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3198                 enum bfa_tskim_event event)
3199 {
3200         bfa_trc(tskim->bfa, event);
3201
3202         switch (event) {
3203         case BFA_TSKIM_SM_DONE:
3204                 bfa_reqq_wcancel(&tskim->reqq_wait);
3205                 /*
3206                  * Fall through !!!
3207                  */
3208         case BFA_TSKIM_SM_QRESUME:
3209                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3210                 bfa_tskim_send_abort(tskim);
3211                 break;
3212
3213         case BFA_TSKIM_SM_HWFAIL:
3214                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3215                 bfa_reqq_wcancel(&tskim->reqq_wait);
3216                 bfa_tskim_iocdisable_ios(tskim);
3217                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3218                 break;
3219
3220         default:
3221                 bfa_sm_fault(tskim->bfa, event);
3222         }
3223 }
3224
3225 /*
3226  * BFA callback is pending
3227  */
3228 static void
3229 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3230 {
3231         bfa_trc(tskim->bfa, event);
3232
3233         switch (event) {
3234         case BFA_TSKIM_SM_HCB:
3235                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3236                 bfa_tskim_free(tskim);
3237                 break;
3238
3239         case BFA_TSKIM_SM_CLEANUP:
3240                 bfa_tskim_notify_comp(tskim);
3241                 break;
3242
3243         case BFA_TSKIM_SM_HWFAIL:
3244                 break;
3245
3246         default:
3247                 bfa_sm_fault(tskim->bfa, event);
3248         }
3249 }
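/*
 * Summary of the TSKIM state machine implemented by the handlers above
 * (added for readability; transitions taken from the code):
 *
 *      uninit --START--> active        (qfull if the request CQ is full;
 *                                       straight to iocleanup if the ITN
 *                                       is already offline)
 *      active --DONE--> iocleanup
 *      active --CLEANUP--> cleanup     (cleanup_qfull if the CQ is full)
 *      cleanup --CLEANUP_DONE--> iocleanup
 *      qfull --QRESUME--> active
 *      qfull --CLEANUP--> iocleanup
 *      cleanup_qfull --DONE/QRESUME--> cleanup
 *      iocleanup --IOS_DONE--> hcb
 *      active/cleanup/iocleanup/qfull/cleanup_qfull --HWFAIL--> hcb
 *      hcb --HCB--> uninit
 */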
3250
3251 static void
3252 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3253 {
3254         struct bfa_tskim_s *tskim = cbarg;
3255
3256         if (!complete) {
3257                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3258                 return;
3259         }
3260
3261         bfa_stats(tskim->itnim, tm_success);
3262         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3263 }
3264
3265 static void
3266 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3267 {
3268         struct bfa_tskim_s *tskim = cbarg;
3269
3270         if (!complete) {
3271                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3272                 return;
3273         }
3274
3275         bfa_stats(tskim->itnim, tm_failures);
3276         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3277                                 BFI_TSKIM_STS_FAILED);
3278 }
3279
3280 static bfa_boolean_t
3281 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3282 {
3283         switch (tskim->tm_cmnd) {
3284         case FCP_TM_TARGET_RESET:
3285                 return BFA_TRUE;
3286
3287         case FCP_TM_ABORT_TASK_SET:
3288         case FCP_TM_CLEAR_TASK_SET:
3289         case FCP_TM_LUN_RESET:
3290         case FCP_TM_CLEAR_ACA:
3291                 return !memcmp(&tskim->lun, &lun, sizeof(lun));
3292
3293         default:
3294                 WARN_ON(1);
3295         }
3296
3297         return BFA_FALSE;
3298 }
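/*
 * Scope-matching illustration (mirrors the gather loop below): a target
 * reset is in scope for every LUN behind the ITN, while the LUN-scoped
 * TM codes match only IOs addressed to the same 8-byte SCSI LUN:
 *
 *      struct scsi_lun scsilun;
 *
 *      int_to_scsilun(cmnd->device->lun, &scsilun);
 *      if (bfa_tskim_match_scope(tskim, scsilun))
 *              (this IO falls within the TM command's scope)
 */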
3299
3300 /*
3301  * Gather IO requests affected by the task management command.
3302  */
3303 static void
3304 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3305 {
3306         struct bfa_itnim_s *itnim = tskim->itnim;
3307         struct bfa_ioim_s *ioim;
3308         struct list_head *qe, *qen;
3309         struct scsi_cmnd *cmnd;
3310         struct scsi_lun scsilun;
3311
3312         INIT_LIST_HEAD(&tskim->io_q);
3313
3314         /*
3315          * Gather any active IO requests first.
3316          */
3317         list_for_each_safe(qe, qen, &itnim->io_q) {
3318                 ioim = (struct bfa_ioim_s *) qe;
3319                 cmnd = (struct scsi_cmnd *) ioim->dio;
3320                 int_to_scsilun(cmnd->device->lun, &scsilun);
3321                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3322                         list_del(&ioim->qe);
3323                         list_add_tail(&ioim->qe, &tskim->io_q);
3324                 }
3325         }
3326
3327         /*
3328          * Fail back any pending IO requests immediately.
3329          */
3330         list_for_each_safe(qe, qen, &itnim->pending_q) {
3331                 ioim = (struct bfa_ioim_s *) qe;
3332                 cmnd = (struct scsi_cmnd *) ioim->dio;
3333                 int_to_scsilun(cmnd->device->lun, &scsilun);
3334                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3335                         list_del(&ioim->qe);
3336                         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3337                         bfa_ioim_tov(ioim);
3338                 }
3339         }
3340 }
3341
3342 /*
3343  * IO cleanup completion
3344  */
3345 static void
3346 bfa_tskim_cleanup_comp(void *tskim_cbarg)
3347 {
3348         struct bfa_tskim_s *tskim = tskim_cbarg;
3349
3350         bfa_stats(tskim->itnim, tm_io_comps);
3351         bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3352 }
3353
3354 /*
3355  * Clean up the IO requests gathered within the TM command scope.
3356  */
3357 static void
3358 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3359 {
3360         struct bfa_ioim_s *ioim;
3361         struct list_head        *qe, *qen;
3362
3363         bfa_wc_init(&tskim->wc, bfa_tskim_cleanup_comp, tskim);
3364
3365         list_for_each_safe(qe, qen, &tskim->io_q) {
3366                 ioim = (struct bfa_ioim_s *) qe;
3367                 bfa_wc_up(&tskim->wc);
3368                 bfa_ioim_cleanup_tm(ioim, tskim);
3369         }
3370
3371         bfa_wc_wait(&tskim->wc);
3372 }
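/*
 * A note on the bfa_wc_s (waiting counter) pattern used above, with
 * semantics inferred from its use in this file:
 *
 *      bfa_wc_init(&wc, cb, arg);      (arm the completion callback)
 *      for each IO in TM scope:
 *              bfa_wc_up(&wc);         (one reference per outstanding IO)
 *              bfa_ioim_cleanup_tm(ioim, tskim);
 *      bfa_wc_wait(&wc);               (drop the initial reference)
 *
 * Each completing IO calls bfa_tskim_iodone() and thus bfa_wc_down();
 * when the count reaches zero, bfa_tskim_cleanup_comp() raises
 * BFA_TSKIM_SM_IOS_DONE.
 */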
3373
3374 /*
3375  * Send task management request to firmware.
3376  */
3377 static bfa_boolean_t
3378 bfa_tskim_send(struct bfa_tskim_s *tskim)
3379 {
3380         struct bfa_itnim_s *itnim = tskim->itnim;
3381         struct bfi_tskim_req_s *m;
3382
3383         /*
3384          * check for room in queue to send request now
3385          */
3386         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3387         if (!m)
3388                 return BFA_FALSE;
3389
3390         /*
3391          * build i/o request message next
3392          */
3393         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3394                         bfa_fn_lpu(tskim->bfa));
3395
3396         m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3397         m->itn_fhdl = tskim->itnim->rport->fw_handle;
3398         m->t_secs = tskim->tsecs;
3399         m->lun = tskim->lun;
3400         m->tm_flags = tskim->tm_cmnd;
3401
3402         /*
3403          * queue I/O message to firmware
3404          */
3405         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3406         return BFA_TRUE;
3407 }
3408
3409 /*
3410  * Send abort request to cleanup an active TM to firmware.
3411  */
3412 static bfa_boolean_t
3413 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3414 {
3415         struct bfa_itnim_s      *itnim = tskim->itnim;
3416         struct bfi_tskim_abortreq_s     *m;
3417
3418         /*
3419          * check for room in queue to send request now
3420          */
3421         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3422         if (!m)
3423                 return BFA_FALSE;
3424
3425         /*
3426          * build i/o request message next
3427          */
3428         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3429                         bfa_fn_lpu(tskim->bfa));
3430
3431         m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
3432
3433         /*
3434          * queue I/O message to firmware
3435          */
3436         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3437         return BFA_TRUE;
3438 }
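/*
 * Both senders above follow the same request-queue convention; a sketch
 * of the pattern (not new driver code):
 *
 *      m = bfa_reqq_next(bfa, reqq);           (peek a free CQ element)
 *      if (!m)
 *              return BFA_FALSE;               (caller parks on bfa_reqq_wait())
 *      (fill in the bfi message; multi-byte fields via cpu_to_be16())
 *      bfa_reqq_produce(bfa, reqq, m->mh);     (hand the element to firmware)
 *      return BFA_TRUE;
 */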
3439
3440 /*
3441  * Resume a task management command waiting for room in the request queue.
3442  */
3443 static void
3444 bfa_tskim_qresume(void *cbarg)
3445 {
3446         struct bfa_tskim_s *tskim = cbarg;
3447
3448         bfa_stats(tskim->itnim, tm_qresumes);
3449         bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3450 }
3451
3452 /*
3453  * Clean up IOs associated with a task management command on IOC failures.
3454  */
3455 static void
3456 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3457 {
3458         struct bfa_ioim_s *ioim;
3459         struct list_head        *qe, *qen;
3460
3461         list_for_each_safe(qe, qen, &tskim->io_q) {
3462                 ioim = (struct bfa_ioim_s *) qe;
3463                 bfa_ioim_iocdisable(ioim);
3464         }
3465 }
3466
3467 /*
3468  * Notification of completion from a related ioim.
3469  */
3470 void
3471 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3472 {
3473         bfa_wc_down(&tskim->wc);
3474 }
3475
3476 /*
3477  * Handle IOC h/w failure notification from itnim.
3478  */
3479 void
3480 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3481 {
3482         tskim->notify = BFA_FALSE;
3483         bfa_stats(tskim->itnim, tm_iocdowns);
3484         bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3485 }
3486
3487 /*
3488  * Clean up the TM command and associated IOs as part of ITNIM offline.
3489  */
3490 void
3491 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3492 {
3493         tskim->notify = BFA_TRUE;
3494         bfa_stats(tskim->itnim, tm_cleanups);
3495         bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3496 }
3497
3498 /*
3499  * Memory allocation and initialization.
3500  */
3501 void
3502 bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3503 {
3504         struct bfa_tskim_s *tskim;
3505         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
3506         u16     i;
3507
3508         INIT_LIST_HEAD(&fcpim->tskim_free_q);
3509         INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3510
3511         tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3512         fcpim->tskim_arr = tskim;
3513
3514         for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3515                 /*
3516                  * initialize TSKIM
3517                  */
3518                 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3519                 tskim->tsk_tag = i;
3520                 tskim->bfa      = fcpim->bfa;
3521                 tskim->fcpim    = fcpim;
3522                 tskim->notify  = BFA_FALSE;
3523                 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3524                                         tskim);
3525                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3526
3527                 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3528         }
3529
3530         bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3531 }
3532
3533 void
3534 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3535 {
3536         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3537         struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3538         struct bfa_tskim_s *tskim;
3539         u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
3540
3541         tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3542         WARN_ON(tskim->tsk_tag != tsk_tag);
3543
3544         tskim->tsk_status = rsp->tsk_status;
3545
3546         /*
3547          * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3548          * requests. All other statuses are for normal completions.
3549          */
3550         if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3551                 bfa_stats(tskim->itnim, tm_cleanup_comps);
3552                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3553         } else {
3554                 bfa_stats(tskim->itnim, tm_fw_rsps);
3555                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3556         }
3557 }
3558
3559
3560 struct bfa_tskim_s *
3561 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3562 {
3563         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3564         struct bfa_tskim_s *tskim;
3565
3566         bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3567
3568         if (tskim)
3569                 tskim->dtsk = dtsk;
3570
3571         return tskim;
3572 }
3573
3574 void
3575 bfa_tskim_free(struct bfa_tskim_s *tskim)
3576 {
3577         WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3578         list_del(&tskim->qe);
3579         list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3580 }
3581
3582 /*
3583  * Start a task management command.
3584  *
3585  * @param[in]   tskim   BFA task management command instance
3586  * @param[in]   itnim   i-t nexus for the task management command
3587  * @param[in]   lun     lun, if applicable
3588  * @param[in]   tm_cmnd Task management command code.
3589  * @param[in]   t_secs  Timeout in seconds
3590  *
3591  * @return None.
3592  */
3593 void
3594 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3595                         struct scsi_lun lun,
3596                         enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3597 {
3598         tskim->itnim    = itnim;
3599         tskim->lun      = lun;
3600         tskim->tm_cmnd = tm_cmnd;
3601         tskim->tsecs    = tsecs;
3602         tskim->notify  = BFA_FALSE;
3603         bfa_stats(itnim, tm_cmnds);
3604
3605         list_add_tail(&tskim->qe, &itnim->tsk_q);
3606         bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3607 }
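/*
 * A usage sketch (illustrative only, not part of the driver): a LUN
 * reset issued from the driver layer would look roughly like this.
 * BFAD_TM_TIMEOUT is an invented timeout value.
 *
 *      struct bfa_tskim_s *tskim;
 *      struct scsi_lun lun;
 *
 *      tskim = bfa_tskim_alloc(bfa, dtsk);
 *      if (!tskim)
 *              return BFA_STATUS_DEVBUSY;      (tskim_free_q exhausted)
 *
 *      int_to_scsilun(cmnd->device->lun, &lun);
 *      bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, BFAD_TM_TIMEOUT);
 *      (completion is reported through bfa_cb_tskim_done())
 */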
3608
3609 void
3610 bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3611 {
3612         struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
3613         struct list_head        *qe;
3614         int     i;
3615
3616         for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3617                 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3618                 list_add_tail(qe, &fcpim->tskim_unused_q);
3619         }
3620 }
3621
3622 /* BFA FCP module - parent module for fcpim */
3623
3624 BFA_MODULE(fcp);
3625
3626 static void
3627 bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3628                 struct bfa_s *bfa)
3629 {
3630         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3631         struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3632         struct bfa_mem_dma_s *seg_ptr;
3633         u16     nsegs, idx, per_seg_ios, num_io_req;
3634         u32     km_len = 0;
3635
3636         /*
3637          * Zero is an allowed config value for num_ioim_reqs and
3638          * num_fwtio_reqs. If the values are non-zero, clamp them to the
3638          * supported range.
3639          */
3640         if (cfg->fwcfg.num_ioim_reqs &&
3641             cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3642                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3643         else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3644                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3645
3646         if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3647                 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3648
3649         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3650         if (num_io_req > BFA_IO_MAX) {
3651                 if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3652                         cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3653                         cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3654                 } else if (cfg->fwcfg.num_fwtio_reqs)
3655                         cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3656                 else
3657                         cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3658         }
3659
3660         bfa_fcpim_meminfo(cfg, &km_len);
3661
3662         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3663         km_len += num_io_req * sizeof(struct bfa_iotag_s);
3664         km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3665
3666         /* dma memory */
3667         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3668         per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3669
3670         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3671                 if (num_io_req >= per_seg_ios) {
3672                         num_io_req -= per_seg_ios;
3673                         bfa_mem_dma_setup(minfo, seg_ptr,
3674                                 per_seg_ios * BFI_IOIM_SNSLEN);
3675                 } else
3676                         bfa_mem_dma_setup(minfo, seg_ptr,
3677                                 num_io_req * BFI_IOIM_SNSLEN);
3678         }
3679
3680         /* kva memory */
3681         bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3682 }
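/*
 * A worked example for the sizing above (hypothetical numbers, assuming
 * for illustration that BFA_IO_MAX == 2000): requesting 1500 ioim plus
 * 1500 fwtio requests exceeds BFA_IO_MAX, so each count is clamped to
 * BFA_IO_MAX/2 == 1000. The 2000 surviving requests then need
 * 2000 * BFI_IOIM_SNSLEN bytes of sense-buffer DMA memory, laid out in
 * nsegs segments holding at most per_seg_ios requests each.
 */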
3683
3684 static void
3685 bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3686                 struct bfa_pcidev_s *pcidev)
3687 {
3688         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3689         struct bfa_mem_dma_s *seg_ptr;
3690         u16     idx, nsegs, num_io_req;
3691
3692         fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3693         fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
3694         fcp->num_itns   = cfg->fwcfg.num_rports;
3695         fcp->bfa = bfa;
3696
3697         /*
3698          * Set up the pool of snsbase addresses that is passed to firmware
3699          * as part of bfi_iocfc_cfg_s.
3700          */
3701         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3702         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3703
3704         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3705
3706                 if (!bfa_mem_dma_virt(seg_ptr))
3707                         break;
3708
3709                 fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3710                 fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3711                 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3712         }
3713
3714         bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3715
3716         bfa_iotag_attach(fcp);
3717
3718         fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3719         bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3720                         (fcp->num_itns * sizeof(struct bfa_itn_s));
3721         memset(fcp->itn_arr, 0,
3722                         (fcp->num_itns * sizeof(struct bfa_itn_s)));
3723 }
3724
3725 static void
3726 bfa_fcp_detach(struct bfa_s *bfa)
3727 {
3728 }
3729
3730 static void
3731 bfa_fcp_start(struct bfa_s *bfa)
3732 {
3733         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3734
3735         /*
3736          * bfa_init() with flash read is complete. Now invalidate the stale
3737          * contents of the LUN mask, such as unit attention, rp tag and lp tag.
3738          */
3739         bfa_ioim_lm_init(fcp->bfa);
3740 }
3741
3742 static void
3743 bfa_fcp_stop(struct bfa_s *bfa)
3744 {
3745 }
3746
3747 static void
3748 bfa_fcp_iocdisable(struct bfa_s *bfa)
3749 {
3750         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3751
3752         /* Enqueue unused ioim resources to free_q */
3753         list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
3754
3755         bfa_fcpim_iocdisable(fcp);
3756 }
3757
3758 void
3759 bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
3760 {
3761         struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
3762         struct list_head        *qe;
3763         int     i;
3764
3765         for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3766                 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3767                 list_add_tail(qe, &mod->iotag_unused_q);
3768         }
3769 }
3770
3771 void
3772 bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3773                 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3774 {
3775         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3776         struct bfa_itn_s *itn;
3777
3778         itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3779         itn->isr = isr;
3780 }
3781
3782 /*
3783  * Itn interrupt processing.
3784  */
3785 void
3786 bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3787 {
3788         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3789         union bfi_itn_i2h_msg_u msg;
3790         struct bfa_itn_s *itn;
3791
3792         msg.msg = m;
3793         itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3794
3795         if (itn->isr)
3796                 itn->isr(bfa, m);
3797         else
3798                 WARN_ON(1);
3799 }
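/*
 * Registration sketch (illustrative): a consumer registers its per-rport
 * message handler once, after which bfa_itn_isr() demultiplexes firmware
 * messages by bfa_handle, e.g.:
 *
 *      bfa_itn_create(bfa, rport, bfa_itnim_isr);
 */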
3800
3801 void
3802 bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3803 {
3804         struct bfa_iotag_s *iotag;
3805         u16     num_io_req, i;
3806
3807         iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3808         fcp->iotag_arr = iotag;
3809
3810         INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3811         INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3812         INIT_LIST_HEAD(&fcp->iotag_unused_q);
3813
3814         num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3815         for (i = 0; i < num_io_req; i++, iotag++) {
3816                 memset(iotag, 0, sizeof(struct bfa_iotag_s));
3817                 iotag->tag = i;
3818                 if (i < fcp->num_ioim_reqs)
3819                         list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3820                 else
3821                         list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3822         }
3823
3824         bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3825 }
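/*
 * A note on the iotag layout built above (derived from the loop): tags
 * [0, num_ioim_reqs) seed iotag_ioim_free_q for initiator-mode IO, and
 * tags [num_ioim_reqs, num_ioim_reqs + num_fwtio_reqs) seed
 * iotag_tio_free_q for target IO. bfa_ioim_free() relies on the same
 * split to return a tag to the matching free list.
 */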