1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright 2014 Cisco Systems, Inc. All rights reserved.
4 #include <linux/errno.h>
5 #include <linux/mempool.h>
7 #include <scsi/scsi_tcq.h>
14 /* snic target types */
/* Printable names for target types, indexed by enum value (DAS/SAN). */
/* NOTE(review): array terminator elided in this listing. */
15 static const char * const snic_tgt_type_str[] = {
16 [SNIC_TGT_DAS] = "DAS",
17 [SNIC_TGT_SAN] = "SAN",
/*
 * snic_tgt_type_to_str : map a target type to its printable name.
 * Returns "Unknown" for values outside (SNIC_TGT_NONE, SNIC_TGT_SAN],
 * guarding the snic_tgt_type_str[] lookup against bad indices.
 * NOTE(review): function braces elided in this listing.
 */
20 static inline const char *
21 snic_tgt_type_to_str(int typ)
23 return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ?
24 snic_tgt_type_str[typ] : "Unknown");
/* Printable names for target states, indexed by state value. */
/* NOTE(review): array terminator elided in this listing. */
27 static const char * const snic_tgt_state_str[] = {
28 [SNIC_TGT_STAT_INIT] = "INIT",
29 [SNIC_TGT_STAT_ONLINE] = "ONLINE",
30 [SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
31 [SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
/*
 * snic_tgt_state_to_str : map a target state to its printable name.
 * Returns "UNKNOWN" for values outside
 * [SNIC_TGT_STAT_INIT, SNIC_TGT_STAT_DEL], guarding the array lookup.
 * NOTE(review): return-type line and braces elided in this listing.
 */
35 snic_tgt_state_to_str(int state)
37 return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ?
38 snic_tgt_state_str[state] : "UNKNOWN");
42 * Initiate report_tgt req desc
/*
 * snic_report_tgt_init : build a SNIC_REQ_REPORT_TGTS host request.
 * Encodes the IO header (untagged, SCSI_NO_TAG, host id 'hid'), then
 * attaches a single-element scatter/gather descriptor pointing at the
 * caller's DMA-mapped response buffer (rsp_buf_pa, len bytes).
 * NOTE(review): several lines (e.g. the snic_io_hdr_enc continuation
 * carrying 'ctx', and the function braces) are elided in this listing.
 */
45 snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
46 dma_addr_t rsp_buf_pa, ulong ctx)
48 struct snic_sg_desc *sgd = NULL;
51 snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
/* One SG entry: firmware writes the target list into the response buf. */
54 req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
55 sgd = req_to_sgl(req);
56 sgd[0].addr = cpu_to_le64(rsp_buf_pa);
57 sgd[0].len = cpu_to_le32(len);
/* Record the SGL's (virtual) address in the request for the firmware. */
59 req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
63 * snic_queue_report_tgt_req: Queues report target request.
/*
 * Allocates an untagged request (rqi) plus a DMA-able response buffer
 * sized for ntgts snic_tgt_id entries, maps it for DMA_FROM_DEVICE,
 * initializes the REPORT_TGTS descriptor and posts it on the work queue.
 * On queue failure the buffer is unmapped and the request released.
 * NOTE(review): error-return statements, 'else', kfree of buf, and the
 * final return are elided in this listing — see the hedges below.
 */
66 snic_queue_report_tgt_req(struct snic *snic)
68 struct snic_req_info *rqi = NULL;
69 u32 ntgts, buf_len = 0;
74 rqi = snic_req_init(snic, 1);
/* Bound the response size by firmware max targets, if reported. */
80 if (snic->fwinfo.max_tgts)
81 ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
/* (review: 'else' branch — fall back to the SCSI host's max_id) */
83 ntgts = snic->shost->max_id;
85 /* Allocate Response Buffer */
86 SNIC_BUG_ON(ntgts == 0);
/* Extra SNIC_SG_DESC_ALIGN bytes so the buffer can satisfy alignment. */
87 buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
89 buf = kzalloc(buf_len, GFP_KERNEL);
/* (review: allocation-failure path — free rqi and bail) */
91 snic_req_free(snic, rqi);
92 SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
/* Buffer must be SG-descriptor aligned before handing to the device. */
98 SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
100 pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
101 if (dma_mapping_error(&snic->pdev->dev, pa)) {
102 SNIC_HOST_ERR(snic->shost,
103 "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
/* (review: mapping-failure cleanup — free buf/rqi, elided lines) */
106 snic_req_free(snic, rqi);
113 SNIC_BUG_ON(pa == 0);
/* Stash the virtual address so the completion handler can find buf. */
114 rqi->sge_va = (ulong) buf;
116 snic_report_tgt_init(rqi->req,
/* Track the untagged request before posting to the firmware WQ. */
123 snic_handle_untagged_req(snic, rqi);
125 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
/* Queueing failed: undo the DMA mapping and release the request. */
127 dma_unmap_single(&snic->pdev->dev, pa, buf_len,
131 snic_release_untagged_req(snic, rqi);
132 SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");
137 SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");
142 SNIC_HOST_ERR(snic->shost,
143 "Queuing Report Targets Failed, err = %d\n",
146 } /* end of snic_queue_report_tgt_req */
/*
 * snic_scsi_scan_tgt : work handler (tgt->scan_work) that asks the SCSI
 * midlayer to scan one snic target, then clears SNIC_TGT_SCAN_PENDING
 * under the host lock so snic_tgt_del() knows the scan has finished.
 * NOTE(review): scsi_scan_target() argument lines are elided here.
 */
150 snic_scsi_scan_tgt(struct work_struct *work)
152 struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
153 struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
156 SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);
157 scsi_scan_target(&tgt->dev,
/* Scan done: drop the pending flag under host_lock. */
163 spin_lock_irqsave(shost->host_lock, flags);
164 tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
165 spin_unlock_irqrestore(shost->host_lock, flags);
166 } /* end of snic_scsi_scan_tgt */
/*
 * snic_tgt_lookup : find an existing snic_tgt on snic->disc.tgt_list
 * whose id matches the (little-endian) tgtid->tgt_id.
 * NOTE(review): the return statements (matched tgt / NULL) are elided
 * in this listing.
 */
171 static struct snic_tgt *
172 snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
174 struct list_head *cur, *nxt;
175 struct snic_tgt *tgt = NULL;
177 list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
178 tgt = list_entry(cur, struct snic_tgt, list);
179 if (tgt->id == le32_to_cpu(tgtid->tgt_id))
185 } /* end of snic_tgt_lookup */
188 * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
/*
 * Device-model release callback: logs the permanent deletion and asserts
 * the target is already unlinked from the discovery list.
 * NOTE(review): the kfree(tgt) presumably done here is elided — confirm
 * against the full source.
 */
191 snic_tgt_dev_release(struct device *dev)
193 struct snic_tgt *tgt = dev_to_tgt(dev);
195 SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
196 "Target Device ID %d (%s) Permanently Deleted.\n",
/* Must already be off disc.tgt_list before the last ref drops. */
200 SNIC_BUG_ON(!list_empty(&tgt->list));
205 * snic_tgt_del : work function to delete snic_tgt
/*
 * Tear-down sequence: wait for any pending scan, block IO to child
 * sdevs, abort in-flight IOs, unblock as TRANSPORT_OFFLINE to flush,
 * then remove the SCSI target and drop the device reference (which
 * eventually triggers snic_tgt_dev_release).
 */
208 snic_tgt_del(struct work_struct *work)
210 struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
211 struct Scsi_Host *shost = snic_tgt_to_shost(tgt);
/* A scan queued on the shost workqueue must complete first. */
213 if (tgt->flags & SNIC_TGT_SCAN_PENDING)
214 scsi_flush_work(shost);
216 /* Block IOs on child devices, stops new IOs */
217 scsi_target_block(&tgt->dev);
220 snic_tgt_scsi_abort_io(tgt);
222 /* Unblock IOs now, to flush if there are any. */
223 scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);
225 /* Delete SCSI Target and sdevs */
226 scsi_remove_target(&tgt->dev); /* ?? */
227 device_del(&tgt->dev);
228 put_device(&tgt->dev);
229 } /* end of snic_tgt_del */
231 /* snic_tgt_create: checks for existence of snic_tgt, if it doesn't
/*
 * Looks up the target by id first; otherwise allocates and initializes
 * a new snic_tgt, plugs it into the driver-model tree under the shost,
 * names it by type (DAS/SAN, defaulting to the DAS name for unknown
 * types), links it onto disc.tgt_list under host_lock, does device_add
 * (with rollback on failure), and queues a SCSI scan for it.
 * NOTE(review): early returns, switch case labels/breaks, closing
 * braces and the final return are elided in this listing.
 */
234 static struct snic_tgt *
235 snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
237 struct snic_tgt *tgt = NULL;
/* Reuse an already-discovered target if present. */
241 tgt = snic_tgt_lookup(snic, tgtid);
243 /* update the information if required */
247 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
249 SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
255 INIT_LIST_HEAD(&tgt->list);
256 tgt->id = le32_to_cpu(tgtid->tgt_id);
/* Firmware should only ever report DAS or SAN types. */
259 SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
260 tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);
263 * Plugging into SML Device Tree
265 tgt->tdata.disc_id = 0;
266 tgt->state = SNIC_TGT_STAT_INIT;
267 device_initialize(&tgt->dev);
/* Hold a ref on the shost gendev while the target exists. */
268 tgt->dev.parent = get_device(&snic->shost->shost_gendev);
269 tgt->dev.release = snic_tgt_dev_release;
270 INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
271 INIT_WORK(&tgt->del_work, snic_tgt_del);
272 switch (tgt->tdata.typ) {
274 dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
275 snic->shost->host_no, tgt->channel, tgt->id);
279 dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
280 snic->shost->host_no, tgt->channel, tgt->id);
/* Unknown type: warn but fall back to the DAS naming scheme. */
284 SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n");
285 dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
286 snic->shost->host_no, tgt->channel, tgt->id);
/* Publish the target: list insertion + id assignment under host_lock. */
290 spin_lock_irqsave(snic->shost->host_lock, flags);
291 list_add_tail(&tgt->list, &snic->disc.tgt_list);
292 tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
293 tgt->state = SNIC_TGT_STAT_ONLINE;
294 spin_unlock_irqrestore(snic->shost->host_lock, flags);
296 SNIC_HOST_INFO(snic->shost,
297 "Tgt %d, type = %s detected. Adding..\n",
298 tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));
300 ret = device_add(&tgt->dev);
/* device_add failed: drop parent ref and unlink under host_lock. */
302 SNIC_HOST_ERR(snic->shost,
303 "Snic Tgt: device_add, with err = %d\n",
306 put_device(&snic->shost->shost_gendev);
307 spin_lock_irqsave(snic->shost->host_lock, flags);
308 list_del(&tgt->list);
309 spin_unlock_irqrestore(snic->shost->host_lock, flags);
316 SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));
/* Scan runs asynchronously on the shost workqueue. */
318 scsi_queue_work(snic->shost, &tgt->scan_work);
321 } /* end of snic_tgt_create */
323 /* Handler for discovery */
/*
 * snic_handle_tgt_disc : work handler (snic->tgt_work) that turns the
 * report-targets response buffer (disc.rtgt_info, rtgt_cnt entries)
 * into snic_tgt objects.  Bails out early if the driver is being
 * removed (freeing the buffer), and restarts discovery if another
 * request arrived while this one was in flight (disc.req_cnt != 0).
 * NOTE(review): early returns, the failure 'break', and the kfree of
 * rtgt_info before clearing it are elided in this listing.
 */
325 snic_handle_tgt_disc(struct work_struct *work)
327 struct snic *snic = container_of(work, struct snic, tgt_work);
328 struct snic_tgt_id *tgtid = NULL;
329 struct snic_tgt *tgt = NULL;
/* Driver teardown in progress: just free the response buffer. */
333 spin_lock_irqsave(&snic->snic_lock, flags);
334 if (snic->in_remove) {
335 spin_unlock_irqrestore(&snic->snic_lock, flags);
336 kfree(snic->disc.rtgt_info);
340 spin_unlock_irqrestore(&snic->snic_lock, flags);
342 mutex_lock(&snic->disc.mutex);
343 /* Discover triggered during disc in progress */
344 if (snic->disc.req_cnt) {
345 snic->disc.state = SNIC_DISC_DONE;
346 snic->disc.req_cnt = 0;
347 mutex_unlock(&snic->disc.mutex);
348 kfree(snic->disc.rtgt_info);
349 snic->disc.rtgt_info = NULL;
351 SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");
352 /* Start Discovery Again */
353 snic_disc_start(snic);
/* Normal path: create a snic_tgt per reported target id. */
358 tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
360 SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);
362 for (i = 0; i < snic->disc.rtgt_cnt; i++) {
363 tgt = snic_tgt_create(snic, &tgtid[i]);
365 int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);
367 SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
368 snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
373 snic->disc.rtgt_info = NULL;
374 snic->disc.state = SNIC_DISC_DONE;
375 mutex_unlock(&snic->disc.mutex);
377 SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");
380 } /* end of snic_handle_tgt_disc */
/*
 * snic_report_tgt_cmpl_handler : firmware completion for REPORT_TGTS.
 * Decodes the completion header, recovers the request (rqi) from the
 * encoded context and the response buffer from rqi->sge_va, logs the
 * reported targets, then hands the buffer to snic->tgt_work for target
 * creation.  The DMA mapping is undone here; the buffer itself is
 * freed later by the tgt_work handler.
 * NOTE(review): return-type line, braces, the zero-target early path
 * and the return statement are elided in this listing.
 */
384 snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
388 u32 cmnd_id, hid, tgt_cnt = 0;
390 struct snic_req_info *rqi = NULL;
391 struct snic_tgt_id *tgtid;
/* ctx round-trips the rqi pointer through the firmware header. */
394 snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
395 rqi = (struct snic_req_info *) ctx;
396 tgtid = (struct snic_tgt_id *) rqi->sge_va;
398 tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
400 SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
406 /* printing list of targets here */
407 SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);
/* Firmware must not report more targets than it advertised. */
409 SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);
411 for (i = 0; i < tgt_cnt; i++)
412 SNIC_HOST_INFO(snic->shost,
414 le32_to_cpu(tgtid[i].tgt_id));
417 * Queue work for further processing,
418 * Response Buffer Memory is freed after creating targets
420 snic->disc.rtgt_cnt = tgt_cnt;
421 snic->disc.rtgt_info = (u8 *) tgtid;
422 queue_work(snic_glob->event_q, &snic->tgt_work);
426 /* Unmap Response Buffer */
427 snic_pci_unmap_rsp_buf(snic, rqi);
432 snic_release_untagged_req(snic, rqi);
435 } /* end of snic_report_tgt_cmpl_handler */
437 /* Discovery init fn */
/*
 * snic_disc_init : one-time setup of the discovery context — empty
 * target list, mutex, initial state and cleared response-buffer pointer.
 */
439 snic_disc_init(struct snic_disc *disc)
441 INIT_LIST_HEAD(&disc->tgt_list);
442 mutex_init(&disc->mutex);
444 disc->nxt_tgt_id = 0;
445 disc->state = SNIC_DISC_INIT;
448 disc->rtgt_info = NULL;
450 } /* end of snic_disc_init */
452 /* Discovery, uninit fn */
454 snic_disc_term(struct snic *snic)
456 struct snic_disc *disc = &snic->disc;
458 mutex_lock(&disc->mutex);
461 SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
463 mutex_unlock(&disc->mutex);
467 * snic_disc_start: Discovery Start ...
/*
 * Kicks off target discovery: refuses while the driver is being
 * removed, coalesces a request that arrives while discovery is already
 * SNIC_DISC_PENDING, otherwise marks discovery pending and queues the
 * report-targets request to firmware.
 * NOTE(review): early returns and the req_cnt bump in the PENDING
 * branch are elided in this listing.
 */
470 snic_disc_start(struct snic *snic)
472 struct snic_disc *disc = &snic->disc;
476 SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");
478 spin_lock_irqsave(&snic->snic_lock, flags);
479 if (snic->in_remove) {
480 spin_unlock_irqrestore(&snic->snic_lock, flags);
481 SNIC_ERR("snic driver removal in progress ...\n");
486 spin_unlock_irqrestore(&snic->snic_lock, flags);
488 mutex_lock(&disc->mutex);
/* Discovery already running: let the in-flight one restart us. */
489 if (disc->state == SNIC_DISC_PENDING) {
491 mutex_unlock(&disc->mutex);
495 disc->state = SNIC_DISC_PENDING;
496 mutex_unlock(&disc->mutex);
498 ret = snic_queue_report_tgt_req(snic);
500 SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);
503 } /* end of snic_disc_start */
/*
 * snic_handle_disc : work handler (snic->disc_work) that starts
 * discovery and logs any failure returned by snic_disc_start().
 */
509 snic_handle_disc(struct work_struct *work)
511 struct snic *snic = container_of(work, struct snic, disc_work);
514 SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");
516 ret = snic_disc_start(snic);
521 SNIC_HOST_ERR(snic->shost,
522 "disc_work: Discovery Failed w/ err = %d\n",
524 } /* end of snic_disc_work */
527 * snic_tgt_del_all : cleanup all snic targets
528 * Called on unbinding the interface
/*
 * Flushes the shost workqueue (pending scans), then — holding
 * disc.mutex and host_lock — marks every target SNIC_TGT_STAT_DEL,
 * unlinks it, and queues its del_work on the global event queue.
 * Finally waits for all queued deletions to finish.
 */
531 snic_tgt_del_all(struct snic *snic)
533 struct snic_tgt *tgt = NULL;
534 struct list_head *cur, *nxt;
/* Complete any in-flight scans before tearing targets down. */
537 scsi_flush_work(snic->shost);
539 mutex_lock(&snic->disc.mutex);
540 spin_lock_irqsave(snic->shost->host_lock, flags);
542 list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
543 tgt = list_entry(cur, struct snic_tgt, list);
544 tgt->state = SNIC_TGT_STAT_DEL;
/* list_del_init so snic_tgt_dev_release's list_empty check holds. */
545 list_del_init(&tgt->list);
546 SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
547 queue_work(snic_glob->event_q, &tgt->del_work);
550 spin_unlock_irqrestore(snic->shost->host_lock, flags);
551 mutex_unlock(&snic->disc.mutex);
/* Block until every queued snic_tgt_del has run. */
553 flush_workqueue(snic_glob->event_q);
554 } /* end of snic_tgt_del_all */