/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"

#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);

u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
{
	switch (cmd) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;
	default:
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

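/*
 * Copy the D2H setup FIS from the slot's status buffer into the libsas
 * response so that libata can see the device's completion state.
 */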
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

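/* Map libsas structures to this driver's (LLDD) private data. */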
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

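/*
 * Slot index (command tag) management: free tags are tracked in a plain
 * bitmap; callers hold hisi_hba->lock around alloc/free.
 */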
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	if (task) {
		struct device *dev = hisi_hba->dev;
		struct domain_device *device = task->dev;
		struct hisi_sas_device *sas_dev = device->lldd_dev;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter, slot->n_elem,
					     task->data_dir);

		task->lldd_task = NULL;

		if (sas_dev)
			atomic64_dec(&sas_dev->running_req);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

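/* Thin wrappers dispatching command preparation to the hw-specific ops. */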
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				    struct hisi_sas_slot *slot,
				    int device_id, int abort_flag,
				    int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
					device_id, abort_flag, tag_to_abort);
}

/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}

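/*
 * Prepare a task for delivery: validate the port and device, map the
 * scatterlist, allocate a slot index and delivery-queue entry, then let
 * the protocol-specific prep routine fill in the command header.
 * Called with the delivery queue lock held; *pass is incremented so the
 * caller knows there is something to deliver.
 */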
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
			      *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	list_add_tail(&slot->entry, &sas_dev->list);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);
	++(*pass);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

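/* Common entry point for queued commands and TMFs. */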
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_dq *dq = sas_dev->dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	/* protect task_prep and start_delivery sequence */
	spin_lock_irqsave(&dq->lock, flags);
	rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass))
		hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return rc;
}

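/*
 * A phy came up: push the negotiated link rate and received identify
 * frame (or SATA FIS) up to libsas.
 */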
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing to do */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

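/*
 * Find a free device slot in hisi_hba->devices[]; delivery queues are
 * assigned round-robin by device index.
 */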
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	return 0;
}

static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "found dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

	hisi_sas_dereg_device(hisi_hba, device);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	memset(sas_dev, 0, sizeof(*sas_dev));
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_enable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
	/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 5

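/*
 * Issue a TMF (or a reset FIS for SATA) as a slow task and wait for
 * completion, retrying up to TASK_RETRY times on soft failures.
 */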
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.data = (unsigned long) task;
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout\n");
				if (slot)
					slot->task = NULL;

				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

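/*
 * After a port id may have changed, re-register each device on the port
 * in the ITCT and refresh the linkrate of directly attached devices.
 */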
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
		struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;
		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || (device->port != sas_port))
			continue;

		hisi_hba->hw->free_device(hisi_hba, sas_dev);

		/* Update linkrate of directly attached device. */
		if (!device->parent)
			device->linkrate = linkrate;

		hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & (1 << phy_no)) {
			if (do_port_check && sas_port) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;
				port->id = phy->port_id;
				hisi_sas_refresh_port_id(hisi_hba,
						sas_port, sas_phy->linkrate);

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
	}

	drain_workqueue(hisi_hba->shost->work_q);
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_dbg(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		goto out;
	}
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	sas_ha->notify_ha_event(sas_ha, HAE_RESET);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	drain_workqueue(hisi_hba->wq);
	drain_workqueue(shost->work_q);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_dbg(dev, "controller reset complete\n");

out:
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}

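/*
 * Task abort combines a TMF sent to the target with an internal abort
 * sent to the controller; the slot is only freed once both agree the
 * command is no longer in flight.
 */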
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			hisi_sas_internal_task_abort(hisi_hba, device,
						     HISI_SAS_INT_ABT_DEV, 0);
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (rc == TMF_RESP_FUNC_FAILED) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc == TMF_RESP_FUNC_FAILED)
			goto out;
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;

	return hisi_sas_controller_reset(hisi_hba);
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;

	list_add_tail(&slot->entry, &sas_dev->list);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	if (!hisi_hba->hw->prep_abort)
		return -EOPNOTSUPP;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.data = (unsigned long)task;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout.\n");
			goto exit;
		}
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct scsi_host_template _hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};

struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

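/*
 * Allocate all DMA-able structures for the host: delivery/completion
 * queues, ITCT, IOST, breakpoint tables, slot buffers and the tag bitmap.
 */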
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = sizeof(struct hisi_sas_slot_buf_table);
	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
	if (!hisi_hba->buffer_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->buffer_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

static void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}

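/*
 * Read controller configuration (SAS address, phy/queue counts, reset
 * registers) from DT or ACPI device properties.
 */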
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_add);

int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);