// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, HiSilicon Ltd.
 */

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/hisi_acc_qm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>

#include "hisi_acc_vfio_pci.h"

/* Return 0 when the VM acc device is ready, -ETIMEDOUT on hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
				val, !(val & 0x1), MB_POLL_PERIOD_US,
				MB_POLL_TIMEOUT_US);
}

/*
 * Each state Reg is checked 100 times,
 * with a delay of 100 microseconds after each check
 */
static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
{
	int check_times = 0;
	u32 state;

	state = readl(qm->io_base + regs);
	while (state && check_times < ERROR_CHECK_TIMEOUT) {
		udelay(CHECK_DELAY_TIME);
		state = readl(qm->io_base + regs);
		check_times++;
	}

	return state;
}

static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
			u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++) {
		data[i] = readl(qm->io_base + reg_addr);
		reg_addr += QM_REG_ADDR_OFFSET;
	}

	return 0;
}

static int qm_write_regs(struct hisi_qm *qm, u32 reg,
			 u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++)
		writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);

	return 0;
}

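/*
 * Read the VF's SQC VFT entry via a mailbox command: the queue base is
 * decoded into *base and the queue pair count is the return value (or
 * a negative error code on mailbox failure).
 */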
static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		   QM_XQC_ADDR_OFFSET);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		 QM_XQC_ADDR_OFFSET);

	return 0;
}

static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		 QM_XQC_ADDR_OFFSET);

	return 0;
}

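/*
 * qm_get_regs()/qm_set_regs() below save and restore the set of VF QM
 * registers that, together with the queue context addresses, make up
 * the device state carried in struct acc_vf_data.
 */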
static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
			   &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	/* Check VF state */
	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
		return -EBUSY;
	}

	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
			    &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
		  u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}

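/*
 * Look up the queue pair base/number assigned to a VF by driving the
 * PF's VFT config registers directly: select the SQC VFT entry for
 * vf_id, trigger a read and decode the returned 64-bit value.
 */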
static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
	unsigned int val;
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	/* 0 means SQC VFT */
	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
	writel(vf_id, qm->io_base + QM_VFT_CFG);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
		   QM_XQC_ADDR_OFFSET);
	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
		 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static void qm_dev_cmd_init(struct hisi_qm *qm)
{
	/* Clear VF communication status registers. */
	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

	/* Enable PF and VF communication. */
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}

static int vf_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), MB_POLL_PERIOD_US,
				       MB_POLL_TIMEOUT_US)) {
		dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
		return -EINVAL;
	}

	return 0;
}

static void vf_qm_fun_reset(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
}

static int vf_qm_func_stop(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}

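/*
 * Validate the incoming migration stream against the destination VF:
 * the magic, device ID, queue pair count and isolation state must all
 * match before any device state is loaded.
 */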
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	u32 que_iso_state;
	int ret;

	if (migf->total_length < QM_MATCH_SIZE)
		return -EINVAL;

	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
		return -EINVAL;
	}

	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
		dev_err(dev, "failed to match VF devices\n");
		return -EINVAL;
	}

	/* VF qp num check */
	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums\n");
		return -EINVAL;
	}

	if (ret != vf_data->qp_num) {
		dev_err(dev, "failed to match VF qp num\n");
		return -EINVAL;
	}

	vf_qm->qp_num = ret;

	/* VF isolation state check */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	if (vf_data->que_iso_cfg != que_iso_state) {
		dev_err(dev, "failed to match isolation state\n");
		return -EINVAL;
	}

	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_STATE\n");
		return ret;
	}

	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
	return 0;
}

static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				struct acc_vf_data *vf_data)
{
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &pf_qm->pdev->dev;
	int vf_id = hisi_acc_vdev->vf_id;
	int ret;

	vf_data->acc_magic = ACC_DEV_MAGIC;
	/* Save device id */
	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

	/* VF qp num save from PF */
	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums!\n");
		return -EINVAL;
	}

	vf_data->qp_num = ret;

	/* VF isolation state save from PF */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
		return ret;
	}

	return 0;
}

static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			   struct hisi_acc_vf_migration_file *migf)
{
	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &qm->pdev->dev;
	struct acc_vf_data *vf_data = &migf->vf_data;
	int ret;

	/* Return if only match data was transferred */
	if (migf->total_length == QM_MATCH_SIZE)
		return 0;

	if (migf->total_length < sizeof(struct acc_vf_data))
		return -EINVAL;

	qm->eqe_dma = vf_data->eqe_dma;
	qm->aeqe_dma = vf_data->aeqe_dma;
	qm->sqc_dma = vf_data->sqc_dma;
	qm->cqc_dma = vf_data->cqc_dma;

	qm->qp_base = vf_data->qp_base;
	qm->qp_num = vf_data->qp_num;

	ret = qm_set_regs(qm, vf_data);
	if (ret) {
		dev_err(dev, "set VF regs failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set sqc failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set cqc failed\n");
		return ret;
	}

	qm_dev_cmd_init(qm);
	return 0;
}

static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	ret = vf_qm_get_match_data(hisi_acc_vdev, vf_data);
	if (ret)
		return ret;

	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
		/* Update state and return with match data */
		vf_data->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
		migf->total_length = QM_MATCH_SIZE;
		return 0;
	}

	vf_data->vf_qm_state = QM_READY;
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

	ret = vf_qm_cache_wb(vf_qm);
	if (ret) {
		dev_err(dev, "failed to writeback QM Cache!\n");
		return ret;
	}

	ret = qm_get_regs(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	/* Every reg is 32 bit, the dma address is 64 bit. */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];

	/* Get the SQC and CQC addresses through the SQC_BT/CQC_BT commands */
	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
	if (ret) {
		dev_err(dev, "failed to read SQC addr!\n");
		return -EINVAL;
	}

	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
	if (ret) {
		dev_err(dev, "failed to read CQC addr!\n");
		return -EINVAL;
	}

	migf->total_length = sizeof(struct acc_vf_data);
	return 0;
}

static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct hisi_acc_vf_core_device,
			    core_device);
}

/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
	struct device *dev = &qm->pdev->dev;
	u32 state;

	/* Check RAS state */
	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM RAS state!\n");
		return -EBUSY;
	}

	/* Check Function Communication state between PF and VF */
	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT state!\n");
		return -EBUSY;
	}
	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT SET state!\n");
		return -EBUSY;
	}

	/* Check submodule task state */
	switch (vf_pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM SEC Core INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
			return -EBUSY;
		}
		break;
	default:
		dev_err(dev, "failed to detect acc module type!\n");
		return -EINVAL;
	}

	return 0;
}

static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->total_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}

static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	if (hisi_acc_vdev->resuming_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
		fput(hisi_acc_vdev->resuming_migf->filp);
		hisi_acc_vdev->resuming_migf = NULL;
	}

	if (hisi_acc_vdev->saving_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
		fput(hisi_acc_vdev->saving_migf->filp);
		hisi_acc_vdev->saving_migf = NULL;
	}
}

/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if one exists.
 */
static void
hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
again:
	spin_lock(&hisi_acc_vdev->reset_lock);
	if (hisi_acc_vdev->deferred_reset) {
		hisi_acc_vdev->deferred_reset = false;
		spin_unlock(&hisi_acc_vdev->reset_lock);
		hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		goto again;
	}
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	spin_unlock(&hisi_acc_vdev->reset_lock);
}

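/*
 * Restart the VF: re-enable PF-VF communication and ring the SQ
 * doorbell for every queue pair. Only meaningful once the VF QM
 * state reached QM_READY.
 */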
static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	if (hisi_acc_vdev->vf_qm_state != QM_READY)
		return;

	/* Make sure the device is enabled */
	qm_dev_cmd_init(vf_qm);

	vf_qm_fun_reset(vf_qm);
}

static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
	int ret;

	/* Check dev compatibility */
	ret = vf_qm_check_match(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to match the VF!\n");
		return ret;
	}

	/* Recover data to VF */
	ret = vf_qm_load_data(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to recover the VF!\n");
		return ret;
	}

	return 0;
}

static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;

	hisi_acc_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}

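/*
 * The resume fd is a write-only stream: seeking is rejected with
 * -ESPIPE and the write offset is tracked through filp->f_pos only.
 */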
static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
					size_t len, loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	loff_t requested_length;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > sizeof(struct acc_vf_data))
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(&migf->vf_data, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*pos += len;
	done = len;
	migf->total_length += len;
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = hisi_acc_vf_resume_write,
	.release = hisi_acc_vf_release_file,
	.llseek = no_llseek,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	return migf;
}

static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
				     loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*pos > migf->total_length) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->total_length - *pos, len);
	if (len) {
		ret = copy_to_user(buf, &migf->vf_data, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += len;
		done = len;
	}
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = hisi_acc_vf_save_read,
	.release = hisi_acc_vf_release_file,
	.llseek = no_llseek,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);

	ret = vf_qm_state_save(hisi_acc_vdev, migf);
	if (ret) {
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	return migf;
}

static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	ret = vf_qm_func_stop(vf_qm);
	if (ret) {
		dev_err(dev, "failed to stop QM VF function!\n");
		return ret;
	}

	ret = hisi_acc_check_int_state(hisi_acc_vdev);
	if (ret) {
		dev_err(dev, "failed to check QM INT state!\n");
		return ret;
	}
	return 0;
}

static struct file *
hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     u32 new)
{
	u32 cur = hisi_acc_vdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_start_device(hisi_acc_vdev);
		return NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}

static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state new_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	while (new_state != hisi_acc_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev,
					      hisi_acc_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(-EINVAL);
			break;
		}

		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
		if (IS_ERR(res))
			break;
		hisi_acc_vdev->mig_state = next_state;
		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
	return res;
}

static int
hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state *curr_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);

	mutex_lock(&hisi_acc_vdev->state_mutex);
	*curr_state = hisi_acc_vdev->mig_state;
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
	return 0;
}

static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
				VFIO_MIGRATION_STOP_COPY)
		return;

	/*
	 * As the higher VFIO layers are holding locks across reset and using
	 * those same locks with the mm_lock we need to prevent ABBA deadlock
	 * with the state_mutex and mm_lock.
	 * In case the state_mutex was taken already we defer the cleanup work
	 * to the unlock flow of the other running context.
	 */
	spin_lock(&hisi_acc_vdev->reset_lock);
	hisi_acc_vdev->deferred_reset = true;
	if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
		spin_unlock(&hisi_acc_vdev->reset_lock);
		return;
	}
	spin_unlock(&hisi_acc_vdev->reset_lock);
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
}

static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct pci_dev *vf_dev = vdev->pdev;

	/*
	 * ACC VF dev BAR2 region consists of both functional register space
	 * and migration control register space. For migration to work, we
	 * need access to both. Hence, we map the entire BAR2 region here.
	 * But unnecessarily exposing the migration BAR region to the Guest
	 * has the potential to prevent/corrupt the Guest migration. Hence,
	 * we restrict access to the migration control space from the
	 * Guest (please see the mmap/ioctl/read/write override functions).
	 *
	 * Please note that it is OK to expose the entire VF BAR if migration
	 * is not supported or required, as this cannot affect the ACC PF
	 * configurations.
	 *
	 * Also the HiSilicon ACC VF devices supported by this driver on
	 * HiSilicon hardware platforms are integrated end point devices
	 * and the platform lacks the capability to perform any PCIe P2P
	 * between these devices.
	 */

	vf_qm->io_base =
		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
	if (!vf_qm->io_base)
		return -EIO;

	vf_qm->fun_type = QM_HW_VF;
	vf_qm->pdev = vf_dev;
	mutex_init(&vf_qm->mailbox_lock);

	return 0;
}

static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm;
	struct pci_driver *pf_driver;

	if (!pdev->is_virtfn)
		return NULL;

	switch (pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		pf_driver = hisi_sec_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		pf_driver = hisi_hpre_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		pf_driver = hisi_zip_get_pf_driver();
		break;
	default:
		return NULL;
	}

	if (!pf_driver)
		return NULL;

	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);

	return !IS_ERR(pf_qm) ? pf_qm : NULL;
}

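/*
 * The lower half of BAR2 is the functional register space that may be
 * handed to the Guest; the upper half holds the migration control
 * registers. The helpers below clamp userspace access to the
 * functional half.
 */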
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
					size_t count, loff_t *ppos,
					size_t *new_count)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		/* Check if access is for migration control region */
		if (pos >= end)
			return -EINVAL;

		*new_count = min(count, (size_t)(end - pos));
	}

	return 0;
}

static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
				  struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		u64 req_len, pgoff, req_start;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		req_len = vma->vm_end - vma->vm_start;
		pgoff = vma->vm_pgoff &
			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
		req_start = pgoff << PAGE_SHIFT;

		if (req_start + req_len > end)
			return -EINVAL;
	}

	return vfio_pci_core_mmap(core_vdev, vma);
}

static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
				       const char __user *buf, size_t count,
				       loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
}

static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
				      char __user *buf, size_t count,
				      loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
}

static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
				    unsigned long arg)
{
	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_pci_core_device *vdev =
			container_of(core_vdev, struct vfio_pci_core_device, vdev);
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			/*
			 * ACC VF dev BAR2 region consists of both functional
			 * register space and migration control register space.
			 * Report only the functional region to Guest.
			 */
			info.size = pci_resource_len(pdev, info.index) / 2;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE |
					VFIO_REGION_INFO_FLAG_MMAP;

			return copy_to_user((void __user *)arg, &info, minsz) ?
					    -EFAULT : 0;
		}
	}
	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}

static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (core_vdev->mig_ops) {
		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
		if (ret) {
			vfio_pci_core_disable(vdev);
			return ret;
		}
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	}

	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	iounmap(vf_qm->io_base);
	vfio_pci_core_close_device(core_vdev);
}

static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
};

static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
	struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);

	hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
	hisi_acc_vdev->pf_qm = pf_qm;
	hisi_acc_vdev->vf_dev = pdev;
	mutex_init(&hisi_acc_vdev->state_mutex);

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY;
	core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;

	return vfio_pci_core_init_dev(core_vdev);
}

static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
	.name = "hisi-acc-vfio-pci-migration",
	.init = hisi_acc_vfio_pci_migrn_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = hisi_acc_vfio_pci_close_device,
	.ioctl = hisi_acc_vfio_pci_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = hisi_acc_vfio_pci_read,
	.write = hisi_acc_vfio_pci_write,
	.mmap = hisi_acc_vfio_pci_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
};

static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
	.name = "hisi-acc-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
};

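/*
 * Register with the migration-aware ops only when the PF runs v3+
 * hardware and the VF ID resolves; otherwise fall back to the
 * generic vfio-pci interface.
 */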
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev;
	const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
	struct hisi_qm *pf_qm;
	int vf_id;
	int ret;

	pf_qm = hisi_acc_get_pf_qm(pdev);
	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
		vf_id = pci_iov_vf_id(pdev);
		if (vf_id >= 0)
			ops = &hisi_acc_vfio_pci_migrn_ops;
		else
			pci_warn(pdev, "migration support failed, continue with generic interface\n");
	}

	hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
					  core_device.vdev, &pdev->dev, ops);
	if (IS_ERR(hisi_acc_vdev))
		return PTR_ERR(hisi_acc_vdev);

	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
	if (ret)
		goto out_put_vdev;
	return 0;

out_put_vdev:
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
	return ret;
}

static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}

static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);

static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
	.reset_done = hisi_acc_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver hisi_acc_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hisi_acc_vfio_pci_table,
	.probe = hisi_acc_vfio_pci_probe,
	.remove = hisi_acc_vfio_pci_remove,
	.err_handler = &hisi_acc_vf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");