/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME	"thunder-cptvf"
#define DRV_VERSION	"1.0"

struct cptvf_wqe {
	struct tasklet_struct twork;
	void *cptvf;
	u32 qno;
};

struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

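/*
 * Tasklet bottom half: post-processes completed instructions outside
 * hard-IRQ context. Since the VF currently runs a single queue (see the
 * TODO in cptvf_device_init()), the handler always works on vq_wqe[0].
 */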
static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}

static int init_worker_threads(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->nr_queues)
		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
			 cptvf->nr_queues);

	for (i = 0; i < cptvf->nr_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (unsigned long)cwqe_info);
		cwqe_info->vq_wqe[i].qno = i;
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
	struct cptvf_wqe_info *cwqe_info;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->nr_queues)
		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			 cptvf->nr_queues);

	for (i = 0; i < cptvf->nr_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kzfree(cwqe_info);
	cptvf->wqe_info = NULL;
}

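/*
 * Pending queues hold requests that have been issued to the hardware but
 * not yet completed. Each queue keeps an atomic pending_count and a
 * spinlock protecting its entries.
 */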
static void free_pending_queues(struct pending_qinfo *pqinfo)
{
	int i;
	struct pending_queue *queue;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kzfree(queue->head);
		queue->head = NULL;
	}

	pqinfo->qlen = 0;
	pqinfo->nr_queues = 0;
}

static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	struct pending_queue *queue = NULL;
	size_t size;
	int ret;
	u32 i;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;
	size = qlen * sizeof(struct pending_entry);

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc(size, GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		atomic64_set(&queue->pending_count, 0);
		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);
	return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!nr_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (ret)
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);
	return ret;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		 cptvf->nr_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	int i;
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head, chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			kzfree(chunk);
		}

		queue->nchunks = 0;
	}
	/* common cleanup */
	cqinfo->cmd_size = 0;
}

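/*
 * A command queue is a chain of DMA-coherent chunks. Each chunk reserves
 * CPT_NEXT_CHUNK_PTR_SIZE bytes at its tail for the bus address of the
 * next chunk, and the last chunk points back to the first, so the
 * hardware walks the chunks as one circular instruction queue.
 */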
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	int i;
	size_t q_size;
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
			CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
					c_size + CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
						(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular */
		/* Tie back last chunk entry to head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup AE command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
				   qlen);
	if (ret)
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);
	return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		 cptvf->nr_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

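/*
 * Software setup in dependency order: command queues first (the VQ SADDR
 * register is later pointed at their first chunk), then pending queues,
 * then the tasklet workers that consume completions. Failures unwind in
 * reverse.
 */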
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret = 0;
	u32 max_dev_queues = 0;

	max_dev_queues = CPT_NUM_QS_PER_VF;
	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}

	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

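/*
 * The doorbell counts 64-bit words, not instructions; a CPT instruction
 * is eight words, hence the multiply by 8 below. Writing 0 clears the
 * count, which the MISC handler relies on after a doorbell overflow.
 */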
void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DOORBELL(0, 0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
			vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

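/*
 * Interrupt enables go through the hardware's W1S (write-one-to-set)
 * aliases, so each helper switches on a single cause bit without
 * disturbing the others.
 */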
static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable mbox(0) interrupts for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ENA_W1S(0, 0));
	/* Enable DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
			vqx_done_ena.u);
}

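/*
 * The cause bits in MISC_INT are acknowledged by writing them back
 * (write-one-to-clear, matching the W1S/W1C convention of the enable
 * registers above).
 */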
static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.dovf = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.irde = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.nwrp = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

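/*
 * MISC interrupt handler. Mailbox traffic from the PF is the common
 * case; doorbell overflow (DOVF), instruction NCB read error (IRDE),
 * NCB response write error (NWRP) and software error (SWERR) are fault
 * paths that are acknowledged and logged.
 */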
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
						 int qno)
{
	struct cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->nr_queues))
		return NULL;
	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
	union cptx_vqx_done vqx_done;

	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ACK(0, 0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
			vqx_dack_cnt.u);
}

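/*
 * DONE interrupt handler: read how many instructions have completed,
 * acknowledge that count back to the hardware, and kick the tasklet to
 * do the actual response processing outside hard-IRQ context.
 */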
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the number of
		 * scheduled completions for processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

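/*
 * Bring up the VQ: disable it, reset the doorbell and in-flight
 * counters, point SADDR at the first command chunk, program completion
 * coalescing (count and timer), then re-enable and mark the VF ready.
 */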
void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

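/*
 * PCI probe: enable the device, map BAR0, set 48-bit DMA masks, hook up
 * the MISC and DONE MSI-X vectors, handshake with the PF (READY, QLEN,
 * GRP, PRIO, UP) and finally register the crypto algorithms.
 */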
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_vf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cptvf_err_disable_device;
	}
	/* Mark as VF driver */
	cptvf->flags |= CPT_FLAG_VF_DRIVER;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto cptvf_err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto cptvf_err_release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cptvf_err_release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_VF_MSIX_VECTORS);
		goto cptvf_err_release_regions;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request misc irq failed");
		goto cptvf_free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	err = cptvf_check_pf_ready(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to READY msg");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed");
		goto cptvf_free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = cptvf_send_vq_size_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to QLEN msg");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	cptvf->vfgrp = 0;
	err = cptvf_send_vf_to_grp_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_GRP msg");
		goto cptvf_free_misc_irq;
	}

	cptvf->priority = 1;
	err = cptvf_send_vf_priority_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_PRIO msg");
		goto cptvf_free_misc_irq;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request done irq failed\n");
		goto cptvf_free_misc_irq;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = cptvf_send_vf_up(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to UP msg");
		goto cptvf_free_irq_affinity;
	}
	err = cvm_crypto_init(cptvf);
	if (err) {
		dev_err(dev, "Algorithm register failed\n");
		goto cptvf_free_irq_affinity;
	}
	return 0;

cptvf_free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
cptvf_free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
	pci_release_regions(pdev);
cptvf_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

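/*
 * Teardown mirrors probe: tell the PF the VF is going down, then release
 * IRQs, vectors, software queues and PCI resources.
 */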
static void cptvf_remove(struct pci_dev *pdev)
{
	struct cpt_vf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg");
	} else {
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		cptvf_sw_cleanup(cptvf);
		pci_set_drvdata(pdev, NULL);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		cvm_crypto_exit();
	}
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = cptvf_id_table,
	.probe = cptvf_probe,
	.remove = cptvf_remove,
	.shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);