/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME	"thunder-cptvf"
#define DRV_VERSION	"1.0"

struct cptvf_wqe {
	struct tasklet_struct twork;
	void *cptvf;
	u32 qno;
};

struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

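/*
 * Bottom-half processing: each virtual queue (VQ) owns a tasklet that
 * drains completions via vq_post_process().  Only queue 0 is exercised
 * today; see the hard-coded vq_wqe[0] here and the single-queue TODO in
 * cptvf_device_init().
 */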
static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}

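/*
 * Set up one tasklet per queue, all bound to vq_work_handler().  The
 * shared cptvf_wqe_info block is passed as the tasklet data, and the
 * handler always resolves it to queue 0's wqe.
 */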
static int init_worker_threads(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].qno = i;
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
	struct cptvf_wqe_info *cwqe_info;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kzfree(cwqe_info);
	cptvf->wqe_info = NULL;
}

static void free_pending_queues(struct pending_qinfo *pqinfo)
{
	int i;
	struct pending_queue *queue;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kzfree(queue->head);
		queue->front = 0;
		queue->rear = 0;
	}

	pqinfo->qlen = 0;
	pqinfo->nr_queues = 0;
}

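/*
 * Pending queues track instructions that have been pushed to hardware
 * but not yet completed: a flat, zeroed array of qlen pending_entry
 * slots per queue, guarded by a per-queue spinlock, with an atomic
 * count of outstanding entries.
 */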
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	u32 i;
	size_t size;
	int ret;
	struct pending_queue *queue = NULL;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;

	size = qlen * sizeof(struct pending_entry);

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc(size, GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->front = 0;
		queue->rear = 0;
		atomic64_set(&queue->pending_count, 0);

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!nr_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);
		return ret;
	}

	return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		 cptvf->nr_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	int i;
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head, chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			kzfree(chunk);
		}

		queue->nchunks = 0;
		queue->idx = 0;
	}

	/* common cleanup */
	cqinfo->cmd_size = 0;
}

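/*
 * Command queues are circular lists of DMA-coherent chunks.  Each chunk
 * holds up to qchunksize commands plus room for one 64-bit "next chunk"
 * bus address (CPT_NEXT_CHUNK_PTR_SIZE) stored at head[size]; the last
 * chunk is tied back to the first so hardware can keep walking.
 */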
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	int i;
	size_t q_size;
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
			CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
					c_size + CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular: tie the last chunk back to head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup AE command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
				   qlen);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);
		return ret;
	}

	return 0;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		 cptvf->nr_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

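/*
 * Software-side setup in dependency order: the DMA command rings first,
 * then the pending queues that mirror them, then the worker tasklets
 * that run bottom-half completion processing.
 */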
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = CPT_NUM_QS_PER_VF;
	int ret;

	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}

	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

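/*
 * CSR accessors.  Each helper below does a read-modify-write of one
 * 64-bit VF register through its cptx_* union; the (0, 0) arguments
 * address block 0, queue 0, the only instance this VF driver uses.
 */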
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DOORBELL(0, 0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
			vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable mbox(0) interrupts for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ENA_W1S(0, 0));
	/* Set DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
			vqx_done_ena.u);
}

static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.dovf = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.irde = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.nwrp = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

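/*
 * MISC interrupt: a single vector multiplexes mailbox traffic and the
 * error causes (doorbell overflow, NCB read/write faults, software
 * error).  Each cause is acknowledged by writing its bit back to
 * CPTX_VQX_MISC_INT before the handler returns.
 */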
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_err(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
						 int qno)
{
	struct cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->nr_queues))
		return NULL;
	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
	union cptx_vqx_done vqx_done;

	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ACK(0, 0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
			vqx_dack_cnt.u);
}

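/*
 * DONE interrupt: CPTX_VQX_DONE counts finished instructions.  Ack
 * exactly that many through CPTX_VQX_DONE_ACK, then hand the actual
 * response processing to the queue 0 tasklet.
 */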
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the scheduled completions for processing */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

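/*
 * VQ hardware bring-up: quiesce the queue, point SADDR at the first
 * command chunk, set completion coalescing, then enable the queue.
 */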
void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

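/*
 * Probe order matters here: the MISC (mailbox) vector must be live
 * before any handshake with the PF, and the DONE vector is only
 * requested once the queues it will service actually exist.
 */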
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_vf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cptvf_err_disable_device;
	}
	/* Mark as VF driver */
	cptvf->flags |= CPT_FLAG_VF_DRIVER;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto cptvf_err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto cptvf_err_release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cptvf_err_release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_VF_MSIX_VECTORS);
		goto cptvf_err_release_regions;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request misc irq failed\n");
		goto cptvf_free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	err = cptvf_check_pf_ready(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to READY msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto cptvf_free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = cptvf_send_vq_size_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to QLEN msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = cptvf_send_vf_to_grp_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_GRP msg\n");
		goto cptvf_free_misc_irq;
	}

	err = cptvf_send_vf_priority_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_PRIO msg\n");
		goto cptvf_free_misc_irq;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request done irq failed\n");
		goto cptvf_free_misc_irq;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = cptvf_send_vf_up(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to UP msg\n");
		goto cptvf_free_irq_affinity;
	}
	err = cvm_crypto_init(cptvf);
	if (err) {
		dev_err(dev, "Algorithm register failed\n");
		goto cptvf_free_irq_affinity;
	}
	return 0;

cptvf_free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
cptvf_free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
	pci_release_regions(pdev);
cptvf_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void cptvf_remove(struct pci_dev *pdev)
{
	struct cpt_vf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		cptvf_sw_cleanup(cptvf);
		pci_set_drvdata(pdev, NULL);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }	/* end of table */
};

static struct pci_driver cptvf_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= cptvf_id_table,
	.probe		= cptvf_probe,
	.remove		= cptvf_remove,
	.shutdown	= cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);