/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME        "thunder-cptvf"
#define DRV_VERSION     "1.0"

struct cptvf_wqe {
        struct tasklet_struct twork;
        void *cptvf;
        u32 qno;
};

struct cptvf_wqe_info {
        struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

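/*
 * Tasklet bottom half: run completion post-processing for the VQ.
 * Only vq_wqe[0] is used, as the VF currently drives a single queue.
 */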
static void vq_work_handler(unsigned long data)
{
        struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
        struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

        vq_post_process(cwqe->cptvf, cwqe->qno);
}

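/* Allocate the per-VF worker context and bind one tasklet per queue. */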
static int init_worker_threads(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct cptvf_wqe_info *cwqe_info;
        int i;

        cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
        if (!cwqe_info)
                return -ENOMEM;

        if (cptvf->nr_queues) {
                dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
                         cptvf->nr_queues);
        }

        for (i = 0; i < cptvf->nr_queues; i++) {
                tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
                             (unsigned long)cwqe_info);
                cwqe_info->vq_wqe[i].qno = i;
                cwqe_info->vq_wqe[i].cptvf = cptvf;
        }

        cptvf->wqe_info = cwqe_info;

        return 0;
}

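/* Kill the per-queue tasklets and release the worker context. */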
static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
        struct cptvf_wqe_info *cwqe_info;
        struct pci_dev *pdev = cptvf->pdev;
        int i;

        cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
        if (!cwqe_info)
                return;

        if (cptvf->nr_queues) {
                dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
                         cptvf->nr_queues);
        }

        for (i = 0; i < cptvf->nr_queues; i++)
                tasklet_kill(&cwqe_info->vq_wqe[i].twork);

        kzfree(cwqe_info);
        cptvf->wqe_info = NULL;
}

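/* Free the pending-entry array of every allocated pending queue. */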
static void free_pending_queues(struct pending_qinfo *pqinfo)
{
        int i;
        struct pending_queue *queue;

        for_each_pending_queue(pqinfo, queue, i) {
                if (!queue->head)
                        continue;

                /* free single queue */
                kzfree(queue->head);
                queue->head = NULL;

                queue->front = 0;
                queue->rear = 0;
        }

        pqinfo->qlen = 0;
        pqinfo->nr_queues = 0;
}

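/* Allocate qlen pending entries for each of the nr_queues pending queues. */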
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
                                u32 nr_queues)
{
        u32 i;
        size_t size;
        int ret;
        struct pending_queue *queue = NULL;

        pqinfo->nr_queues = nr_queues;
        pqinfo->qlen = qlen;

        size = qlen * sizeof(struct pending_entry);

        for_each_pending_queue(pqinfo, queue, i) {
                queue->head = kzalloc(size, GFP_KERNEL);
                if (!queue->head) {
                        ret = -ENOMEM;
                        goto pending_qfail;
                }

                queue->front = 0;
                queue->rear = 0;
                atomic64_set(&queue->pending_count, 0);

                /* init queue spin lock */
                spin_lock_init(&queue->lock);
        }

        return 0;

pending_qfail:
        free_pending_queues(pqinfo);

        return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret;

        if (!nr_queues)
                return 0;

        ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
        if (ret) {
                dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
                        nr_queues);
                return ret;
        }

        return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!cptvf->nr_queues)
                return;

        dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
                 cptvf->nr_queues);
        free_pending_queues(&cptvf->pqinfo);
}

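/* Walk each command queue's chunk list and free the DMA-coherent chunks. */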
static void free_command_queues(struct cpt_vf *cptvf,
                                struct command_qinfo *cqinfo)
{
        int i;
        struct command_queue *queue = NULL;
        struct command_chunk *chunk = NULL;
        struct pci_dev *pdev = cptvf->pdev;
        struct hlist_node *node;

        /* clean up for each queue */
        for (i = 0; i < cptvf->nr_queues; i++) {
                queue = &cqinfo->queue[i];
                if (hlist_empty(&cqinfo->queue[i].chead))
                        continue;

                hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
                                          nextchunk) {
                        /* chunk->size excludes the trailing next-chunk
                         * pointer, so add it back for the free
                         */
                        dma_free_coherent(&pdev->dev,
                                          chunk->size + CPT_NEXT_CHUNK_PTR_SIZE,
                                          chunk->head, chunk->dma_addr);
                        chunk->head = NULL;
                        chunk->dma_addr = 0;
                        hlist_del(&chunk->nextchunk);
                        kzfree(chunk);
                }

                queue->nchunks = 0;
                queue->idx = 0;
        }

        /* common cleanup */
        cqinfo->cmd_size = 0;
}

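/*
 * Build each command queue as a linked list of DMA-coherent chunks.
 * Every chunk is allocated with room for a trailing next-chunk pointer,
 * and the final chunk points back to the first so the queue appears
 * circular to the hardware.
 */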
static int alloc_command_queues(struct cpt_vf *cptvf,
                                struct command_qinfo *cqinfo, size_t cmd_size,
                                u32 qlen)
{
        int i;
        size_t q_size;
        struct command_queue *queue = NULL;
        struct pci_dev *pdev = cptvf->pdev;

        /* common init */
        cqinfo->cmd_size = cmd_size;
        /* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
        cptvf->qsize = min(qlen, cqinfo->qchunksize) *
                        CPT_NEXT_CHUNK_PTR_SIZE + 1;
        /* Qsize in bytes to create space for alignment */
        q_size = qlen * cqinfo->cmd_size;

        /* per queue initialization */
        for (i = 0; i < cptvf->nr_queues; i++) {
                size_t c_size = 0;
                size_t rem_q_size = q_size;
                struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
                u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

                queue = &cqinfo->queue[i];
                INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
                do {
                        curr = kzalloc(sizeof(*curr), GFP_KERNEL);
                        if (!curr)
                                goto cmd_qfail;

                        c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
                                        rem_q_size;
                        curr->head = dma_zalloc_coherent(&pdev->dev,
                                          c_size + CPT_NEXT_CHUNK_PTR_SIZE,
                                          &curr->dma_addr, GFP_KERNEL);
                        if (!curr->head) {
                                dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
                                        i, queue->nchunks);
                                /* not on the chunk list yet, free it here */
                                kfree(curr);
                                goto cmd_qfail;
                        }

                        curr->size = c_size;
                        if (queue->nchunks == 0) {
                                hlist_add_head(&curr->nextchunk,
                                               &cqinfo->queue[i].chead);
                                first = curr;
                        } else {
                                hlist_add_behind(&curr->nextchunk,
                                                 &last->nextchunk);
                        }

                        queue->nchunks++;
                        rem_q_size -= c_size;
                        if (last)
                                *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

                        last = curr;
                } while (rem_q_size);

                /* Tie the last chunk entry back to the head to make the
                 * queue circular
                 */
                *((u64 *)(&last->head[last->size])) = (u64)first->dma_addr;
                queue->qhead = first;
                spin_lock_init(&queue->lock);
        }
        return 0;

cmd_qfail:
        free_command_queues(cptvf, cqinfo);
        return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret;

        /* setup AE command queues */
        ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
                                   qlen);
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
                        cptvf->nr_queues);
                return ret;
        }

        return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!cptvf->nr_queues)
                return;

        dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
                 cptvf->nr_queues);
        free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
        cleanup_worker_threads(cptvf);
        cleanup_pending_queues(cptvf);
        cleanup_command_queues(cptvf);
}

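/*
 * Set up all software-side resources for the VF: command queues,
 * pending queues and the bottom-half worker tasklets.
 */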
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
        struct pci_dev *pdev = cptvf->pdev;
        u32 max_dev_queues = CPT_NUM_QS_PER_VF;
        int ret;

        /* possible cpus */
        nr_queues = min_t(u32, nr_queues, max_dev_queues);
        cptvf->nr_queues = nr_queues;

        ret = init_command_queues(cptvf, qlen);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
                        nr_queues);
                return ret;
        }

        ret = init_pending_queues(cptvf, qlen, nr_queues);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
                        nr_queues);
                goto setup_pqfail;
        }

        /* Create worker threads for BH processing */
        ret = init_worker_threads(cptvf);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup worker threads\n");
                goto init_work_fail;
        }

        return 0;

init_work_fail:
        cleanup_worker_threads(cptvf);
        cleanup_pending_queues(cptvf);

setup_pqfail:
        cleanup_command_queues(cptvf);

        return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
        irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
        free_cpumask_var(cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
        union cptx_vqx_ctl vqx_ctl;

        vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
        vqx_ctl.s.ena = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
        union cptx_vqx_doorbell vqx_dbell;

        vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DOORBELL(0, 0));
        vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
                        vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
        union cptx_vqx_inprog vqx_inprg;

        vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
        vqx_inprg.s.inflight = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
        union cptx_vqx_done_wait vqx_dwait;

        vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DONE_WAIT(0, 0));
        vqx_dwait.s.num_wait = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
                        vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
        union cptx_vqx_done_wait vqx_dwait;

        vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DONE_WAIT(0, 0));
        vqx_dwait.s.time_wait = time;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
                        vqx_dwait.u);
}

static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_ena_w1s vqx_misc_ena;

        vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_ENA_W1S(0, 0));
        /* Enable software error interrupts for the requested VF */
        vqx_misc_ena.s.swerr = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
                        vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_ena_w1s vqx_misc_ena;

        vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_ENA_W1S(0, 0));
        /* Enable mbox(0) interrupts for the requested VF */
        vqx_misc_ena.s.mbox = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
                        vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_done_ena_w1s vqx_done_ena;

        vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_DONE_ENA_W1S(0, 0));
        /* Enable DONE interrupts for the requested VF */
        vqx_done_ena.s.done = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
                        vqx_done_ena.u);
}

static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.dovf = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.irde = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.nwrp = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.mbox = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.swerr = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
        return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

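/*
 * MISC interrupt handler: dispatch mailbox traffic and clear (W1C)
 * whichever error condition the VF raised.
 */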
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        u64 intr;

        intr = cptvf_read_vf_misc_intr_status(cptvf);
        /* Check for MISC interrupt types */
        if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
                dev_err(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
                cptvf_handle_mbox_intr(cptvf);
                cptvf_clear_mbox_intr(cptvf);
        } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
                cptvf_clear_dovf_intr(cptvf);
                /* Clear doorbell count */
                cptvf_write_vq_doorbell(cptvf, 0);
                dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
                cptvf_clear_irde_intr(cptvf);
                dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
                cptvf_clear_nwrp_intr(cptvf);
                dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
                cptvf_clear_swerr_intr(cptvf);
                dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else {
                dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
                        cptvf->vfid);
        }

        return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
                                                 int qno)
{
        struct cptvf_wqe_info *nwqe_info;

        if (unlikely(qno >= cptvf->nr_queues))
                return NULL;
        nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

        return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
        union cptx_vqx_done vqx_done;

        vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
        return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
                                           u32 ackcnt)
{
        union cptx_vqx_done_ack vqx_dack_cnt;

        vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_DONE_ACK(0, 0));
        vqx_dack_cnt.s.done_ack = ackcnt;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
                        vqx_dack_cnt.u);
}

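/*
 * DONE interrupt handler: acknowledge the completion count and kick
 * the tasklet that post-processes finished instructions.
 */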
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        /* Read the number of completions */
        u32 intr = cptvf_read_vq_done_count(cptvf);

        if (intr) {
                struct cptvf_wqe *wqe;

                /* Acknowledge the number of
                 * scheduled completions for processing
                 */
                cptvf_write_vq_done_ack(cptvf, intr);
                wqe = get_cptvf_vq_wqe(cptvf, 0);
                if (unlikely(!wqe)) {
                        dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
                                cptvf->vfid);
                        return IRQ_NONE;
                }
                tasklet_hi_schedule(&wqe->twork);
        }

        return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
        struct pci_dev *pdev = cptvf->pdev;
        int cpu;

        if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
                                GFP_KERNEL)) {
                dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
                        cptvf->vfid);
                return;
        }

        cpu = cptvf->vfid % num_online_cpus();
        cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
                        cptvf->affinity_mask[vec]);
        irq_set_affinity_hint(pci_irq_vector(pdev, vec),
                        cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
        union cptx_vqx_saddr vqx_saddr;

        vqx_saddr.u = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

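/*
 * Bring up the virtual queue: quiesce it, program the command queue
 * base address and interrupt coalescing, then re-enable it.
 */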
void cptvf_device_init(struct cpt_vf *cptvf)
{
        u64 base_addr = 0;

        /* Disable the VQ */
        cptvf_write_vq_ctl(cptvf, 0);
        /* Reset the doorbell */
        cptvf_write_vq_doorbell(cptvf, 0);
        /* Clear inflight */
        cptvf_write_vq_inprog(cptvf, 0);
        /* Write VQ SADDR */
        /* TODO: for now only one queue, so hard coded */
        base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
        cptvf_write_vq_saddr(cptvf, base_addr);
        /* Configure timerhold / coalescence */
        cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
        cptvf_write_vq_done_numwait(cptvf, 1);
        /* Enable the VQ */
        cptvf_write_vq_ctl(cptvf, 1);
        /* Flag the VF ready */
        cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

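/*
 * Probe: map the VF's BAR, set up MSI-X vectors and mailbox traffic
 * with the PF, initialize the software queues and register the crypto
 * algorithms.
 */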
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_vf *cptvf;
        int err;

        cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
        if (!cptvf)
                return -ENOMEM;

        pci_set_drvdata(pdev, cptvf);
        cptvf->pdev = pdev;
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto cptvf_err_disable_device;
        }
        /* Mark as VF driver */
        cptvf->flags |= CPT_FLAG_VF_DRIVER;
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto cptvf_err_release_regions;
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
                goto cptvf_err_release_regions;
        }

        /* Map the VF's configuration registers */
        cptvf->reg_base = pcim_iomap(pdev, 0, 0);
        if (!cptvf->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto cptvf_err_release_regions;
        }

        cptvf->node = dev_to_node(&pdev->dev);
        err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
                        CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "Request for #%d msix vectors failed\n",
                        CPT_VF_MSIX_VECTORS);
                goto cptvf_err_release_regions;
        }

        err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
                          cptvf_misc_intr_handler, 0, "CPT VF misc intr",
                          cptvf);
        if (err) {
                dev_err(dev, "Request misc irq failed\n");
                goto cptvf_free_vectors;
        }

        /* Enable mailbox and software error interrupts */
        cptvf_enable_mbox_interrupts(cptvf);
        cptvf_enable_swerr_interrupts(cptvf);

        /* Check ready with PF */
        /* Gets chip ID / device Id from PF if ready */
        err = cptvf_check_pf_ready(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to READY msg\n");
                goto cptvf_free_misc_irq;
        }

        /* CPT VF software resources initialization */
        cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
        err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
        if (err) {
                dev_err(dev, "cptvf_sw_init() failed\n");
                goto cptvf_free_misc_irq;
        }
        /* Convey VQ LEN to PF */
        err = cptvf_send_vq_size_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to QLEN msg\n");
                goto cptvf_free_sw_resources;
        }

        /* CPT VF device initialization */
        cptvf_device_init(cptvf);
        /* Send msg to PF to assign current Q to required group */
        cptvf->vfgrp = 1;
        err = cptvf_send_vf_to_grp_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to VF_GRP msg\n");
                goto cptvf_free_sw_resources;
        }

        cptvf->priority = 1;
        err = cptvf_send_vf_priority_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to VF_PRIO msg\n");
                goto cptvf_free_sw_resources;
        }

        err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
                          cptvf_done_intr_handler, 0, "CPT VF done intr",
                          cptvf);
        if (err) {
                dev_err(dev, "Request done irq failed\n");
                goto cptvf_free_sw_resources;
        }

        /* Enable done interrupt */
        cptvf_enable_done_interrupts(cptvf);

        /* Set irq affinity masks */
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

        err = cptvf_send_vf_up(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to UP msg\n");
                goto cptvf_free_irq_affinity;
        }
        err = cvm_crypto_init(cptvf);
        if (err) {
                dev_err(dev, "Algorithm register failed\n");
                goto cptvf_free_irq_affinity;
        }
        return 0;

cptvf_free_irq_affinity:
        cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
        cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
        free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
cptvf_free_sw_resources:
        cptvf_sw_cleanup(cptvf);
cptvf_free_misc_irq:
        free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
        pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
        pci_release_regions(pdev);
cptvf_err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return err;
}

static void cptvf_remove(struct pci_dev *pdev)
{
        struct cpt_vf *cptvf = pci_get_drvdata(pdev);

        if (!cptvf) {
                dev_err(&pdev->dev, "Invalid CPT-VF device\n");
                return;
        }

        /* Convey DOWN to PF */
        if (cptvf_send_vf_down(cptvf)) {
                dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
        } else {
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
                pci_free_irq_vectors(cptvf->pdev);
                cptvf_sw_cleanup(cptvf);
                pci_set_drvdata(pdev, NULL);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                cvm_crypto_exit();
        }
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
        cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
        {PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
        { 0, }  /* end of table */
};

static struct pci_driver cptvf_pci_driver = {
        .name = DRV_NAME,
        .id_table = cptvf_id_table,
        .probe = cptvf_probe,
        .remove = cptvf_remove,
        .shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);