/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME	"thunder-cptvf"
#define DRV_VERSION	"1.0"

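/*
 * Bottom-half bookkeeping: one tasklet per virtual queue. The DONE
 * interrupt handler schedules a tasklet, which drains completed
 * instructions through vq_post_process().
 */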
struct cptvf_wqe {
	struct tasklet_struct twork;
	void *cptvf;
	u32 qno;
};

struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	/* Only queue 0 is used for now (see the TODO in cptvf_device_init) */
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}

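/*
 * Allocate the per-VF tasklet table and bind a tasklet to each configured
 * queue; the table is stashed in cptvf->wqe_info for later teardown.
 */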
static int init_worker_threads(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Creating VQ worker threads (%u)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (unsigned long)cwqe_info);
		cwqe_info->vq_wqe[i].qno = i;
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
	struct cptvf_wqe_info *cwqe_info;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kzfree(cwqe_info);
	cptvf->wqe_info = NULL;
}

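/*
 * Pending queues track instructions submitted to hardware but not yet
 * completed. Each queue is a flat array of pending_entry slots with
 * front/rear indices, guarded by a spinlock.
 */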
static void free_pending_queues(struct pending_qinfo *pqinfo)
{
	int i;
	struct pending_queue *queue;

	/* free every allocated queue, then reset the bookkeeping */
	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		kzfree(queue->head);
		queue->head = NULL;

		queue->front = 0;
		queue->rear = 0;
	}

	pqinfo->qlen = 0;
	pqinfo->nr_queues = 0;
}

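/*
 * Allocate qlen pending_entry slots for each of nr_queues queues; on any
 * allocation failure, everything allocated so far is released.
 */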
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	u32 i;
	size_t size;
	int ret;
	struct pending_queue *queue = NULL;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;

	size = qlen * sizeof(struct pending_entry);

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc(size, GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->front = 0;
		queue->rear = 0;
		atomic64_set(&queue->pending_count, 0);

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!nr_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);
		return ret;
	}

	return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		 cptvf->nr_queues);
	free_pending_queues(&cptvf->pqinfo);
}

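/*
 * A command queue is a ring of DMA-coherent chunks. The last 8 bytes of
 * each chunk hold the bus address of the next chunk, and the final chunk
 * points back to the first, so hardware can walk the queue circularly.
 */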
static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	int i;
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			/* each chunk was allocated with extra room for the
			 * trailing next-chunk pointer; free the same size
			 */
			dma_free_coherent(&pdev->dev,
					  chunk->size + CPT_NEXT_CHUNK_PTR_SIZE,
					  chunk->head, chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			kzfree(chunk);
		}

		queue->nchunks = 0;
		queue->idx = 0;
	}

	/* common cleanup */
	cqinfo->cmd_size = 0;
}

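/*
 * Split each queue's qlen * cmd_size bytes into DMA-coherent chunks of at
 * most qchunksize commands, chain them through their trailing next-chunk
 * pointers, and close the ring.
 */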
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	int i;
	size_t q_size;
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
							c_size + CPT_NEXT_CHUNK_PTR_SIZE,
							&curr->dma_addr,
							GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular */
		/* Tie back last chunk entry to head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup AE command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
				   qlen);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);
		return ret;
	}

	return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		 cptvf->nr_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

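/*
 * Software-side init: clamp the queue count to CPT_NUM_QS_PER_VF, then
 * bring up command queues, pending queues, and worker tasklets in that
 * order; failures unwind in reverse.
 */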
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = CPT_NUM_QS_PER_VF;
	int ret = 0;

	/* possible cpus */
	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}

	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

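/*
 * The helpers below poke individual CPTX_VQX_* CSRs for virtual queue 0
 * of this VF, typically via a read-modify-write of a single field.
 */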
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DOORBELL(0, 0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
			vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable mbox(0) interrupts for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ENA_W1S(0, 0));
	/* Enable DONE interrupts for the requested VF */
	vqx_done_ena.s.done = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
			vqx_done_ena.u);
}

static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	cpt_write_csr64(cptvf->reg_base,
			CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

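/*
 * MISC interrupt handler: one vector multiplexes mailbox, doorbell
 * overflow (DOVF), instruction read error (IRDE), NCB response write
 * error (NWRP) and software error (SWERR) causes; each cause is handled
 * and then acknowledged with a write-one-to-clear.
 */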
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
						 int qno)
{
	struct cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->nr_queues))
		return NULL;
	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
	union cptx_vqx_done vqx_done;

	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ACK(0, 0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
			vqx_dack_cnt.u);
}

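/*
 * DONE interrupt handler: read the completion count, acknowledge it, and
 * punt the actual post-processing to the queue's tasklet.
 */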
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the number of
		 * scheduled completions for processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

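/*
 * Reset and (re)configure the virtual queue: disable it, clear the
 * doorbell and in-flight counters, program the command queue base address
 * and the completion coalescing thresholds, then re-enable it.
 */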
void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

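/*
 * Probe: map BAR0, set up MSI-X vectors, handshake with the PF over the
 * mailbox (READY, QLEN, group, priority, UP), initialize the software
 * queues, and finally register the crypto algorithms.
 */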
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_vf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cptvf_err_disable_device;
	}
	/* Mark as VF driver */
	cptvf->flags |= CPT_FLAG_VF_DRIVER;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto cptvf_err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto cptvf_err_release_regions;
	}

	/* Map VF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cptvf_err_release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_VF_MSIX_VECTORS);
		goto cptvf_err_release_regions;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request misc irq failed\n");
		goto cptvf_free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	err = cptvf_check_pf_ready(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to READY msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto cptvf_free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = cptvf_send_vq_size_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to QLEN msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	cptvf->vfgrp = 1;
	err = cptvf_send_vf_to_grp_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_GRP msg\n");
		goto cptvf_free_misc_irq;
	}

	cptvf->priority = 1;
	err = cptvf_send_vf_priority_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_PRIO msg\n");
		goto cptvf_free_misc_irq;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request done irq failed\n");
		goto cptvf_free_misc_irq;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = cptvf_send_vf_up(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to UP msg\n");
		goto cptvf_free_irq_affinity;
	}
	err = cvm_crypto_init(cptvf);
	if (err) {
		dev_err(dev, "Algorithm register failed\n");
		goto cptvf_free_irq_affinity;
	}
	return 0;

cptvf_free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	/* the done irq was requested before this point; release it too */
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
cptvf_free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
	pci_release_regions(pdev);
cptvf_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

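/*
 * Teardown mirrors probe. Note that if the PF does not acknowledge the
 * DOWN message, all cleanup is skipped.
 */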
static void cptvf_remove(struct pci_dev *pdev)
{
	struct cpt_vf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		cptvf_sw_cleanup(cptvf);
		pci_set_drvdata(pdev, NULL);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		cvm_crypto_exit();
	}
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }	/* end of table */
};

static struct pci_driver cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = cptvf_id_table,
	.probe = cptvf_probe,
	.remove = cptvf_remove,
	.shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);