/*
 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>

#include "desc_constr.h"
#define PREHDR_RSLS_SHIFT	31

/*
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
 */
#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
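/*
 * Worked example (illustrative, not from the original source): init_cgr()
 * below programs the congestion state threshold as
 * num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU frames, so a system with 8
 * portal-owning CPUs would signal congestion once roughly 8 * 256 = 2048
 * frames are in flight within the congestion group.
 */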
/* Length of a single buffer in the QI driver memory cache */
#define CAAM_QI_MEMCACHE_SIZE	512

#define CAAM_QI_ENQUEUE_RETRIES	10000

#define CAAM_NAPI_WEIGHT	63
/**
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
 * @p: QMan portal
 */
struct caam_napi {
	struct napi_struct irqtask;
	struct qman_portal *p;
};
/**
 * caam_qi_pcpu_priv - per-CPU private data structure maintaining the list of
 *                     pending responses expected on each CPU.
 * @caam_napi: CAAM NAPI params
 * @net_dev: netdev used by NAPI
 * @rsp_fq: response FQ from CAAM
 */
struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;
	struct net_device net_dev;
	struct qman_fq *rsp_fq;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);
/**
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group
 * @qi_pdev: platform device for QI backend
 */
struct caam_qi_priv {
	struct qman_cgr cgr;
	struct platform_device *qi_pdev;
};

static struct caam_qi_priv qipriv ____cacheline_aligned;
/*
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
 */
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
#ifdef CONFIG_DEBUG_FS
/*
 * This is a counter for the number of times the congestion group (where all
 * the request and response queues are) reached congestion. Incremented
 * each time the congestion callback is called with congested == true.
 */
static u64 times_congested;
#endif
/*
 * CPU on which the module was initialised. This is needed because the QMan
 * driver requires CGRs to be removed from the same CPU on which they were
 * originally allocated.
 */
static int mod_init_cpu;
/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa-ethernet driver.
 *       This would pose a problem for userspace application processing which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
 */
static struct kmem_cache *qi_cache;
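/*
 * Illustrative usage sketch (not part of the original source): a CAAM QI
 * frontend typically grabs one of these buffers on the hotpath with
 * GFP_ATOMIC and returns it from its done callback, e.g.:
 *
 *	void *buf = qi_cache_alloc(GFP_ATOMIC);
 *	if (!buf)
 *		return -ENOMEM;
 *	... build the S/G table / job context in buf and enqueue ...
 *	qi_cache_free(buf);
 */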
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret;
	int num_retries = 0;

	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret))
			return 0;

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);
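/*
 * Note (assumption, based on the input length being taken from fd_sgt[1]
 * above): the compound FD points at the request's fd_sgt[] pair, with
 * fd_sgt[0] describing the output buffer and fd_sgt[1] the input buffer,
 * both filled in by the caller before caam_qi_enqueue() is invoked.
 */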
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);

	fd = &msg->ern.fd;

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, -EIO);
}
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		 smp_processor_id());
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}
static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
	int ret;

	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
				    QMAN_VOLATILE_FLAG_FINISH,
				    QM_VDQCR_PRECEDENCE_VDQCR |
				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
	if (ret) {
		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
		return ret;
	}

	do {
		struct qman_portal *p;

		p = qman_get_affine_portal(smp_processor_id());
		qman_p_poll_dqrr(p, 16);
	} while (fq->flags & QMAN_FQ_STATE_NE);

	return 0;
}
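/*
 * Tear down a frame queue: retire it, drain any frames still held on it
 * (via empty_retired_fq() above when the NE flag is set) and finally take
 * it out of service.
 */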
static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
	u32 flags;
	int ret;

	ret = qman_retire_fq(fq, &flags);
	if (ret < 0) {
		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
		return ret;
	}

	if (!ret)
		goto empty_fq;

	/* Async FQ retirement condition */
	if (ret == 1) {
		/* Retry till FQ gets in retired state */
		do {
			msleep(20);
		} while (fq->state != qman_fq_state_retired);

		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
	}

empty_fq:
	if (fq->flags & QMAN_FQ_STATE_NE) {
		ret = empty_retired_fq(qidev, fq);
		if (ret) {
			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
				fq->fqid);
			return ret;
		}
	}

	ret = qman_oos_fq(fq);
	if (ret)
		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);

	qman_destroy_fq(fq);

	return ret;
}
static int empty_caam_fq(struct qman_fq *fq)
{
	int ret;
	struct qm_mcr_queryfq_np np;

	/* Wait till the older CAAM FQ gets empty */
	do {
		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;

		if (!qm_mcr_np_get(&np, frm_cnt))
			break;

		msleep(20);
	} while (1);

	/*
	 * Give extra time for pending jobs from this FQ in holding tanks
	 * to get processed.
	 */
	msleep(20);
	return 0;
}
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to the older FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);

		return ret;
	}

	/*
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill the new FQ and revert to the old FQ.
		 * Since the descriptor is already modified, this still counts
		 * as success.
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
				 new_fq->fqid);
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
	}

	return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor,
	 * then DMA-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);
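/*
 * Illustrative call flow for a frontend driver (sketch only, not from the
 * original source):
 *
 *	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
 *	...
 *	ret = caam_qi_enqueue(qidev, req);   // req->cbk() runs on completion
 *	...
 *	caam_drv_ctx_update(drv_ctx, new_sh_desc);  // e.g. shared desc change
 *	...
 *	caam_drv_ctx_rel(drv_ctx);
 */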
void *qi_cache_alloc(gfp_t flags)
{
	return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);
void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);
static int caam_qi_poll(struct napi_struct *napi, int budget)
{
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete(napi);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
	if (IS_ERR_OR_NULL(drv_ctx))
		return;

	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
			 DMA_BIDIRECTIONAL);
	kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
int caam_qi_shutdown(struct device *qidev)
{
	int i, ret;
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = current->cpus_allowed;

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
	}

	/*
	 * The QMan driver requires CGRs to be deleted from the same CPU on
	 * which they were instantiated. Hence we run the module removal on
	 * the same CPU on which the module was originally inserted.
	 */
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));

	ret = qman_delete_cgr(&priv->cgr);
	if (ret)
		dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
	else
		qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);

	/* Now that we're done with the CGRs, restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);

	platform_device_unregister(priv->qi_pdev);
	return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	caam_congested = congested;

	if (congested) {
#ifdef CONFIG_DEBUG_FS
		times_congested++;
#endif
		pr_debug_ratelimited("CAAM entered congestion\n");
	} else {
		pr_debug_ratelimited("CAAM exited congestion\n");
	}
}
static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
	/*
	 * In case of threaded ISR, for RT kernels in_irq() does not return
	 * an appropriate value, so use in_serving_softirq() to distinguish
	 * between softirq and irq contexts.
	 */
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		np->p = p;
		napi_schedule(&np->irqtask);
		return 1;
	}
	return 0;
}
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	u32 status;

	if (caam_qi_napi_schedule(p, caam_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;
	status = be32_to_cpu(fd->status);
	if (unlikely(status))
		dev_err(qidev, "Error: %#x in CAAM response FD\n", status);

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for caam response\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;

	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
	if (!fq)
		return -ENOMEM;

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		kfree(fq);
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		kfree(fq);
		return ret;
	}

	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

	dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
	return 0;
}
static int init_cgr(struct device *qidev)
{
	int ret;
	struct qm_mcc_initcgr opts;
	const u64 cpus = *(u64 *)qman_affine_cpus();
	const int num_cpus = hweight64(cpus);
	const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;

	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
		return ret;
	}

	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
				   QM_CGR_WE_MODE);
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
			qipriv.cgr.cgrid);
		return ret;
	}

	dev_info(qidev, "Congestion threshold set to %llu\n", val);
	return 0;
}
static int alloc_rsp_fqs(struct device *qidev)
{
	int ret, i;
	const cpumask_t *cpus = qman_affine_cpus();

	/* Now create the response FQs */
	for_each_cpu(i, cpus) {
		ret = alloc_rsp_fq_cpu(qidev, i);
		if (ret) {
			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
			return ret;
		}
	}

	return 0;
}
static void free_rsp_fqs(void)
{
	int i;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus)
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
int caam_qi_init(struct platform_device *caam_pdev)
{
	int err, i;
	struct platform_device *qi_pdev;
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = current->cpus_allowed;
	static struct platform_device_info qi_pdev_info = {
		.name = "caam_qi",
		.id = PLATFORM_DEVID_NONE
	};

	/*
	 * QMan requires CGRs to be removed from the same CPU + portal on which
	 * they were originally allocated. Hence we note down the
	 * initialisation CPU and use the same CPU for module exit.
	 * We select the first CPU from the list of portal-owning CPUs and pin
	 * module init to it.
	 */
	mod_init_cpu = cpumask_first(cpus);
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));

	qi_pdev_info.parent = ctrldev;
	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
	qi_pdev = platform_device_register_full(&qi_pdev_info);
	if (IS_ERR(qi_pdev))
		return PTR_ERR(qi_pdev);

	ctrlpriv = dev_get_drvdata(ctrldev);
	qidev = &qi_pdev->dev;

	qipriv.qi_pdev = qi_pdev;
	dev_set_drvdata(qidev, &qipriv);

	/* Initialize the congestion detection */
	err = init_cgr(qidev);
	if (err) {
		dev_err(qidev, "CGR initialization failed: %d\n", err);
		platform_device_unregister(qi_pdev);
		return err;
	}

	/* Initialise response FQs */
	err = alloc_rsp_fqs(qidev);
	if (err) {
		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
		free_rsp_fqs();
		platform_device_unregister(qi_pdev);
		return err;
	}

	/*
	 * Enable the NAPI contexts on each core that has an affine portal.
	 */
	for_each_cpu(i, cpus) {
		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
		struct caam_napi *caam_napi = &priv->caam_napi;
		struct napi_struct *irqtask = &caam_napi->irqtask;
		struct net_device *net_dev = &priv->net_dev;

		net_dev->dev = *qidev;
		INIT_LIST_HEAD(&net_dev->napi_list);

		netif_napi_add(net_dev, irqtask, caam_qi_poll,
			       CAAM_NAPI_WEIGHT);

		napi_enable(irqtask);
	}
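	/*
	 * Note: the per-CPU net_device instances used above exist only to
	 * give NAPI something to attach its napi_struct to; they are never
	 * registered with the network stack.
	 */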
	/* Hook up QI device to parent controlling caam device */
	ctrlpriv->qidev = qidev;

	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(qidev, "Can't allocate CAAM cache\n");
		free_rsp_fqs();
		platform_device_unregister(qi_pdev);
		return -ENOMEM;
	}

	/* Done with the CGRs; restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
	ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444,
						     ctrlpriv->ctl,
						     &times_congested,
						     &caam_fops_u64_ro);
#endif
	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
	return 0;
}