/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};
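
/* A __dispatch entry parks a received packet (as an octeon_recv_info)
 * together with the dispatch function registered for its opcode/subcode
 * on droq->dispatch_list, so the handler can run after droq->lock has
 * been released.
 */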

/** Get the argument that the user set when registering dispatch
 *  function for a given opcode/subcode.
 *  @param  octeon_dev - the octeon device pointer.
 *  @param  opcode     - the opcode for which the dispatch argument
 *                       is to be checked.
 *  @param  subcode    - the subcode for which the dispatch argument
 *                       is to be checked.
 *  @return Success: void * (argument to the dispatch function)
 *  @return Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
					    u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;

	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);

	return fn_arg;
}
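
/* Note: the dispatch registry is a direct-mapped table indexed by
 * OPCODE_SUBCODE(opcode, subcode) & OCTEON_OPCODE_MASK; entries that
 * collide on the same index hang off that slot's list, which is why the
 * lookup above falls back to walking dlist[idx].list.
 */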

/** Check for packets on Droq. This function should be called with lock held.
 *  @param  droq - Droq on which count is checked.
 *  @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* we shall write to cnts at napi irq enable or end of droq tasklet */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}
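
/* Note: last_count is computed with unsigned arithmetic, so the delta
 * stays correct even when the hardware counter read from pkts_sent_reg
 * wraps around past zero.
 */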

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
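
/* Worked example: with 2KB buffers the loop above reserves
 * 64KB / 2KB = 32 descriptors for a maximum-size packet, so a ring of
 * 1024 descriptors ends up with max_empty_descs = 1024 - 32 = 992.
 */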

static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		if (droq->desc_ring && droq->desc_ring[i].info_ptr)
			lio_unmap_ring_info(oct->pci_dev,
					    (u64)droq->desc_ring[i].info_ptr,
					    OCT_DROQ_INFO_SIZE);
		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}

static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		droq->info_list[i].length = 0;

		/* map ring buffers into memory */
		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}
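
/* Each descriptor thus carries two DMA addresses: info_ptr for the
 * per-packet info block the hardware fills in (response header and
 * length), and buffer_ptr for the data buffer itself.
 */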

int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->info_base_addr)
		cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
				       droq->info_alloc_size,
				       droq->info_base_addr,
				       droq->info_list_dma);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);

	return 0;
}

int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else if (OCTEON_CN23XX_VF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	set_dev_node(&oct->pci_dev->dev, numa_node);
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!droq->desc_ring)
		droq->desc_ring =
			lio_dma_alloc(oct, desc_ring_size,
				      (dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->info_list =
		cnnic_numa_alloc_aligned_dma((droq->max_count *
					      OCT_DROQ_INFO_SIZE),
					     &droq->info_alloc_size,
					     &droq->info_base_addr,
					     numa_node);
	if (!droq->info_list) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);
		return 1;
	}

	droq->recv_buf_list = (struct octeon_recv_buffer *)
			      vmalloc_node(droq->max_count *
					   OCT_DROQ_RECVBUF_SIZE,
					   numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = (struct octeon_recv_buffer *)
				      vmalloc(droq->max_count *
					      OCT_DROQ_RECVBUF_SIZE);
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	spin_lock_init(&droq->lock);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}
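
/* Note: the ring, info-list, and buffer-list allocations above are
 * attempted on the queue's NUMA node first (set_dev_node(),
 * vmalloc_node()); where a node-local attempt can fail, the code falls
 * back to an unconstrained allocation.
 */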

/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = &droq->info_list[idx];

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >=
			 droq->buffer_size) ? droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}
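
/* The loop above transfers ownership of the ring buffers to the recv_pkt
 * (recv_buf_list[].buffer is cleared), so the descriptors it covered must
 * be refilled before the hardware can reuse them.
 */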

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;
	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;

			do {
				droq->refill_idx =
					incr_index(droq->refill_idx, 1,
						   droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	}

	return desc_refilled;
}
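
/* Example: if descriptors 5..8 await refill and only 6 and 8 still own
 * buffers (their packets were never dispatched), the loop moves those
 * buffers down to slots 5 and 6, leaving the un-refillable gap contiguous
 * at 7..8.
 */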

/* octeon_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No. of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list
				     [droq->refill_idx].buffer);

		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero here, pass two cannot
	 * change it. We only moved buffers to close the gap in the ring,
	 * so we still have the same no. of buffers to refill.
	 */
	return desc_refilled;
}

static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	u32 buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;

	return buf_cnt;
}
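
/* This is just ceil(total_len / buf_size) computed without a division:
 * e.g. total_len = 5000 with buf_size = 2048 yields buf_cnt = 3.
 */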

static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt,
						droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}
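
/* The packet is only queued here; the registered handler runs later, from
 * octeon_droq_process_packets() or octeon_droq_process_poll_pkts(), once
 * droq->lock has been dropped.
 */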

static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length -= OCT_RH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
		droq->refill_count += buf_cnt;
	}
}

static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	struct octeon_droq_info *info;
	union octeon_rh *rh;
	u32 pkt, total_len = 0, pkt_count;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		info->length -= OCT_RH_SIZE;
		rh = &info->rh;

		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh,
							   info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[idx].
							buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel(desc_refilled, droq->pkts_credit_reg);
			/* make sure mmio write completes */
			mmiowb();
		}
	}                       /* for (each packet)... */

	/* Update stats with the packets and bytes processed in this call. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return pkts_to_process;
	}

	return pkt;
}
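
/* Flow-control note: writing the number of refilled descriptors to
 * pkts_credit_reg hands those descriptors back to the hardware; the wmb()
 * beforehand ensures the descriptor and info updates are visible in memory
 * before the device is told it may use them.
 */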

int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0, pkts_processed = 0;
	struct list_head *tmp, *tmp2;

	/* Grab the droq lock */
	spin_lock(&droq->lock);

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count) {
		spin_unlock(&droq->lock);
		return 0;
	}

	if (pkt_count > budget)
		pkt_count = budget;

	pkts_processed = octeon_droq_fast_process_packets(oct, droq,
							  pkt_count);

	atomic_sub(pkts_processed, &droq->pkts_pending);

	/* Release the spin lock */
	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets pending, schedule the tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}

/**
 * Utility function to poll for packets. check_hw_for_packets must be
 * called before calling this routine.
 */
static int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);

		total_pkts_processed += pkts_processed;
	}

	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}
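
/* Poll-mode entry point used by the interrupt/NAPI path:
 * POLL_EVENT_PROCESS_PKTS processes up to 'arg' packets,
 * POLL_EVENT_PENDING_PKTS drains whatever is currently pending, and
 * POLL_EVENT_ENABLE_INTR re-arms the per-queue interrupts.
 */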

int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
			     u32 arg)
{
	struct octeon_droq *droq;

	droq = oct->droq[q_no];

	if (cmd == POLL_EVENT_PROCESS_PKTS)
		return octeon_droq_process_poll_pkts(oct, droq, arg);

	if (cmd == POLL_EVENT_PENDING_PKTS) {
		u32 pkt_cnt = atomic_read(&droq->pkts_pending);

		return octeon_droq_process_packets(oct, droq, pkt_cnt);
	}

	if (cmd == POLL_EVENT_ENABLE_INTR) {
		u32 value;
		unsigned long flags;

		/* Enable Pkt Interrupt */
		switch (oct->chip_id) {
		case OCTEON_CN66XX:
		case OCTEON_CN68XX: {
			struct octeon_cn6xxx *cn6xxx =
				(struct octeon_cn6xxx *)oct->chip;

			spin_lock_irqsave
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			value = octeon_read_csr(oct,
						CN6XXX_SLI_PKT_TIME_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_TIME_INT_ENB,
					 value);
			value = octeon_read_csr(oct,
						CN6XXX_SLI_PKT_CNT_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_CNT_INT_ENB,
					 value);

			/* don't bother flushing the enables */

			spin_unlock_irqrestore
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
		}
			break;
		case OCTEON_CN23XX_PF_VID:
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
			break;
		case OCTEON_CN23XX_VF_VID:
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
			break;
		}
		return 0;
	}

	dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
	return -EINVAL;
}

int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_droq *droq;
	unsigned long flags;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!ops) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	spin_lock_irqsave(&droq->lock, flags);

	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}
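
/* Typical usage from the NIC module (illustrative sketch only; the
 * callback my_rx_handler and context my_ctx are hypothetical names
 * supplied by the caller):
 *
 *	struct octeon_droq_ops ops;
 *
 *	memset(&ops, 0, sizeof(ops));
 *	ops.fptr = my_rx_handler;
 *	ops.farg = my_ctx;
 *	ops.drop_on_max = 1;
 *	octeon_register_droq_ops(oct, q_no, &ops);
 */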

int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	unsigned long flags;
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	spin_lock_irqsave(&droq->lock, flags);

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -1;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -1;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings */

	/* As of now not required, as settings are done for all 32 Droqs at
	 * init time.
	 */
	return 0;
}