/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};
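/* Note on struct __dispatch: its struct list_head is the first member, which
 * is why the dispatch-list walkers below can cast a struct list_head *
 * directly to a struct __dispatch *.
 */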
/** Get the argument that the user set when registering dispatch
 *  function for a given opcode/subcode.
 *  @param  octeon_dev - the octeon device pointer.
 *  @param  opcode     - the opcode for which the dispatch argument
 *                       is to be checked.
 *  @param  subcode    - the subcode for which the dispatch argument
 *                       is to be checked.
 *  @return  Success: void * (argument to the dispatch function)
 *  @return  Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
					    u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}
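/* Sketch of the lookup above, assuming OPCODE_SUBCODE() packs opcode and
 * subcode into a single 16-bit key (the exact packing is defined in
 * liquidio_common.h): the key masked with OCTEON_OPCODE_MASK selects a
 * dlist[] bucket; the bucket's own entry is tried first, and colliding
 * registrations are found by a linear walk of the bucket's list.
 */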
/** Check for packets on Droq. This function should be called with lock held.
 *  @param  droq - Droq on which count is checked.
 *  @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* we shall write to cnts at napi irq enable or end of droq tasklet */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}
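/* pkts_sent_reg exposes a running hardware count; only the delta since the
 * cached droq->pkt_count is added to pkts_pending, so back-to-back calls
 * without new arrivals add nothing.
 */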
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
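/* Worked example (illustrative numbers): with 2048-byte buffers the loop
 * iterates 32 times (32 * 2048 = 64K), so a ring of 1024 descriptors gets
 * max_empty_descs = 1024 - 32 = 992. Actual ring and buffer sizes come
 * from the chip configuration.
 */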
static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}
static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}
static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		droq->info_list[i].length = 0;

		/* map ring buffers into memory */
		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}
int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->info_base_addr)
		lio_free_info_buffer(oct, droq);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);

	return 0;
}
int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else if (OCTEON_CN23XX_VF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	set_dev_node(&oct->pci_dev->dev, numa_node);
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!droq->desc_ring)
		droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->info_list = lio_alloc_info_buffer(oct, droq);
	if (!droq->info_list) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);
		return 1;
	}

	droq->recv_buf_list = (struct octeon_recv_buffer *)
			      vmalloc_node(droq->max_count *
					   OCT_DROQ_RECVBUF_SIZE,
					   numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = (struct octeon_recv_buffer *)
				      vmalloc(droq->max_count *
					      OCT_DROQ_RECVBUF_SIZE);
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	spin_lock_init(&droq->lock);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}
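/* Allocation pattern used above: each allocation is first attempted on the
 * queue's NUMA node (set_dev_node()/vmalloc_node()) and retried without the
 * node hint on failure, preferring a remote-node allocation over failing
 * queue setup outright.
 */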
/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = &droq->info_list[idx];

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >=
			 droq->buffer_size) ? droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}
/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;
	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;

			do {
				droq->refill_idx = incr_index(droq->refill_idx,
							      1,
							      droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].
				 buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	}
	return desc_refilled;
}
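/* Illustrative walk of the pullup above: if refill_idx is 4 and descriptors
 * 4-5 lost their buffers to dispatch while 6-7 kept theirs, the buffers of
 * 6 and 7 are moved into 4 and 5, so the descriptors still lacking buffers
 * end up contiguous and refill_idx keeps advancing through filled slots.
 */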
/* octeon_droq_refill
 * Parameters:
 *  droq - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No. of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer =
				buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list
				     [droq->refill_idx].buffer);
		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still nonzero, it does not change in the
	 * pullup pass: buffers were only moved to close the gap in the ring,
	 * so the same number of descriptors still awaits refill.
	 */
	return desc_refilled;
}
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	u32 buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;
	return buf_cnt;
}
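/* Worked example: buf_size = 2048 and total_len = 5000 gives buf_cnt = 3,
 * since 2 * 2048 = 4096 < 5000 <= 3 * 2048; a total_len of 0 yields 0.
 */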
static inline u32
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}
static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length -= OCT_RH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
		droq->refill_count += buf_cnt;
	}
}
static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	struct octeon_droq_info *info;
	union octeon_rh *rh;
	u32 pkt, total_len = 0, pkt_count;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		info->length -= OCT_RH_SIZE;
		rh = &info->rh;

		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[idx].
							buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel(desc_refilled, droq->pkts_credit_reg);
			/* make sure mmio write completes */
			mmiowb();
		}
	} /* for (each packet)... */

	/* Update stats with the number of packets and bytes processed. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return pkts_to_process;
	}

	return pkt;
}
u32
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0, pkts_processed = 0;
	struct list_head *tmp, *tmp2;

	/* Grab the droq lock */
	spin_lock(&droq->lock);

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count) {
		spin_unlock(&droq->lock);
		return 0;
	}

	if (pkt_count > budget)
		pkt_count = budget;

	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

	atomic_sub(pkts_processed, &droq->pkts_pending);

	/* Release the spin lock */
	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets pending, schedule tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}
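/* Note: the dispatch entries queued under droq->lock are run only after the
 * lock is dropped, so registered dispatch functions never execute while the
 * droq spinlock is held.
 */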
/**
 * Utility function to poll for packets. check_hw_for_packets must be
 * called before calling this routine.
 */
static u32
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);

		total_pkts_processed += pkts_processed;
	}

	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}
int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
			     u32 arg)
{
	struct octeon_droq *droq;

	droq = oct->droq[q_no];

	if (cmd == POLL_EVENT_PROCESS_PKTS)
		return octeon_droq_process_poll_pkts(oct, droq, arg);

	if (cmd == POLL_EVENT_PENDING_PKTS) {
		u32 pkt_cnt = atomic_read(&droq->pkts_pending);

		return octeon_droq_process_packets(oct, droq, pkt_cnt);
	}

	if (cmd == POLL_EVENT_ENABLE_INTR) {
		u32 value;
		unsigned long flags;

		/* Enable Pkt Interrupt */
		switch (oct->chip_id) {
		case OCTEON_CN66XX:
		case OCTEON_CN68XX: {
			struct octeon_cn6xxx *cn6xxx =
				(struct octeon_cn6xxx *)oct->chip;
			spin_lock_irqsave
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			value = octeon_read_csr(oct,
						CN6XXX_SLI_PKT_TIME_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_TIME_INT_ENB,
					 value);
			value = octeon_read_csr(oct,
						CN6XXX_SLI_PKT_CNT_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_CNT_INT_ENB,
					 value);

			/* don't bother flushing the enables */

			spin_unlock_irqrestore
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
		}
		break;

		case OCTEON_CN23XX_PF_VID: {
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
		}
		break;

		case OCTEON_CN23XX_VF_VID:
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
			break;
		}
		return 0;
	}

	dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
	return -EINVAL;
}
int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_droq *droq;
	unsigned long flags;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!(ops)) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	spin_lock_irqsave(&droq->lock, flags);

	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}
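/* Illustrative registration sketch (callback names are hypothetical; only
 * the octeon_droq_ops fields referenced in this file are shown). fptr is
 * called once per received packet, farg is handed back to it untouched, and
 * drop_on_max makes the DROQ drop packets beyond the processing budget:
 *
 *	struct octeon_droq_ops ops;
 *
 *	memset(&ops, 0, sizeof(ops));
 *	ops.fptr = my_rx_callback;
 *	ops.farg = my_netdev;
 *	ops.drop_on_max = 1;
 *	octeon_register_droq_ops(oct, q_no, &ops);
 */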
int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	unsigned long flags;
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	spin_lock_irqsave(&droq->lock, flags);

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}
int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -1;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -1;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings */

	/* As of now not required, as settings are done for all 32 Droqs at
	 * the same time.
	 */
	return 0;
}
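/* Illustrative creation sketch (the numbers are placeholders, not defaults;
 * real values come from the per-chip octeon_config):
 *
 *	if (octeon_create_droq(oct, q_no, 1024, 2048, NULL))
 *		return -ENOMEM;
 *	octeon_register_droq_ops(oct, q_no, &ops);
 */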