// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>

static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
{
        return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
}

static void gve_free_page_dqo(struct gve_priv *priv,
                              struct gve_rx_buf_state_dqo *bs)
{
        page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
        gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
                      DMA_FROM_DEVICE);
        bs->page_info.page = NULL;
}

static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
{
        struct gve_rx_buf_state_dqo *buf_state;
        s16 buffer_id;

        buffer_id = rx->dqo.free_buf_states;
        if (unlikely(buffer_id == -1))
                return NULL;

        buf_state = &rx->dqo.buf_states[buffer_id];

        /* Remove buf_state from free list */
        rx->dqo.free_buf_states = buf_state->next;

        /* Point buf_state to itself to mark it as allocated */
        buf_state->next = buffer_id;

        return buf_state;
}

static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
                                       struct gve_rx_buf_state_dqo *buf_state)
{
        s16 buffer_id = buf_state - rx->dqo.buf_states;

        return buf_state->next == buffer_id;
}

static void gve_free_buf_state(struct gve_rx_ring *rx,
                               struct gve_rx_buf_state_dqo *buf_state)
{
        s16 buffer_id = buf_state - rx->dqo.buf_states;

        buf_state->next = rx->dqo.free_buf_states;
        rx->dqo.free_buf_states = buffer_id;
}

static struct gve_rx_buf_state_dqo *
gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
{
        struct gve_rx_buf_state_dqo *buf_state;
        s16 buffer_id;

        buffer_id = list->head;
        if (unlikely(buffer_id == -1))
                return NULL;

        buf_state = &rx->dqo.buf_states[buffer_id];

        /* Remove buf_state from list */
        list->head = buf_state->next;
        if (buf_state->next == -1)
                list->tail = -1;

        /* Point buf_state to itself to mark it as allocated */
        buf_state->next = buffer_id;

        return buf_state;
}

static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
                                  struct gve_index_list *list,
                                  struct gve_rx_buf_state_dqo *buf_state)
{
        s16 buffer_id = buf_state - rx->dqo.buf_states;

        buf_state->next = -1;

        if (list->head == -1) {
                list->head = buffer_id;
                list->tail = buffer_id;
        } else {
                int tail = list->tail;

                rx->dqo.buf_states[tail].next = buffer_id;
                list->tail = buffer_id;
        }
}

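/* Pick a buffer state that can be posted back to the NIC: first try the
 * recycled list, then scan a few entries of the used list for one whose page
 * is no longer referenced by any skb. If the free list is exhausted, drop one
 * used entry (and its page) so that its state can be reallocated.
 */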
static struct gve_rx_buf_state_dqo *
gve_get_recycled_buf_state(struct gve_rx_ring *rx)
{
        struct gve_rx_buf_state_dqo *buf_state;
        int i;

        /* Recycled buf states are immediately usable. */
        buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
        if (likely(buf_state))
                return buf_state;

        if (unlikely(rx->dqo.used_buf_states.head == -1))
                return NULL;

        /* Used buf states are only usable when ref count reaches 0, which means
         * no SKBs refer to them.
         *
         * Search a limited number before giving up.
         */
        for (i = 0; i < 5; i++) {
                buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
                if (gve_buf_ref_cnt(buf_state) == 0)
                        return buf_state;

                gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
        }

        /* If there are no free buf states discard an entry from
         * `used_buf_states` so it can be used.
         */
        if (unlikely(rx->dqo.free_buf_states == -1)) {
                buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
                if (gve_buf_ref_cnt(buf_state) == 0)
                        return buf_state;

                gve_free_page_dqo(rx->gve, buf_state);
                gve_free_buf_state(rx, buf_state);
        }

        return NULL;
}

static int gve_alloc_page_dqo(struct gve_priv *priv,
                              struct gve_rx_buf_state_dqo *buf_state)
{
        int err;

        err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
                             &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
        if (err)
                return err;

        buf_state->page_info.page_offset = 0;
        buf_state->page_info.page_address =
                page_address(buf_state->page_info.page);
        buf_state->last_single_ref_offset = 0;

        /* The page already has 1 ref. */
        page_ref_add(buf_state->page_info.page, INT_MAX - 1);
        buf_state->page_info.pagecnt_bias = INT_MAX;

        return 0;
}

static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
{
        struct gve_rx_ring *rx = &priv->rx[idx];
        struct device *hdev = &priv->pdev->dev;
        size_t completion_queue_slots;
        size_t buffer_queue_slots;
        size_t size;
        int i;

        completion_queue_slots = rx->dqo.complq.mask + 1;
        buffer_queue_slots = rx->dqo.bufq.mask + 1;

        gve_rx_remove_from_block(priv, idx);

        if (rx->q_resources) {
                dma_free_coherent(hdev, sizeof(*rx->q_resources),
                                  rx->q_resources, rx->q_resources_bus);
                rx->q_resources = NULL;
        }

        for (i = 0; i < rx->dqo.num_buf_states; i++) {
                struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];

                if (bs->page_info.page)
                        gve_free_page_dqo(priv, bs);
        }

        if (rx->dqo.bufq.desc_ring) {
                size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
                dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
                                  rx->dqo.bufq.bus);
                rx->dqo.bufq.desc_ring = NULL;
        }

        if (rx->dqo.complq.desc_ring) {
                size = sizeof(rx->dqo.complq.desc_ring[0]) *
                        completion_queue_slots;
                dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
                                  rx->dqo.complq.bus);
                rx->dqo.complq.desc_ring = NULL;
        }

        kvfree(rx->dqo.buf_states);
        rx->dqo.buf_states = NULL;

        netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
{
        struct gve_rx_ring *rx = &priv->rx[idx];
        struct device *hdev = &priv->pdev->dev;
        size_t size;
        int i;

        const u32 buffer_queue_slots =
                priv->options_dqo_rda.rx_buff_ring_entries;
        const u32 completion_queue_slots = priv->rx_desc_cnt;

        netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");

        memset(rx, 0, sizeof(*rx));
        rx->gve = priv;
        rx->q_num = idx;
        rx->dqo.bufq.mask = buffer_queue_slots - 1;
        rx->dqo.complq.num_free_slots = completion_queue_slots;
        rx->dqo.complq.mask = completion_queue_slots - 1;
        rx->skb_head = NULL;
        rx->skb_tail = NULL;

        rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
        rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
                                      sizeof(rx->dqo.buf_states[0]),
                                      GFP_KERNEL);
        if (!rx->dqo.buf_states)
                return -ENOMEM;

        /* Set up linked list of buffer IDs */
        for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
                rx->dqo.buf_states[i].next = i + 1;

        rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
        rx->dqo.recycled_buf_states.head = -1;
        rx->dqo.recycled_buf_states.tail = -1;
        rx->dqo.used_buf_states.head = -1;
        rx->dqo.used_buf_states.tail = -1;

        /* Allocate RX completion queue */
        size = sizeof(rx->dqo.complq.desc_ring[0]) *
                completion_queue_slots;
        rx->dqo.complq.desc_ring =
                dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
        if (!rx->dqo.complq.desc_ring)
                goto err;

        /* Allocate RX buffer queue */
        size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
        rx->dqo.bufq.desc_ring =
                dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
        if (!rx->dqo.bufq.desc_ring)
                goto err;

        rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
                                             &rx->q_resources_bus, GFP_KERNEL);
        if (!rx->q_resources)
                goto err;

        gve_rx_add_to_block(priv, idx);

        return 0;

err:
        gve_rx_free_ring_dqo(priv, idx);
        return -ENOMEM;
}

void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
{
        const struct gve_rx_ring *rx = &priv->rx[queue_idx];
        u64 index = be32_to_cpu(rx->q_resources->db_index);

        iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}

int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
{
        int err = 0;
        int i;

        for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                err = gve_rx_alloc_ring_dqo(priv, i);
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "Failed to alloc rx ring=%d: err=%d\n",
                                  i, err);
                        goto err;
                }
        }

        return 0;

err:
        for (i--; i >= 0; i--)
                gve_rx_free_ring_dqo(priv, i);

        return err;
}

void gve_rx_free_rings_dqo(struct gve_priv *priv)
{
        int i;

        for (i = 0; i < priv->rx_cfg.num_queues; i++)
                gve_rx_free_ring_dqo(priv, i);
}

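/* Post as many buffers to the buffer queue as the completion queue has room
 * to acknowledge. Each posted descriptor carries a buffer ID and the DMA
 * address of the next chunk of the backing page; the doorbell is rung every
 * GVE_RX_BUF_THRESH_DQO slots rather than once per buffer.
 */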
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
{
        struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
        struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
        struct gve_priv *priv = rx->gve;
        u32 num_avail_slots;
        u32 num_full_slots;
        u32 num_posted = 0;

        num_full_slots = (bufq->tail - bufq->head) & bufq->mask;
        num_avail_slots = bufq->mask - num_full_slots;

        num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
        while (num_posted < num_avail_slots) {
                struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
                struct gve_rx_buf_state_dqo *buf_state;

                buf_state = gve_get_recycled_buf_state(rx);
                if (unlikely(!buf_state)) {
                        buf_state = gve_alloc_buf_state(rx);
                        if (unlikely(!buf_state))
                                break;

                        if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
                                u64_stats_update_begin(&rx->statss);
                                rx->rx_buf_alloc_fail++;
                                u64_stats_update_end(&rx->statss);
                                gve_free_buf_state(rx, buf_state);
                                break;
                        }
                }

                desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
                desc->buf_addr = cpu_to_le64(buf_state->addr +
                                             buf_state->page_info.page_offset);

                bufq->tail = (bufq->tail + 1) & bufq->mask;
                complq->num_free_slots--;
                num_posted++;

                if ((bufq->tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0)
                        gve_rx_write_doorbell_dqo(priv, rx->q_num);
        }

        rx->fill_cnt += num_posted;
}

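/* Advance the page backing buf_state to its next buffer-sized chunk and
 * decide whether it can be handed back to the NIC. The offset at which the
 * page was last seen with a single remaining reference is recorded; if we
 * wrap all the way around to that offset without ever dropping back to one
 * reference, the buffer is parked on the used list instead of being recycled.
 */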
static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
                                struct gve_rx_buf_state_dqo *buf_state)
{
        const int data_buffer_size = priv->data_buffer_size_dqo;
        int pagecount;

        /* Can't reuse if we only fit one buffer per page */
        if (data_buffer_size * 2 > PAGE_SIZE)
                goto mark_used;

        pagecount = gve_buf_ref_cnt(buf_state);

        /* Record the offset when we have a single remaining reference.
         *
         * When this happens, we know all of the other offsets of the page are
         * usable.
         */
        if (pagecount == 1) {
                buf_state->last_single_ref_offset =
                        buf_state->page_info.page_offset;
        }

        /* Use the next buffer sized chunk in the page. */
        buf_state->page_info.page_offset += data_buffer_size;
        buf_state->page_info.page_offset &= (PAGE_SIZE - 1);

        /* If we wrap around to the same offset without ever dropping to 1
         * reference, then we don't know if this offset was ever freed.
         */
        if (buf_state->page_info.page_offset ==
            buf_state->last_single_ref_offset) {
                goto mark_used;
        }

        gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
        return;

mark_used:
        gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
}

static void gve_rx_skb_csum(struct sk_buff *skb,
                            const struct gve_rx_compl_desc_dqo *desc,
                            struct gve_ptype ptype)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* HW did not identify and process L3 and L4 headers. */
        if (unlikely(!desc->l3_l4_processed))
                return;

        if (ptype.l3_type == GVE_L3_TYPE_IPV4) {
                if (unlikely(desc->csum_ip_err || desc->csum_external_ip_err))
                        return;
        } else if (ptype.l3_type == GVE_L3_TYPE_IPV6) {
                /* Checksum should be skipped if this flag is set. */
                if (unlikely(desc->ipv6_ex_add))
                        return;
        }

        if (unlikely(desc->csum_l4_err))
                return;

        switch (ptype.l4_type) {
        case GVE_L4_TYPE_TCP:
        case GVE_L4_TYPE_UDP:
        case GVE_L4_TYPE_ICMP:
        case GVE_L4_TYPE_SCTP:
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                break;
        default:
                break;
        }
}

static void gve_rx_skb_hash(struct sk_buff *skb,
                            const struct gve_rx_compl_desc_dqo *compl_desc,
                            struct gve_ptype ptype)
{
        enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2;

        if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN)
                hash_type = PKT_HASH_TYPE_L4;
        else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN)
                hash_type = PKT_HASH_TYPE_L3;

        skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}

static void gve_rx_free_skb(struct gve_rx_ring *rx)
{
        if (!rx->skb_head)
                return;

        dev_kfree_skb_any(rx->skb_head);
        rx->skb_head = NULL;
        rx->skb_tail = NULL;
}

/* Chains multi skbs for single rx packet.
 * Returns 0 if buffer is appended, -1 otherwise.
 */
static int gve_rx_append_frags(struct napi_struct *napi,
                               struct gve_rx_buf_state_dqo *buf_state,
                               u16 buf_len, struct gve_rx_ring *rx,
                               struct gve_priv *priv)
{
        int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;

        if (unlikely(num_frags == MAX_SKB_FRAGS)) {
                struct sk_buff *skb;

                skb = napi_alloc_skb(napi, 0);
                if (!skb)
                        return -1;

                skb_shinfo(rx->skb_tail)->frag_list = skb;
                rx->skb_tail = skb;
                num_frags = 0;
        }
        if (rx->skb_tail != rx->skb_head) {
                rx->skb_head->len += buf_len;
                rx->skb_head->data_len += buf_len;
                rx->skb_head->truesize += priv->data_buffer_size_dqo;
        }

        skb_add_rx_frag(rx->skb_tail, num_frags,
                        buf_state->page_info.page,
                        buf_state->page_info.page_offset,
                        buf_len, priv->data_buffer_size_dqo);
        gve_dec_pagecnt_bias(&buf_state->page_info);

        return 0;
}

/* Returns 0 if descriptor is completed successfully.
 * Returns -EINVAL if descriptor is invalid.
 * Returns -ENOMEM if data cannot be copied to skb.
 */
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
                      const struct gve_rx_compl_desc_dqo *compl_desc,
                      int queue_idx)
{
        const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
        const bool eop = compl_desc->end_of_packet != 0;
        struct gve_rx_buf_state_dqo *buf_state;
        struct gve_priv *priv = rx->gve;
        u16 buf_len;

        if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
                net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
                                    priv->dev->name, buffer_id);
                return -EINVAL;
        }
        buf_state = &rx->dqo.buf_states[buffer_id];
        if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) {
                net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n",
                                    priv->dev->name, buffer_id);
                return -EINVAL;
        }

        if (unlikely(compl_desc->rx_error)) {
                gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
                                      buf_state);
                return -EINVAL;
        }

        buf_len = compl_desc->packet_len;

        /* Page might have not been used for awhile and was likely last written
         * by a different thread.
         */
        prefetch(buf_state->page_info.page);

        /* Sync the portion of dma buffer for CPU to read. */
        dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
                                      buf_state->page_info.page_offset,
                                      buf_len, DMA_FROM_DEVICE);

        /* Append to current skb if one exists. */
        if (rx->skb_head) {
                if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
                                                 priv)) != 0) {
                        goto error;
                }

                gve_try_recycle_buf(priv, rx, buf_state);
                return 0;
        }

        if (eop && buf_len <= priv->rx_copybreak) {
                rx->skb_head = gve_rx_copy(priv->dev, napi,
                                           &buf_state->page_info, buf_len, 0);
                if (unlikely(!rx->skb_head))
                        goto error;
                rx->skb_tail = rx->skb_head;

                u64_stats_update_begin(&rx->statss);
                rx->rx_copied_pkt++;
                rx->rx_copybreak_pkt++;
                u64_stats_update_end(&rx->statss);

                gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
                                      buf_state);
                return 0;
        }

        rx->skb_head = napi_get_frags(napi);
        if (unlikely(!rx->skb_head))
                goto error;
        rx->skb_tail = rx->skb_head;

        skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
                        buf_state->page_info.page_offset, buf_len,
                        priv->data_buffer_size_dqo);
        gve_dec_pagecnt_bias(&buf_state->page_info);

        gve_try_recycle_buf(priv, rx, buf_state);
        return 0;

error:
        gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
        return -ENOMEM;
}

static int gve_rx_complete_rsc(struct sk_buff *skb,
                               const struct gve_rx_compl_desc_dqo *desc,
                               struct gve_ptype ptype)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        /* Only TCP is supported right now. */
        if (ptype.l4_type != GVE_L4_TYPE_TCP)
                return -EINVAL;

        switch (ptype.l3_type) {
        case GVE_L3_TYPE_IPV4:
                shinfo->gso_type = SKB_GSO_TCPV4;
                break;
        case GVE_L3_TYPE_IPV6:
                shinfo->gso_type = SKB_GSO_TCPV6;
                break;
        default:
                return -EINVAL;
        }

        shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);
        return 0;
}

/* Returns 0 if skb is completed successfully, -1 otherwise. */
static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
                               const struct gve_rx_compl_desc_dqo *desc,
                               netdev_features_t feat)
{
        struct gve_ptype ptype =
                rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
        int err;

        skb_record_rx_queue(rx->skb_head, rx->q_num);

        if (feat & NETIF_F_RXHASH)
                gve_rx_skb_hash(rx->skb_head, desc, ptype);

        if (feat & NETIF_F_RXCSUM)
                gve_rx_skb_csum(rx->skb_head, desc, ptype);

        /* RSC packets must set gso_size otherwise the TCP stack will complain
         * that packets are larger than MTU.
         */
        if (desc->rsc) {
                err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
                if (err < 0)
                        return err;
        }

        if (skb_headlen(rx->skb_head) == 0)
                napi_gro_frags(napi);
        else
                napi_gro_receive(napi, rx->skb_head);

        return 0;
}

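/* NAPI poll handler for a DQO RX queue. A completion descriptor belongs to
 * the driver while its generation bit differs from complq->cur_gen_bit, which
 * flips each time the completion ring wraps; processing stops as soon as the
 * bits match. Completed packets are handed to GRO and fresh buffers are
 * posted to the buffer queue before returning.
 */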
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
        struct napi_struct *napi = &block->napi;
        netdev_features_t feat = napi->dev->features;

        struct gve_rx_ring *rx = block->rx;
        struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;

        u32 work_done = 0;
        u64 bytes = 0;
        int err;

        while (work_done < budget) {
                struct gve_rx_compl_desc_dqo *compl_desc =
                        &complq->desc_ring[complq->head];
                u32 pkt_bytes;

                /* No more new packets */
                if (compl_desc->generation == complq->cur_gen_bit)
                        break;

                /* Prefetch the next two descriptors. */
                prefetch(&complq->desc_ring[(complq->head + 1) & complq->mask]);
                prefetch(&complq->desc_ring[(complq->head + 2) & complq->mask]);

                /* Do not read data until we own the descriptor */
                dma_rmb();

                err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
                if (err < 0) {
                        gve_rx_free_skb(rx);
                        u64_stats_update_begin(&rx->statss);
                        if (err == -ENOMEM)
                                rx->rx_skb_alloc_fail++;
                        else if (err == -EINVAL)
                                rx->rx_desc_err_dropped_pkt++;
                        u64_stats_update_end(&rx->statss);
                }

                complq->head = (complq->head + 1) & complq->mask;
                complq->num_free_slots++;

                /* When the ring wraps, the generation bit is flipped. */
                complq->cur_gen_bit ^= (complq->head == 0);

                /* Receiving a completion means we have space to post another
                 * buffer on the buffer queue.
                 */
                {
                        struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;

                        bufq->head = (bufq->head + 1) & bufq->mask;
                }

                /* Free running counter of completed descriptors */
                rx->cnt++;

                if (!compl_desc->end_of_packet)
                        continue;

                work_done++;
                pkt_bytes = rx->skb_head->len;
                /* The ethernet header (first ETH_HLEN bytes) is snipped off
                 * by eth_type_trans.
                 */
                if (skb_headlen(rx->skb_head))
                        pkt_bytes += ETH_HLEN;

                /* gve_rx_complete_skb() will consume skb if successful */
                if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
                        gve_rx_free_skb(rx);
                        u64_stats_update_begin(&rx->statss);
                        rx->rx_desc_err_dropped_pkt++;
                        u64_stats_update_end(&rx->statss);
                        continue;
                }

                bytes += pkt_bytes;
                rx->skb_head = NULL;
                rx->skb_tail = NULL;
        }

        gve_rx_post_buffers_dqo(rx);

        u64_stats_update_begin(&rx->statss);
        rx->rpackets += work_done;
        rx->rbytes += bytes;
        u64_stats_update_end(&rx->statss);

        return work_done;
}