// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include <linux/vmalloc.h>

/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS	13
#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
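/* Editorial example (derived from the macros above, not in the original
 * source): a linear skb (count == 1) needs ENETC_TXBDS_NEEDED(1) == 3 free
 * BDs, while the worst case, head + 13 fragments plus the extension BD and
 * gap, comes to ENETC_TXBDS_MAX_NEEDED == ENETC_TXBDS_NEEDED(13 + 1) == 16.
 */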
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int active_offloads);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr *tx_ring;
	int count;

	tx_ring = priv->tx_ring[skb->queue_mapping];

	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
		if (unlikely(skb_linearize(skb)))
			goto drop_packet_err;

	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
		netif_stop_subqueue(ndev, tx_ring->index);
		return NETDEV_TX_BUSY;
	}

	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
	if (unlikely(!count))
		goto drop_packet_err;

	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
		netif_stop_subqueue(ndev, tx_ring->index);

	return NETDEV_TX_OK;

drop_packet_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
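/* Editorial note on the flow-control protocol above: the subqueue is
 * stopped whenever the ring may not hold a worst-case frame
 * (ENETC_TXBDS_MAX_NEEDED free BDs), and enetc_clean_tx_ring() below is
 * expected to wake it once at least that many BDs have been reclaimed.
 */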
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_dma_page)
		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
			       tx_swbd->len, DMA_TO_DEVICE);
	else
		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
				 tx_swbd->len, DMA_TO_DEVICE);
}
static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
			      struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->dma)
		enetc_unmap_tx_buff(tx_ring, tx_swbd);

	if (tx_swbd->skb) {
		dev_kfree_skb_any(tx_swbd->skb);
		tx_swbd->skb = NULL;
	}
}
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int active_offloads)
{
	struct enetc_tx_swbd *tx_swbd;
	skb_frag_t *frag;
	int len = skb_headlen(skb);
	union enetc_tx_bd temp_bd;
	union enetc_tx_bd *txbd;
	bool do_vlan, do_tstamp;
	int i, count = 0;
	unsigned int f;
	dma_addr_t dma;
	u8 flags = 0;

	i = tx_ring->next_to_use;
	txbd = ENETC_TXBD(*tx_ring, i);

	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
		goto dma_err;

	temp_bd.addr = cpu_to_le64(dma);
	temp_bd.buf_len = cpu_to_le16(len);
	temp_bd.lstatus = 0;

	tx_swbd = &tx_ring->tx_swbd[i];
	tx_swbd->dma = dma;
	tx_swbd->len = len;
	tx_swbd->is_dma_page = 0;
	count++;

	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
		    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
	tx_swbd->do_tstamp = do_tstamp;
	tx_swbd->check_wb = tx_swbd->do_tstamp;

	if (do_vlan || do_tstamp)
		flags |= ENETC_TXBD_FLAGS_EX;

	if (tx_ring->tsd_enable)
		flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;

	/* first BD needs frm_len and offload flags set */
	temp_bd.frm_len = cpu_to_le16(skb->len);
	temp_bd.flags = flags;

	if (flags & ENETC_TXBD_FLAGS_TSE)
		temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
							  flags);

	if (flags & ENETC_TXBD_FLAGS_EX) {
		u8 e_flags = 0;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		/* add extension BD for VLAN and/or timestamping */
		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}

		if (do_vlan) {
			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
			temp_bd.ext.tpid = 0; /* < C-TAG */
			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
		}

		if (do_tstamp) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
		}

		temp_bd.ext.e_flags = e_flags;
		count++;
	}

	frag = &skb_shinfo(skb)->frags[0];
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_err;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}

		temp_bd.addr = cpu_to_le64(dma);
		temp_bd.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 1;
		count++;
	}

	/* last BD needs 'F' bit set */
	flags |= ENETC_TXBD_FLAGS_F;
	temp_bd.flags = flags;
	*txbd = temp_bd;

	tx_ring->tx_swbd[i].skb = skb;

	enetc_bdr_idx_inc(tx_ring, &i);
	tx_ring->next_to_use = i;

	skb_tx_timestamp(skb);

	/* let H/W know BD ring has been updated */
	enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */

	return count;

dma_err:
	dev_err(tx_ring->dev, "DMA map error");

	do {
		tx_swbd = &tx_ring->tx_swbd[i];
		enetc_free_tx_skb(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	} while (count--);

	return 0;
}
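/* Editorial example (not in the original source) of the BD chain built
 * above for a VLAN-tagged skb with two page fragments:
 * [head BD: frm_len, EX] -> [extension BD: vid, VLAN_INS] ->
 * [frag BD] -> [frag BD: F], i.e. count == 4, with the 'F' (final) flag
 * set only on the last BD written.
 */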
static irqreturn_t enetc_msix(int irq, void *data)
{
	struct enetc_int_vector *v = data;
	int i;

	/* disable interrupts */
	enetc_wr_reg_hot(v->rbier, 0);
	enetc_wr_reg_hot(v->ricr1, v->rx_ictt);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);

	napi_schedule(&v->napi);

	return IRQ_HANDLED;
}
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit);
static void enetc_rx_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct enetc_int_vector *v =
		container_of(dim, struct enetc_int_vector, rx_dim);

	v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
	dim->state = DIM_START_MEASURE;
}
static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
	struct dim_sample dim_sample;

	v->comp_cnt++;

	if (!v->rx_napi_work)
		return;

	dim_update_sample(v->comp_cnt,
			  v->rx_ring.stats.packets,
			  v->rx_ring.stats.bytes,
			  &dim_sample);
	net_dim(&v->rx_dim, dim_sample);
}
static int enetc_poll(struct napi_struct *napi, int budget)
{
	struct enetc_int_vector
		*v = container_of(napi, struct enetc_int_vector, napi);
	bool complete = true;
	int work_done;
	int i;

	for (i = 0; i < v->count_tx_rings; i++)
		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
			complete = false;

	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
	if (work_done == budget)
		complete = false;
	if (work_done)
		v->rx_napi_work = true;

	if (!complete)
		return budget;

	napi_complete_done(napi, work_done);

	if (likely(v->rx_dim_en))
		enetc_rx_net_dim(v);

	v->rx_napi_work = false;

	/* enable interrupts */
	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
				 ENETC_TBIER_TXTIE);

	return work_done;
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
	int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;

	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}
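/* Editorial example (not in the original source): with bd_count == 64,
 * ci == 60 and a hardware consumer index pi == 4, the ring has wrapped
 * and the function returns 64 - 60 + 4 == 8 BDs ready for cleaning.
 */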
static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
				u64 *tstamp)
{
	u32 lo, hi, tstamp_lo;

	lo = enetc_rd_hot(hw, ENETC_SICTR0);
	hi = enetc_rd_hot(hw, ENETC_SICTR1);
	tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
	if (lo <= tstamp_lo)
		hi -= 1;
	*tstamp = (u64)hi << 32 | tstamp_lo;
}
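/* Editorial note: the hardware writes back only the low 32 timestamp
 * bits; the high word is taken from the current SICTR1 counter, with hi
 * decremented by one when the low counter has already wrapped past the
 * captured value (lo <= tstamp_lo).
 */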
static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
{
	struct skb_shared_hwtstamps shhwtstamps;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		skb_tstamp_tx(skb, &shhwtstamps);
	}
}
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
	struct net_device *ndev = tx_ring->ndev;
	int tx_frm_cnt = 0, tx_byte_cnt = 0;
	struct enetc_tx_swbd *tx_swbd;
	int i, bds_to_clean;
	bool do_tstamp = false;
	u64 tstamp = 0;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->tx_swbd[i];

	bds_to_clean = enetc_bd_ready_count(tx_ring, i);

	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
		bool is_eof = !!tx_swbd->skb;

		if (unlikely(tx_swbd->check_wb)) {
			struct enetc_ndev_priv *priv = netdev_priv(ndev);
			union enetc_tx_bd *txbd;

			txbd = ENETC_TXBD(*tx_ring, i);

			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
			    tx_swbd->do_tstamp) {
				enetc_get_tx_tstamp(&priv->si->hw, txbd,
						    &tstamp);
				do_tstamp = true;
			}
		}

		if (likely(tx_swbd->dma))
			enetc_unmap_tx_buff(tx_ring, tx_swbd);

		if (is_eof) {
			if (unlikely(do_tstamp)) {
				enetc_tstamp_tx(tx_swbd->skb, tstamp);
				do_tstamp = false;
			}
			napi_consume_skb(tx_swbd->skb, napi_budget);
			tx_swbd->skb = NULL;
		}

		tx_byte_cnt += tx_swbd->len;

		bds_to_clean--;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
		}

		/* BD iteration loop end */
		if (is_eof) {
			tx_frm_cnt++;
			/* re-arm interrupt source */
			enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
					 BIT(16 + tx_ring->index));
		}

		if (unlikely(!bds_to_clean))
			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
	}

	tx_ring->next_to_clean = i;
	tx_ring->stats.packets += tx_frm_cnt;
	tx_ring->stats.bytes += tx_byte_cnt;

	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
		netif_wake_subqueue(ndev, tx_ring->index);
	}

	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}
static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);

		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = ENETC_RXB_PAD;

	return true;
}
static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);

	for (j = 0; j < buff_cnt; j++) {
		/* try reuse page */
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
				rx_ring->stats.rx_alloc_errs++;
				break;
			}
		}

		/* update RxBD */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = rx_ring->rx_swbd;
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i; /* keep track for page reuse */
		rx_ring->next_to_use = i;
	}

	return j;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
				union enetc_rx_bd *rxbd,
				struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 lo, hi, tstamp_lo;
	u64 tstamp;

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
		lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
		hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
		rxbd = enetc_rxbd_ext(rxbd);
		tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
		if (lo <= tstamp_lo)
			hi -= 1;

		tstamp = (u64)hi << 32 | tstamp_lo;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
	}
}
#endif
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);

	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);

		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
		__be16 tpid = 0;

		switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
		case 0:
			tpid = htons(ETH_P_8021Q);
			break;
		case 1:
			tpid = htons(ETH_P_8021AD);
			break;
		case 2:
			tpid = htons(enetc_port_rd(&priv->si->hw,
						   ENETC_PCVLANR1));
			break;
		case 3:
			tpid = htons(enetc_port_rd(&priv->si->hw,
						   ENETC_PCVLANR2));
			break;
		default:
			break;
		}

		__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
	}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (priv->active_offloads & ENETC_F_RX_TSTAMP)
		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
#endif
}
static void enetc_process_skb(struct enetc_bdr *rx_ring,
			      struct sk_buff *skb)
{
	skb_record_rx_queue(skb, rx_ring->index);
	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
}
static bool enetc_page_reusable(struct page *page)
{
	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}
static void enetc_reuse_page(struct enetc_bdr *rx_ring,
			     struct enetc_rx_swbd *old)
{
	struct enetc_rx_swbd *new;

	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

	/* next buf that may reuse a page */
	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

	/* copy page reference */
	*new = *old;
}
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, DMA_FROM_DEVICE);
	return rx_swbd;
}
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
			      struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
		page_ref_inc(rx_swbd->page);

		enetc_reuse_page(rx_ring, rx_swbd);

		/* sync for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
						 rx_swbd->page_offset,
						 ENETC_RXB_DMA_SIZE,
						 DMA_FROM_DEVICE);
	} else {
		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	rx_swbd->page = NULL;
}
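/* Editorial example of the half-page flipping scheme above (assuming 4K
 * pages): with ENETC_RXB_TRUESIZE == 2K, page_offset toggles between the
 * two 2K halves on every reuse, so a single page can back two in-flight
 * buffers before a fresh allocation is needed.
 */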
static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
						int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	struct sk_buff *skb;
	void *ba;

	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
	if (unlikely(!skb)) {
		rx_ring->stats.rx_alloc_errs++;
		return NULL;
	}

	skb_reserve(skb, ENETC_RXB_PAD);
	__skb_put(skb, size);

	enetc_put_rx_buff(rx_ring, rx_swbd);

	return skb;
}
static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	enetc_put_rx_buff(rx_ring, rx_swbd);
}
#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
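/* Editorial note: enetc_clean_rx_ring() below batches buffer refills in
 * bundles of 16, so the RX consumer index register is written once per
 * bundle rather than once per processed BD.
 */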
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit)
{
	int rx_frm_cnt = 0, rx_byte_cnt = 0;
	int cleaned_cnt, i;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;

	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		struct sk_buff *skb;
		u32 bd_status;
		u16 size;

		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			/* update ENETC's consumer index */
			enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
			cleaned_cnt -= count;
		}

		rxbd = enetc_rxbd(rx_ring, i);
		bd_status = le32_to_cpu(rxbd->r.lstatus);
		if (!bd_status)
			break;

		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
		dma_rmb(); /* for reading other rxbd fields */
		size = le16_to_cpu(rxbd->r.buf_len);
		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
		if (!skb)
			break;

		enetc_get_offloads(rx_ring, rxbd, skb);

		cleaned_cnt++;

		rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
		if (unlikely(++i == rx_ring->bd_count))
			i = 0;

		if (unlikely(bd_status &
			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
			dev_kfree_skb(skb);
			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
				dma_rmb();
				bd_status = le32_to_cpu(rxbd->r.lstatus);

				rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
				if (unlikely(++i == rx_ring->bd_count))
					i = 0;
			}

			rx_ring->ndev->stats.rx_dropped++;
			rx_ring->ndev->stats.rx_errors++;

			break;
		}

		/* not last BD in frame? */
		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
			bd_status = le32_to_cpu(rxbd->r.lstatus);
			size = ENETC_RXB_DMA_SIZE;

			if (bd_status & ENETC_RXBD_LSTATUS_F) {
				dma_rmb();
				size = le16_to_cpu(rxbd->r.buf_len);
			}

			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);

			cleaned_cnt++;

			rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
			if (unlikely(++i == rx_ring->bd_count))
				i = 0;
		}

		rx_byte_cnt += skb->len;

		enetc_process_skb(rx_ring, skb);

		napi_gro_receive(napi, skb);

		rx_frm_cnt++;
	}

	rx_ring->next_to_clean = i;

	rx_ring->stats.packets += rx_frm_cnt;
	rx_ring->stats.bytes += rx_byte_cnt;

	return rx_frm_cnt;
}
/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	u32 val;

	/* find out how many of various resources we have to work with */
	val = enetc_rd(hw, ENETC_SICAPR0);
	si->num_rx_rings = (val >> 16) & 0xff;
	si->num_tx_rings = val & 0xff;

	val = enetc_rd(hw, ENETC_SIRFSCAPR);
	si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
	si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);

	si->num_rss = 0;
	val = enetc_rd(hw, ENETC_SIPCAPR0);
	if (val & ENETC_SIPCAPR0_RSS) {
		u32 rss;

		rss = enetc_rd(hw, ENETC_SIRSSCAPR);
		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
	}

	if (val & ENETC_SIPCAPR0_QBV)
		si->hw_features |= ENETC_SI_F_QBV;

	if (val & ENETC_SIPCAPR0_PSFP)
		si->hw_features |= ENETC_SI_F_PSFP;
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
{
	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
					&r->bd_dma_base, GFP_KERNEL);
	if (!r->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
				  r->bd_dma_base);
		return -EINVAL;
	}

	return 0;
}
static int enetc_alloc_txbdr(struct enetc_bdr *txr)
{
	int err;

	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
	if (!txr->tx_swbd)
		return -ENOMEM;

	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
	if (err) {
		vfree(txr->tx_swbd);
		return err;
	}

	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}
static void enetc_free_txbdr(struct enetc_bdr *txr)
{
	int size, i;

	for (i = 0; i < txr->bd_count; i++)
		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);

	size = txr->bd_count * sizeof(union enetc_tx_bd);

	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
	txr->bd_base = NULL;

	vfree(txr->tx_swbd);
	txr->tx_swbd = NULL;
}
static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
	int i, err;

	for (i = 0; i < priv->num_tx_rings; i++) {
		err = enetc_alloc_txbdr(priv->tx_ring[i]);

		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i-- > 0)
		enetc_free_txbdr(priv->tx_ring[i]);

	return err;
}
static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_free_txbdr(priv->tx_ring[i]);
}
static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
{
	size_t size = sizeof(union enetc_rx_bd);
	int err;

	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
	if (!rxr->rx_swbd)
		return -ENOMEM;

	if (extended)
		size *= 2;

	err = enetc_dma_alloc_bdr(rxr, size);
	if (err) {
		vfree(rxr->rx_swbd);
		return err;
	}

	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;
	rxr->ext_en = extended;

	return 0;
}
static void enetc_free_rxbdr(struct enetc_bdr *rxr)
{
	int size;

	size = rxr->bd_count * sizeof(union enetc_rx_bd);

	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
	rxr->bd_base = NULL;

	vfree(rxr->rx_swbd);
	rxr->rx_swbd = NULL;
}
static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
	bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
	int i, err;

	for (i = 0; i < priv->num_rx_rings; i++) {
		err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);

		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i-- > 0)
		enetc_free_rxbdr(priv->rx_ring[i]);

	return err;
}
static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_free_rxbdr(priv->rx_ring[i]);
}
static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
{
	int i;

	if (!tx_ring->tx_swbd)
		return;

	for (i = 0; i < tx_ring->bd_count; i++) {
		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];

		enetc_free_tx_skb(tx_ring, tx_swbd);
	}

	tx_ring->next_to_clean = 0;
	tx_ring->next_to_use = 0;
}
static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
	int i;

	if (!rx_ring->rx_swbd)
		return;

	for (i = 0; i < rx_ring->bd_count; i++) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		if (!rx_swbd->page)
			continue;

		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rx_swbd->page);
		rx_swbd->page = NULL;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->next_to_alloc = 0;
}
static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_free_rx_ring(priv->rx_ring[i]);

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_free_tx_ring(priv->tx_ring[i]);
}
int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;

	return 0;
}
void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
}
void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
{
	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr(hw, ENETC_SICBDRPIR, 0);
	enetc_wr(hw, ENETC_SICBDRCIR, 0);

	/* enable ring */
	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
}
void enetc_clear_cbdr(struct enetc_hw *hw)
{
	enetc_wr(hw, ENETC_SICBDRMR, 0);
}
static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
	int *rss_table;
	int i;

	rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
	if (!rss_table)
		return -ENOMEM;

	/* Set up RSS table defaults */
	for (i = 0; i < si->num_rss; i++)
		rss_table[i] = i % num_groups;

	enetc_set_rss_table(si, rss_table, si->num_rss);

	kfree(rss_table);

	return 0;
}
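/* Editorial example (not in the original source): with num_rss == 64 and
 * num_groups == 4 (e.g. four RX rings), the default table becomes
 * 0, 1, 2, 3, 0, 1, 2, 3, ..., spreading flows evenly across the rings.
 */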
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	struct enetc_hw *hw = &si->hw;
	int err;

	/* set SI cache attributes */
	enetc_wr(hw, ENETC_SICAR0,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
	/* enable SI */
	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);

	if (si->num_rss) {
		err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
		if (err)
			return err;
	}

	return 0;
}
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	int cpus = num_online_cpus();

	priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
	priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;

	/* Enable all available TX rings in order to configure as many
	 * priorities as possible, when needed.
	 * TODO: Make # of TX rings run-time configurable
	 */
	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
	priv->num_tx_rings = si->num_tx_rings;
	priv->bdr_int_num = cpus;
	priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
	priv->tx_ictt = ENETC_TXIC_TIMETHR;

	/* SI specific */
	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	int err;

	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
	if (err)
		return err;

	enetc_setup_cbdr(&si->hw, &si->cbd_ring);

	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
				  GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto err_alloc_cls;
	}

	return 0;

err_alloc_cls:
	enetc_clear_cbdr(&si->hw);
	enetc_free_cbdr(priv->dev, &si->cbd_ring);

	return err;
}
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;

	enetc_clear_cbdr(&si->hw);
	enetc_free_cbdr(priv->dev, &si->cbd_ring);

	kfree(priv->cls_rules);
}
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	u32 tbmr;

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits(tx_ring->bd_dma_base));

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits(tx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);

	/* enable Tx ints by setting pkt thr to 1 */
	enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);

	tbmr = ENETC_TBMR_EN;
	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tbmr |= ENETC_TBMR_VIH;

	/* enable ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);

	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
	tx_ring->idr = hw->reg + ENETC_SITXIDR;
}
static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;
	u32 rbmr;

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits(rx_ring->bd_dma_base));

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits(rx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);

	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);

	/* enable Rx ints by setting pkt thr to 1 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);

	rbmr = ENETC_RBMR_EN;

	if (rx_ring->ext_en)
		rbmr |= ENETC_RBMR_BDS;

	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rbmr |= ENETC_RBMR_VTE;

	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
	rx_ring->idr = hw->reg + ENETC_SIRXIDR;

	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));

	/* enable ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
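/* Editorial note: the RX ring is populated via enetc_refill_rx_ring()
 * before the final ENETC_RBMR write enables it, so the hardware never
 * sees an enabled ring without buffers.
 */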
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
}
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;

	/* disable EN bit on ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}
static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int delay = 8, timeout = 100;
	int idx = tx_ring->index;

	/* disable EN bit on ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);

	/* wait for busy to clear */
	while (delay < timeout &&
	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
		msleep(delay);
		delay *= 2;
	}

	if (delay >= timeout)
		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
			    idx);
}
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
}
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	cpumask_t cpu_mask;
	int i, j, err;

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
		struct enetc_int_vector *v = priv->int_vector[i];
		int entry = ENETC_BDR_INT_BASE_IDX + i;
		struct enetc_hw *hw = &priv->si->hw;

		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
			 priv->ndev->name, i);
		err = request_irq(irq, enetc_msix, 0, v->name, v);
		if (err) {
			dev_err(priv->dev, "request_irq() failed!\n");
			goto irq_err;
		}

		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
		v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);

		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);

		for (j = 0; j < v->count_tx_rings; j++) {
			int idx = v->tx_ring[j].index;

			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
		}
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(irq, &cpu_mask);
	}

	return 0;

irq_err:
	while (i--) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, priv->int_vector[i]);
	}

	return err;
}
static void enetc_free_irqs(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	int i;

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, priv->int_vector[i]);
	}
}
static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	u32 icpt, ictt;
	int i;

	/* enable Tx & Rx event indication */
	if (priv->ic_mode &
	    (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
		icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
		/* init to non-0 minimum, will be adjusted later */
		ictt = 0x1;
	} else {
		icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
		ictt = 0;
	}

	for (i = 0; i < priv->num_rx_rings; i++) {
		enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
		enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
		enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
	}

	if (priv->ic_mode & ENETC_IC_TX_MANUAL)
		icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
	else
		icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */

	for (i = 0; i < priv->num_tx_rings; i++) {
		enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
		enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
		enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
	}
}
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct ethtool_eee edata;
	int err;

	if (!priv->phylink)
		return 0; /* phy-less mode */

	err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
	if (err) {
		dev_err(&ndev->dev, "could not attach to PHY\n");
		return err;
	}

	/* disable EEE autoneg, until ENETC driver supports it */
	memset(&edata, 0, sizeof(struct ethtool_eee));
	phylink_ethtool_set_eee(priv->phylink, &edata);

	return 0;
}
void enetc_start(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	enetc_setup_interrupts(priv);

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(priv->si->pdev,
					 ENETC_BDR_INT_BASE_IDX + i);

		napi_enable(&priv->int_vector[i]->napi);
		enable_irq(irq);
	}

	if (priv->phylink)
		phylink_start(priv->phylink);
	else
		netif_carrier_on(ndev);

	netif_tx_start_all_queues(ndev);
}
int enetc_open(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	err = enetc_setup_irqs(priv);
	if (err)
		return err;

	err = enetc_phylink_connect(ndev);
	if (err)
		goto err_phy_connect;

	err = enetc_alloc_tx_resources(priv);
	if (err)
		goto err_alloc_tx;

	err = enetc_alloc_rx_resources(priv);
	if (err)
		goto err_alloc_rx;

	err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
	if (err)
		goto err_set_queues;

	enetc_setup_bdrs(priv);
	enetc_start(ndev);

	return 0;

err_set_queues:
	enetc_free_rx_resources(priv);
err_alloc_rx:
	enetc_free_tx_resources(priv);
err_alloc_tx:
	if (priv->phylink)
		phylink_disconnect_phy(priv->phylink);
err_phy_connect:
	enetc_free_irqs(priv);

	return err;
}
void enetc_stop(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	netif_tx_stop_all_queues(ndev);

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(priv->si->pdev,
					 ENETC_BDR_INT_BASE_IDX + i);

		disable_irq(irq);
		napi_synchronize(&priv->int_vector[i]->napi);
		napi_disable(&priv->int_vector[i]->napi);
	}

	if (priv->phylink)
		phylink_stop(priv->phylink);
	else
		netif_carrier_off(ndev);

	enetc_clear_interrupts(priv);
}
int enetc_close(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	enetc_stop(ndev);
	enetc_clear_bdrs(priv);

	if (priv->phylink)
		phylink_disconnect_phy(priv->phylink);
	enetc_free_rxtx_rings(priv);
	enetc_free_rx_resources(priv);
	enetc_free_tx_resources(priv);
	enetc_free_irqs(priv);

	return 0;
}
static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct enetc_bdr *tx_ring;
	u8 num_tc;
	int i;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	if (!num_tc) {
		netdev_reset_tc(ndev);
		netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);

		/* Reset all ring priorities to 0 */
		for (i = 0; i < priv->num_tx_rings; i++) {
			tx_ring = priv->tx_ring[i];
			enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
		}

		return 0;
	}

	/* Check if we have enough BD rings available to accommodate all TCs */
	if (num_tc > priv->num_tx_rings) {
		netdev_err(ndev, "Max %d traffic classes supported\n",
			   priv->num_tx_rings);
		return -EINVAL;
	}

	/* For the moment, we use only one BD ring per TC.
	 *
	 * Configure num_tc BD rings with increasing priorities.
	 */
	for (i = 0; i < num_tc; i++) {
		tx_ring = priv->tx_ring[i];
		enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
	}

	/* Reset the number of netdev queues based on the TC count */
	netif_set_real_num_tx_queues(ndev, num_tc);

	netdev_set_num_tc(ndev, num_tc);

	/* Each TC is associated with one netdev queue */
	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(ndev, i, 1, i);

	return 0;
}
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return enetc_setup_tc_mqprio(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return enetc_setup_tc_taprio(ndev, type_data);
	case TC_SETUP_QDISC_CBS:
		return enetc_setup_tc_cbs(ndev, type_data);
	case TC_SETUP_QDISC_ETF:
		return enetc_setup_tc_txtime(ndev, type_data);
	case TC_SETUP_BLOCK:
		return enetc_setup_tc_psfp(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long packets = 0, bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_rings; i++) {
		packets += priv->rx_ring[i]->stats.packets;
		bytes += priv->rx_ring[i]->stats.bytes;
	}

	stats->rx_packets = packets;
	stats->rx_bytes = bytes;
	bytes = 0;
	packets = 0;

	for (i = 0; i < priv->num_tx_rings; i++) {
		packets += priv->tx_ring[i]->stats.packets;
		bytes += priv->tx_ring[i]->stats.bytes;
	}

	stats->tx_packets = packets;
	stats->tx_bytes = bytes;

	return stats;
}
static int enetc_set_rss(struct net_device *ndev, int en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 reg;

	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);

	reg = enetc_rd(hw, ENETC_SIMR);
	reg &= ~ENETC_SIMR_RSSE;
	reg |= (en) ? ENETC_SIMR_RSSE : 0;
	enetc_wr(hw, ENETC_SIMR, reg);

	return 0;
}
static int enetc_set_psfp(struct net_device *ndev, int en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	if (en) {
		err = enetc_psfp_enable(priv);
		if (err)
			return err;

		priv->active_offloads |= ENETC_F_QCI;
		return 0;
	}

	err = enetc_psfp_disable(priv);
	if (err)
		return err;

	priv->active_offloads &= ~ENETC_F_QCI;

	return 0;
}
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
}

static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
}
int enetc_set_features(struct net_device *ndev,
		       netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	int err = 0;

	if (changed & NETIF_F_RXHASH)
		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		enetc_enable_rxvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
		enetc_enable_txvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));

	if (changed & NETIF_F_HW_TC)
		err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));

	return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	int ao;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
		break;
	case HWTSTAMP_TX_ON:
		priv->active_offloads |= ENETC_F_TX_TSTAMP;
		break;
	default:
		return -ERANGE;
	}

	ao = priv->active_offloads;
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
		break;
	default:
		priv->active_offloads |= ENETC_F_RX_TSTAMP;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	if (netif_running(ndev) && ao != priv->active_offloads) {
		enetc_close(ndev);
		enetc_open(ndev);
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;

	if (priv->active_offloads & ENETC_F_TX_TSTAMP)
		config.tx_type = HWTSTAMP_TX_ON;
	else
		config.tx_type = HWTSTAMP_TX_OFF;

	config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
#endif
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (cmd == SIOCSHWTSTAMP)
		return enetc_hwtstamp_set(ndev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return enetc_hwtstamp_get(ndev, rq);
#endif

	if (!priv->phylink)
		return -EOPNOTSUPP;

	return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	int v_tx_rings;
	int i, n, err, nvec;

	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
	/* allocate MSIX for both messaging and Rx/Tx interrupts */
	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (n < 0)
		return n;
	if (n != nvec)
		return -EPERM;

	/* # of tx rings per int vector */
	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v;
		struct enetc_bdr *bdr;
		int j;

		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
		if (!v) {
			err = -ENOMEM;
			goto fail;
		}

		priv->int_vector[i] = v;

		/* init defaults for adaptive IC */
		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
			v->rx_ictt = 0x1;
			v->rx_dim_en = true;
		}
		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
			       NAPI_POLL_WEIGHT);
		v->count_tx_rings = v_tx_rings;

		for (j = 0; j < v_tx_rings; j++) {
			int idx;

			/* default tx ring mapping policy */
			if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
				idx = 2 * j + i; /* 2 CPUs */
			else
				idx = j + i * v_tx_rings; /* default */

			__set_bit(idx, &v->tx_rings_map);
			bdr = &v->tx_ring[j];
			bdr->index = idx;
			bdr->ndev = priv->ndev;
			bdr->dev = priv->dev;
			bdr->bd_count = priv->tx_bd_count;
			priv->tx_ring[idx] = bdr;
		}

		bdr = &v->rx_ring;
		bdr->index = i;
		bdr->ndev = priv->ndev;
		bdr->dev = priv->dev;
		bdr->bd_count = priv->rx_bd_count;
		priv->rx_ring[i] = bdr;
	}

	return 0;

fail:
	while (i--) {
		netif_napi_del(&priv->int_vector[i]->napi);
		cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
		kfree(priv->int_vector[i]);
	}

	pci_free_irq_vectors(pdev);

	return err;
}
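/* Editorial example of the default TX ring mapping above (assuming
 * ENETC_MAX_BDR_INT == 2 interrupt vectors and 8 TX rings): vector 0
 * gets rings 0, 2, 4, 6 and vector 1 gets rings 1, 3, 5, 7
 * (idx = 2 * j + i), interleaving rings across the two CPUs.
 */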
void enetc_free_msix(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v = priv->int_vector[i];

		netif_napi_del(&v->napi);
		cancel_work_sync(&v->rx_dim.work);
	}

	for (i = 0; i < priv->num_rx_rings; i++)
		priv->rx_ring[i] = NULL;

	for (i = 0; i < priv->num_tx_rings; i++)
		priv->tx_ring[i] = NULL;

	for (i = 0; i < priv->bdr_int_num; i++) {
		kfree(priv->int_vector[i]);
		priv->int_vector[i] = NULL;
	}

	/* disable all MSIX for this device */
	pci_free_irq_vectors(priv->si->pdev);
}
static void enetc_kfree_si(struct enetc_si *si)
{
	char *p = (char *)si - si->pad;

	kfree(p);
}
static void enetc_detect_errata(struct enetc_si *si)
{
	if (si->pdev->revision == ENETC_REV1)
		si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
}
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
{
	struct enetc_si *si, *p;
	struct enetc_hw *hw;
	size_t alloc_size;
	int err, len;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "device enable failed\n");
		return err;
	}

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
		goto err_pci_mem_reg;
	}

	pci_set_master(pdev);

	alloc_size = sizeof(struct enetc_si);
	if (sizeof_priv) {
		/* align priv to 32B */
		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* force 32B alignment for enetc_si */
	alloc_size += ENETC_SI_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto err_alloc_si;
	}

	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
	si->pad = (char *)si - (char *)p;

	pci_set_drvdata(pdev, si);
	si->pdev = pdev;
	hw = &si->hw;
	len = pci_resource_len(pdev, ENETC_BAR_REGS);
	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
	if (!hw->reg) {
		err = -ENXIO;
		dev_err(&pdev->dev, "ioremap() failed\n");
		goto err_ioremap;
	}
	if (len > ENETC_PORT_BASE)
		hw->port = hw->reg + ENETC_PORT_BASE;
	if (len > ENETC_GLOBAL_BASE)
		hw->global = hw->reg + ENETC_GLOBAL_BASE;

	enetc_detect_errata(si);

	return 0;

err_ioremap:
	enetc_kfree_si(si);
err_alloc_si:
	pci_release_mem_regions(pdev);
err_pci_mem_reg:
err_dma:
	pci_disable_device(pdev);

	return err;
}
void enetc_pci_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_hw *hw = &si->hw;

	iounmap(hw->reg);
	enetc_kfree_si(si);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}