drivers/net/ethernet/freescale/enetc/enetc.c
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
3
4 #include "enetc.h"
5 #include <linux/tcp.h>
6 #include <linux/udp.h>
7 #include <linux/vmalloc.h>
8
9 /* ENETC overhead: optional extension BD + 1 BD gap */
10 #define ENETC_TXBDS_NEEDED(val) ((val) + 2)
11 /* max # of chained Tx BDs is 15, including head and extension BD */
12 #define ENETC_MAX_SKB_FRAGS 13
13 #define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
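/* Worst case per frame: 1 head + ENETC_MAX_SKB_FRAGS (13) fragments = 14
 * data BDs, plus the optional extension BD and the 1 BD gap, so
 * ENETC_TXBDS_MAX_NEEDED evaluates to 16.
 */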
14
15 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
16 int active_offloads);
17
18 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
19 {
20 struct enetc_ndev_priv *priv = netdev_priv(ndev);
21 struct enetc_bdr *tx_ring;
22 int count;
23
24 tx_ring = priv->tx_ring[skb->queue_mapping];
25
26 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
27 if (unlikely(skb_linearize(skb)))
28 goto drop_packet_err;
29
30 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
31 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
32 netif_stop_subqueue(ndev, tx_ring->index);
33 return NETDEV_TX_BUSY;
34 }
35
36 enetc_lock_mdio();
37 count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
38 enetc_unlock_mdio();
39
40 if (unlikely(!count))
41 goto drop_packet_err;
42
43 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
44 netif_stop_subqueue(ndev, tx_ring->index);
45
46 return NETDEV_TX_OK;
47
48 drop_packet_err:
49 dev_kfree_skb_any(skb);
50 return NETDEV_TX_OK;
51 }
52
53 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
54 struct enetc_tx_swbd *tx_swbd)
55 {
56 if (tx_swbd->is_dma_page)
57 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
58 tx_swbd->len, DMA_TO_DEVICE);
59 else
60 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
61 tx_swbd->len, DMA_TO_DEVICE);
62 tx_swbd->dma = 0;
63 }
64
65 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
66 struct enetc_tx_swbd *tx_swbd)
67 {
68 if (tx_swbd->dma)
69 enetc_unmap_tx_buff(tx_ring, tx_swbd);
70
71 if (tx_swbd->skb) {
72 dev_kfree_skb_any(tx_swbd->skb);
73 tx_swbd->skb = NULL;
74 }
75 }
76
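/* Map the skb head and fragments to a chain of Tx BDs: the first BD carries
 * frm_len and the offload flags, an optional extension BD follows for VLAN
 * insertion and/or Tx timestamping, then one BD per page fragment.  The last
 * BD gets the 'F' (final) flag and the new producer index is written to tpir.
 * Returns the number of BDs used, or 0 on DMA mapping error.
 */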
77 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
78 int active_offloads)
79 {
80 struct enetc_tx_swbd *tx_swbd;
81 skb_frag_t *frag;
82 int len = skb_headlen(skb);
83 union enetc_tx_bd temp_bd;
84 union enetc_tx_bd *txbd;
85 bool do_vlan, do_tstamp;
86 int i, count = 0;
87 unsigned int f;
88 dma_addr_t dma;
89 u8 flags = 0;
90
91 i = tx_ring->next_to_use;
92 txbd = ENETC_TXBD(*tx_ring, i);
93 prefetchw(txbd);
94
95 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
96 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
97 goto dma_err;
98
99 temp_bd.addr = cpu_to_le64(dma);
100 temp_bd.buf_len = cpu_to_le16(len);
101 temp_bd.lstatus = 0;
102
103 tx_swbd = &tx_ring->tx_swbd[i];
104 tx_swbd->dma = dma;
105 tx_swbd->len = len;
106 tx_swbd->is_dma_page = 0;
107 count++;
108
109 do_vlan = skb_vlan_tag_present(skb);
110 do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
111 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
112 tx_swbd->do_tstamp = do_tstamp;
113 tx_swbd->check_wb = tx_swbd->do_tstamp;
114
115 if (do_vlan || do_tstamp)
116 flags |= ENETC_TXBD_FLAGS_EX;
117
118 if (tx_ring->tsd_enable)
119 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
120
121 /* first BD needs frm_len and offload flags set */
122 temp_bd.frm_len = cpu_to_le16(skb->len);
123 temp_bd.flags = flags;
124
125 if (flags & ENETC_TXBD_FLAGS_TSE)
126 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
127 flags);
128
129 if (flags & ENETC_TXBD_FLAGS_EX) {
130 u8 e_flags = 0;
131 *txbd = temp_bd;
132 enetc_clear_tx_bd(&temp_bd);
133
134 /* add extension BD for VLAN and/or timestamping */
135 flags = 0;
136 tx_swbd++;
137 txbd++;
138 i++;
139 if (unlikely(i == tx_ring->bd_count)) {
140 i = 0;
141 tx_swbd = tx_ring->tx_swbd;
142 txbd = ENETC_TXBD(*tx_ring, 0);
143 }
144 prefetchw(txbd);
145
146 if (do_vlan) {
147 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
148 temp_bd.ext.tpid = 0; /* < C-TAG */
149 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
150 }
151
152 if (do_tstamp) {
153 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
154 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
155 }
156
157 temp_bd.ext.e_flags = e_flags;
158 count++;
159 }
160
161 frag = &skb_shinfo(skb)->frags[0];
162 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
163 len = skb_frag_size(frag);
164 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
165 DMA_TO_DEVICE);
166 if (dma_mapping_error(tx_ring->dev, dma))
167 goto dma_err;
168
169 *txbd = temp_bd;
170 enetc_clear_tx_bd(&temp_bd);
171
172 flags = 0;
173 tx_swbd++;
174 txbd++;
175 i++;
176 if (unlikely(i == tx_ring->bd_count)) {
177 i = 0;
178 tx_swbd = tx_ring->tx_swbd;
179 txbd = ENETC_TXBD(*tx_ring, 0);
180 }
181 prefetchw(txbd);
182
183 temp_bd.addr = cpu_to_le64(dma);
184 temp_bd.buf_len = cpu_to_le16(len);
185
186 tx_swbd->dma = dma;
187 tx_swbd->len = len;
188 tx_swbd->is_dma_page = 1;
189 count++;
190 }
191
192 /* last BD needs 'F' bit set */
193 flags |= ENETC_TXBD_FLAGS_F;
194 temp_bd.flags = flags;
195 *txbd = temp_bd;
196
197 tx_ring->tx_swbd[i].skb = skb;
198
199 enetc_bdr_idx_inc(tx_ring, &i);
200 tx_ring->next_to_use = i;
201
202 skb_tx_timestamp(skb);
203
204 /* let H/W know BD ring has been updated */
205 enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
206
207 return count;
208
209 dma_err:
210 dev_err(tx_ring->dev, "DMA map error");
211
212 do {
213 tx_swbd = &tx_ring->tx_swbd[i];
214 enetc_free_tx_skb(tx_ring, tx_swbd);
215 if (i == 0)
216 i = tx_ring->bd_count;
217 i--;
218 } while (count--);
219
220 return 0;
221 }
222
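/* MSI-X handler: mask this vector's Rx and Tx BD ring interrupts and update
 * the Rx coalescing time threshold, then defer the actual work to NAPI.
 * The interrupt sources are re-enabled at the end of enetc_poll().
 */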
223 static irqreturn_t enetc_msix(int irq, void *data)
224 {
225 struct enetc_int_vector *v = data;
226 int i;
227
228 enetc_lock_mdio();
229
230 /* disable interrupts */
231 enetc_wr_reg_hot(v->rbier, 0);
232 enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
233
234 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
235 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
236
237 enetc_unlock_mdio();
238
239 napi_schedule(&v->napi);
240
241 return IRQ_HANDLED;
242 }
243
244 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
245 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
246 struct napi_struct *napi, int work_limit);
247
248 static void enetc_rx_dim_work(struct work_struct *w)
249 {
250 struct dim *dim = container_of(w, struct dim, work);
251 struct dim_cq_moder moder =
252 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
253 struct enetc_int_vector *v =
254 container_of(dim, struct enetc_int_vector, rx_dim);
255
256 v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
257 dim->state = DIM_START_MEASURE;
258 }
259
260 static void enetc_rx_net_dim(struct enetc_int_vector *v)
261 {
262 struct dim_sample dim_sample;
263
264 v->comp_cnt++;
265
266 if (!v->rx_napi_work)
267 return;
268
269 dim_update_sample(v->comp_cnt,
270 v->rx_ring.stats.packets,
271 v->rx_ring.stats.bytes,
272 &dim_sample);
273 net_dim(&v->rx_dim, dim_sample);
274 }
275
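/* NAPI poll: clean this vector's Tx rings and its Rx ring within the budget.
 * Only when all rings are fully cleaned is NAPI completed, adaptive
 * coalescing (DIM) updated if enabled, and the per-ring interrupt sources
 * re-enabled; otherwise the full budget is returned to keep polling.
 */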
276 static int enetc_poll(struct napi_struct *napi, int budget)
277 {
278 struct enetc_int_vector
279 *v = container_of(napi, struct enetc_int_vector, napi);
280 bool complete = true;
281 int work_done;
282 int i;
283
284 enetc_lock_mdio();
285
286 for (i = 0; i < v->count_tx_rings; i++)
287 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
288 complete = false;
289
290 work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
291 if (work_done == budget)
292 complete = false;
293 if (work_done)
294 v->rx_napi_work = true;
295
296 if (!complete) {
297 enetc_unlock_mdio();
298 return budget;
299 }
300
301 napi_complete_done(napi, work_done);
302
303 if (likely(v->rx_dim_en))
304 enetc_rx_net_dim(v);
305
306 v->rx_napi_work = false;
307
308 /* enable interrupts */
309 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
310
311 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
312 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
313 ENETC_TBIER_TXTIE);
314
315 enetc_unlock_mdio();
316
317 return work_done;
318 }
319
320 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
321 {
322 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
323
324 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
325 }
326
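/* Rebuild a 64-bit Tx timestamp from the 32-bit value written back in the BD
 * and the free-running counter in SICTR0/1; if the low word has already
 * wrapped past the BD snapshot, borrow one from the high word.
 */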
327 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
328 u64 *tstamp)
329 {
330 u32 lo, hi, tstamp_lo;
331
332 lo = enetc_rd_hot(hw, ENETC_SICTR0);
333 hi = enetc_rd_hot(hw, ENETC_SICTR1);
334 tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
335 if (lo <= tstamp_lo)
336 hi -= 1;
337 *tstamp = (u64)hi << 32 | tstamp_lo;
338 }
339
340 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
341 {
342 struct skb_shared_hwtstamps shhwtstamps;
343
344 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
345 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
346 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
347 skb_tstamp_tx(skb, &shhwtstamps);
348 }
349 }
350
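/* Tx completion: walk the ring from next_to_clean up to the hardware
 * consumer index, unmap each buffer, pick up Tx timestamps from write-back
 * descriptors, and free the skb on the end-of-frame BD.  Wakes the stopped
 * subqueue once enough BDs are free again.
 */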
351 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
352 {
353 struct net_device *ndev = tx_ring->ndev;
354 int tx_frm_cnt = 0, tx_byte_cnt = 0;
355 struct enetc_tx_swbd *tx_swbd;
356 int i, bds_to_clean;
357 bool do_tstamp;
358 u64 tstamp = 0;
359
360 i = tx_ring->next_to_clean;
361 tx_swbd = &tx_ring->tx_swbd[i];
362
363 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
364
365 do_tstamp = false;
366
367 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
368 bool is_eof = !!tx_swbd->skb;
369
370 if (unlikely(tx_swbd->check_wb)) {
371 struct enetc_ndev_priv *priv = netdev_priv(ndev);
372 union enetc_tx_bd *txbd;
373
374 txbd = ENETC_TXBD(*tx_ring, i);
375
376 if (txbd->flags & ENETC_TXBD_FLAGS_W &&
377 tx_swbd->do_tstamp) {
378 enetc_get_tx_tstamp(&priv->si->hw, txbd,
379 &tstamp);
380 do_tstamp = true;
381 }
382 }
383
384 if (likely(tx_swbd->dma))
385 enetc_unmap_tx_buff(tx_ring, tx_swbd);
386
387 if (is_eof) {
388 if (unlikely(do_tstamp)) {
389 enetc_tstamp_tx(tx_swbd->skb, tstamp);
390 do_tstamp = false;
391 }
392 napi_consume_skb(tx_swbd->skb, napi_budget);
393 tx_swbd->skb = NULL;
394 }
395
396 tx_byte_cnt += tx_swbd->len;
397
398 bds_to_clean--;
399 tx_swbd++;
400 i++;
401 if (unlikely(i == tx_ring->bd_count)) {
402 i = 0;
403 tx_swbd = tx_ring->tx_swbd;
404 }
405
406 /* BD iteration loop end */
407 if (is_eof) {
408 tx_frm_cnt++;
409 /* re-arm interrupt source */
410 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
411 BIT(16 + tx_ring->index));
412 }
413
414 if (unlikely(!bds_to_clean))
415 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
416 }
417
418 tx_ring->next_to_clean = i;
419 tx_ring->stats.packets += tx_frm_cnt;
420 tx_ring->stats.bytes += tx_byte_cnt;
421
422 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
423 __netif_subqueue_stopped(ndev, tx_ring->index) &&
424 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
425 netif_wake_subqueue(ndev, tx_ring->index);
426 }
427
428 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
429 }
430
431 static bool enetc_new_page(struct enetc_bdr *rx_ring,
432 struct enetc_rx_swbd *rx_swbd)
433 {
434 struct page *page;
435 dma_addr_t addr;
436
437 page = dev_alloc_page();
438 if (unlikely(!page))
439 return false;
440
441 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
443 __free_page(page);
444
445 return false;
446 }
447
448 rx_swbd->dma = addr;
449 rx_swbd->page = page;
450 rx_swbd->page_offset = ENETC_RXB_PAD;
451
452 return true;
453 }
454
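/* Refill up to buff_cnt Rx BDs starting at next_to_use, allocating and
 * mapping new pages only where the software BD has none left to reuse.
 * Returns the number of BDs actually refilled.
 */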
455 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
456 {
457 struct enetc_rx_swbd *rx_swbd;
458 union enetc_rx_bd *rxbd;
459 int i, j;
460
461 i = rx_ring->next_to_use;
462 rx_swbd = &rx_ring->rx_swbd[i];
463 rxbd = enetc_rxbd(rx_ring, i);
464
465 for (j = 0; j < buff_cnt; j++) {
466 /* try reuse page */
467 if (unlikely(!rx_swbd->page)) {
468 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
469 rx_ring->stats.rx_alloc_errs++;
470 break;
471 }
472 }
473
474 /* update RxBD */
475 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
476 rx_swbd->page_offset);
477 /* clear 'R' as well */
478 rxbd->r.lstatus = 0;
479
480 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
481 rx_swbd++;
482 i++;
483 if (unlikely(i == rx_ring->bd_count)) {
484 i = 0;
485 rx_swbd = rx_ring->rx_swbd;
486 }
487 }
488
489 if (likely(j)) {
490 rx_ring->next_to_alloc = i; /* keep track for page reuse */
491 rx_ring->next_to_use = i;
492 }
493
494 return j;
495 }
496
497 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
498 static void enetc_get_rx_tstamp(struct net_device *ndev,
499 union enetc_rx_bd *rxbd,
500 struct sk_buff *skb)
501 {
502 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
503 struct enetc_ndev_priv *priv = netdev_priv(ndev);
504 struct enetc_hw *hw = &priv->si->hw;
505 u32 lo, hi, tstamp_lo;
506 u64 tstamp;
507
508 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
509 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
510 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
511 rxbd = enetc_rxbd_ext(rxbd);
512 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
513 if (lo <= tstamp_lo)
514 hi -= 1;
515
516 tstamp = (u64)hi << 32 | tstamp_lo;
517 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
518 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
519 }
520 }
521 #endif
522
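/* Per-frame Rx offloads: report the IP checksum computed by hardware, pick
 * the VLAN TPID indicated by the BD flags (C-TAG, S-TAG or one of the two
 * custom port TPID registers) and, if enabled, the Rx hardware timestamp.
 */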
523 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
524 union enetc_rx_bd *rxbd, struct sk_buff *skb)
525 {
526 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
527
528 /* TODO: hashing */
529 if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
530 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
531
532 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
533 skb->ip_summed = CHECKSUM_COMPLETE;
534 }
535
536 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
537 __be16 tpid = 0;
538
539 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
540 case 0:
541 tpid = htons(ETH_P_8021Q);
542 break;
543 case 1:
544 tpid = htons(ETH_P_8021AD);
545 break;
546 case 2:
547 tpid = htons(enetc_port_rd(&priv->si->hw,
548 ENETC_PCVLANR1));
549 break;
550 case 3:
551 tpid = htons(enetc_port_rd(&priv->si->hw,
552 ENETC_PCVLANR2));
553 break;
554 default:
555 break;
556 }
557
558 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
559 }
560
561 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
562 if (priv->active_offloads & ENETC_F_RX_TSTAMP)
563 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
564 #endif
565 }
566
567 static void enetc_process_skb(struct enetc_bdr *rx_ring,
568 struct sk_buff *skb)
569 {
570 skb_record_rx_queue(skb, rx_ring->index);
571 skb->protocol = eth_type_trans(skb, rx_ring->ndev);
572 }
573
574 static bool enetc_page_reusable(struct page *page)
575 {
576 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
577 }
578
579 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
580 struct enetc_rx_swbd *old)
581 {
582 struct enetc_rx_swbd *new;
583
584 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
585
586 /* next buf that may reuse a page */
587 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
588
589 /* copy page reference */
590 *new = *old;
591 }
592
593 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
594 int i, u16 size)
595 {
596 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
597
598 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
599 rx_swbd->page_offset,
600 size, DMA_FROM_DEVICE);
601 return rx_swbd;
602 }
603
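/* Buffer recycling: while the page is not a pfmemalloc page and the driver
 * holds its only reference, flip page_offset to the other buffer half
 * (ENETC_RXB_TRUESIZE apart), take an extra reference and queue the page
 * for reuse at next_to_alloc; otherwise unmap it and let the stack free it.
 */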
604 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
605 struct enetc_rx_swbd *rx_swbd)
606 {
607 if (likely(enetc_page_reusable(rx_swbd->page))) {
608 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
609 page_ref_inc(rx_swbd->page);
610
611 enetc_reuse_page(rx_ring, rx_swbd);
612
613 /* sync for use by the device */
614 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
615 rx_swbd->page_offset,
616 ENETC_RXB_DMA_SIZE,
617 DMA_FROM_DEVICE);
618 } else {
619 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
620 PAGE_SIZE, DMA_FROM_DEVICE);
621 }
622
623 rx_swbd->page = NULL;
624 }
625
626 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
627 int i, u16 size)
628 {
629 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
630 struct sk_buff *skb;
631 void *ba;
632
633 ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
634 skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
635 if (unlikely(!skb)) {
636 rx_ring->stats.rx_alloc_errs++;
637 return NULL;
638 }
639
640 skb_reserve(skb, ENETC_RXB_PAD);
641 __skb_put(skb, size);
642
643 enetc_put_rx_buff(rx_ring, rx_swbd);
644
645 return skb;
646 }
647
648 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
649 u16 size, struct sk_buff *skb)
650 {
651 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
652
653 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
654 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
655
656 enetc_put_rx_buff(rx_ring, rx_swbd);
657 }
658
659 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
660
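/* Rx processing: refill the ring in bundles of ENETC_RXBD_BUNDLE BDs, build
 * an skb from the first BD of each frame, attach any further BDs as page
 * fragments, apply offload metadata and hand the frame to GRO.  Frames
 * flagged with an error status are dropped and their BDs skipped.
 */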
661 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
662 struct napi_struct *napi, int work_limit)
663 {
664 int rx_frm_cnt = 0, rx_byte_cnt = 0;
665 int cleaned_cnt, i;
666
667 cleaned_cnt = enetc_bd_unused(rx_ring);
668 /* next descriptor to process */
669 i = rx_ring->next_to_clean;
670
671 while (likely(rx_frm_cnt < work_limit)) {
672 union enetc_rx_bd *rxbd;
673 struct sk_buff *skb;
674 u32 bd_status;
675 u16 size;
676
677 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
678 int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
679
680 /* update ENETC's consumer index */
681 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
682 cleaned_cnt -= count;
683 }
684
685 rxbd = enetc_rxbd(rx_ring, i);
686 bd_status = le32_to_cpu(rxbd->r.lstatus);
687 if (!bd_status)
688 break;
689
690 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
691 dma_rmb(); /* for reading other rxbd fields */
692 size = le16_to_cpu(rxbd->r.buf_len);
693 skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
694 if (!skb)
695 break;
696
697 enetc_get_offloads(rx_ring, rxbd, skb);
698
699 cleaned_cnt++;
700
701 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
702 if (unlikely(++i == rx_ring->bd_count))
703 i = 0;
704
705 if (unlikely(bd_status &
706 ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
707 dev_kfree_skb(skb);
708 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
709 dma_rmb();
710 bd_status = le32_to_cpu(rxbd->r.lstatus);
711
712 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
713 if (unlikely(++i == rx_ring->bd_count))
714 i = 0;
715 }
716
717 rx_ring->ndev->stats.rx_dropped++;
718 rx_ring->ndev->stats.rx_errors++;
719
720 break;
721 }
722
723 /* not last BD in frame? */
724 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
725 bd_status = le32_to_cpu(rxbd->r.lstatus);
726 size = ENETC_RXB_DMA_SIZE;
727
728 if (bd_status & ENETC_RXBD_LSTATUS_F) {
729 dma_rmb();
730 size = le16_to_cpu(rxbd->r.buf_len);
731 }
732
733 enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
734
735 cleaned_cnt++;
736
737 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
738 if (unlikely(++i == rx_ring->bd_count))
739 i = 0;
740 }
741
742 rx_byte_cnt += skb->len;
743
744 enetc_process_skb(rx_ring, skb);
745
746 napi_gro_receive(napi, skb);
747
748 rx_frm_cnt++;
749 }
750
751 rx_ring->next_to_clean = i;
752
753 rx_ring->stats.packets += rx_frm_cnt;
754 rx_ring->stats.bytes += rx_byte_cnt;
755
756 return rx_frm_cnt;
757 }
758
759 /* Probing and Init */
760 #define ENETC_MAX_RFS_SIZE 64
761 void enetc_get_si_caps(struct enetc_si *si)
762 {
763 struct enetc_hw *hw = &si->hw;
764 u32 val;
765
766 /* find out how many of each resource we have to work with */
767 val = enetc_rd(hw, ENETC_SICAPR0);
768 si->num_rx_rings = (val >> 16) & 0xff;
769 si->num_tx_rings = val & 0xff;
770
771 val = enetc_rd(hw, ENETC_SIRFSCAPR);
772 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
773 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
774
775 si->num_rss = 0;
776 val = enetc_rd(hw, ENETC_SIPCAPR0);
777 if (val & ENETC_SIPCAPR0_RSS) {
778 u32 rss;
779
780 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
781 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
782 }
783
784 if (val & ENETC_SIPCAPR0_QBV)
785 si->hw_features |= ENETC_SI_F_QBV;
786
787 if (val & ENETC_SIPCAPR0_PSFP)
788 si->hw_features |= ENETC_SI_F_PSFP;
789 }
790
791 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
792 {
793 r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
794 &r->bd_dma_base, GFP_KERNEL);
795 if (!r->bd_base)
796 return -ENOMEM;
797
798 /* h/w requires 128B alignment */
799 if (!IS_ALIGNED(r->bd_dma_base, 128)) {
800 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
801 r->bd_dma_base);
802 return -EINVAL;
803 }
804
805 return 0;
806 }
807
808 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
809 {
810 int err;
811
812 txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
813 if (!txr->tx_swbd)
814 return -ENOMEM;
815
816 err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
817 if (err) {
818 vfree(txr->tx_swbd);
819 return err;
820 }
821
822 txr->next_to_clean = 0;
823 txr->next_to_use = 0;
824
825 return 0;
826 }
827
828 static void enetc_free_txbdr(struct enetc_bdr *txr)
829 {
830 int size, i;
831
832 for (i = 0; i < txr->bd_count; i++)
833 enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
834
835 size = txr->bd_count * sizeof(union enetc_tx_bd);
836
837 dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
838 txr->bd_base = NULL;
839
840 vfree(txr->tx_swbd);
841 txr->tx_swbd = NULL;
842 }
843
844 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
845 {
846 int i, err;
847
848 for (i = 0; i < priv->num_tx_rings; i++) {
849 err = enetc_alloc_txbdr(priv->tx_ring[i]);
850
851 if (err)
852 goto fail;
853 }
854
855 return 0;
856
857 fail:
858 while (i-- > 0)
859 enetc_free_txbdr(priv->tx_ring[i]);
860
861 return err;
862 }
863
864 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
865 {
866 int i;
867
868 for (i = 0; i < priv->num_tx_rings; i++)
869 enetc_free_txbdr(priv->tx_ring[i]);
870 }
871
872 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
873 {
874 size_t size = sizeof(union enetc_rx_bd);
875 int err;
876
877 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
878 if (!rxr->rx_swbd)
879 return -ENOMEM;
880
881 if (extended)
882 size *= 2;
883
884 err = enetc_dma_alloc_bdr(rxr, size);
885 if (err) {
886 vfree(rxr->rx_swbd);
887 return err;
888 }
889
890 rxr->next_to_clean = 0;
891 rxr->next_to_use = 0;
892 rxr->next_to_alloc = 0;
893 rxr->ext_en = extended;
894
895 return 0;
896 }
897
898 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
899 {
900 int size;
901
902 size = rxr->bd_count * sizeof(union enetc_rx_bd);
903
904 dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
905 rxr->bd_base = NULL;
906
907 vfree(rxr->rx_swbd);
908 rxr->rx_swbd = NULL;
909 }
910
911 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
912 {
913 bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
914 int i, err;
915
916 for (i = 0; i < priv->num_rx_rings; i++) {
917 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
918
919 if (err)
920 goto fail;
921 }
922
923 return 0;
924
925 fail:
926 while (i-- > 0)
927 enetc_free_rxbdr(priv->rx_ring[i]);
928
929 return err;
930 }
931
932 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
933 {
934 int i;
935
936 for (i = 0; i < priv->num_rx_rings; i++)
937 enetc_free_rxbdr(priv->rx_ring[i]);
938 }
939
940 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
941 {
942 int i;
943
944 if (!tx_ring->tx_swbd)
945 return;
946
947 for (i = 0; i < tx_ring->bd_count; i++) {
948 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
949
950 enetc_free_tx_skb(tx_ring, tx_swbd);
951 }
952
953 tx_ring->next_to_clean = 0;
954 tx_ring->next_to_use = 0;
955 }
956
957 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
958 {
959 int i;
960
961 if (!rx_ring->rx_swbd)
962 return;
963
964 for (i = 0; i < rx_ring->bd_count; i++) {
965 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
966
967 if (!rx_swbd->page)
968 continue;
969
970 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
971 PAGE_SIZE, DMA_FROM_DEVICE);
972 __free_page(rx_swbd->page);
973 rx_swbd->page = NULL;
974 }
975
976 rx_ring->next_to_clean = 0;
977 rx_ring->next_to_use = 0;
978 rx_ring->next_to_alloc = 0;
979 }
980
981 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
982 {
983 int i;
984
985 for (i = 0; i < priv->num_rx_rings; i++)
986 enetc_free_rx_ring(priv->rx_ring[i]);
987
988 for (i = 0; i < priv->num_tx_rings; i++)
989 enetc_free_tx_ring(priv->tx_ring[i]);
990 }
991
992 int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
993 {
994 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
995
996 cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
997 GFP_KERNEL);
998 if (!cbdr->bd_base)
999 return -ENOMEM;
1000
1001 /* h/w requires 128B alignment */
1002 if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
1003 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
1004 return -EINVAL;
1005 }
1006
1007 cbdr->next_to_clean = 0;
1008 cbdr->next_to_use = 0;
1009
1010 return 0;
1011 }
1012
1013 void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1014 {
1015 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
1016
1017 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
1018 cbdr->bd_base = NULL;
1019 }
1020
1021 void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
1022 {
1023 /* set CBDR cache attributes */
1024 enetc_wr(hw, ENETC_SICAR2,
1025 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1026
1027 enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
1028 enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
1029 enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
1030
1031 enetc_wr(hw, ENETC_SICBDRPIR, 0);
1032 enetc_wr(hw, ENETC_SICBDRCIR, 0);
1033
1034 /* enable ring */
1035 enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
1036
1037 cbdr->pir = hw->reg + ENETC_SICBDRPIR;
1038 cbdr->cir = hw->reg + ENETC_SICBDRCIR;
1039 }
1040
1041 void enetc_clear_cbdr(struct enetc_hw *hw)
1042 {
1043 enetc_wr(hw, ENETC_SICBDRMR, 0);
1044 }
1045
1046 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1047 {
1048 int *rss_table;
1049 int i;
1050
1051 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
1052 if (!rss_table)
1053 return -ENOMEM;
1054
1055 /* Set up RSS table defaults */
1056 for (i = 0; i < si->num_rss; i++)
1057 rss_table[i] = i % num_groups;
1058
1059 enetc_set_rss_table(si, rss_table, si->num_rss);
1060
1061 kfree(rss_table);
1062
1063 return 0;
1064 }
1065
1066 int enetc_configure_si(struct enetc_ndev_priv *priv)
1067 {
1068 struct enetc_si *si = priv->si;
1069 struct enetc_hw *hw = &si->hw;
1070 int err;
1071
1072 /* set SI cache attributes */
1073 enetc_wr(hw, ENETC_SICAR0,
1074 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1075 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1076 /* enable SI */
1077 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1078
1079 if (si->num_rss) {
1080 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1081 if (err)
1082 return err;
1083 }
1084
1085 return 0;
1086 }
1087
1088 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
1089 {
1090 struct enetc_si *si = priv->si;
1091 int cpus = num_online_cpus();
1092
1093 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
1094 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
1095
1096 /* Enable all available TX rings in order to configure as many
1097 * priorities as possible, when needed.
1098 * TODO: Make # of TX rings run-time configurable
1099 */
1100 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
1101 priv->num_tx_rings = si->num_tx_rings;
1102 priv->bdr_int_num = cpus;
1103 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
1104 priv->tx_ictt = ENETC_TXIC_TIMETHR;
1105
1106 /* SI specific */
1107 si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
1108 }
1109
1110 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1111 {
1112 struct enetc_si *si = priv->si;
1113 int err;
1114
1115 err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
1116 if (err)
1117 return err;
1118
1119 enetc_setup_cbdr(&si->hw, &si->cbd_ring);
1120
1121 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1122 GFP_KERNEL);
1123 if (!priv->cls_rules) {
1124 err = -ENOMEM;
1125 goto err_alloc_cls;
1126 }
1127
1128 return 0;
1129
1130 err_alloc_cls:
1131 enetc_clear_cbdr(&si->hw);
1132 enetc_free_cbdr(priv->dev, &si->cbd_ring);
1133
1134 return err;
1135 }
1136
1137 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1138 {
1139 struct enetc_si *si = priv->si;
1140
1141 enetc_clear_cbdr(&si->hw);
1142 enetc_free_cbdr(priv->dev, &si->cbd_ring);
1143
1144 kfree(priv->cls_rules);
1145 }
1146
1147 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1148 {
1149 int idx = tx_ring->index;
1150 u32 tbmr;
1151
1152 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1153 lower_32_bits(tx_ring->bd_dma_base));
1154
1155 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1156 upper_32_bits(tx_ring->bd_dma_base));
1157
1158 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1159 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1160 ENETC_RTBLENR_LEN(tx_ring->bd_count));
1161
1162 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1163 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1164 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1165
1166 /* enable Tx ints by setting pkt thr to 1 */
1167 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
1168
1169 tbmr = ENETC_TBMR_EN;
1170 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1171 tbmr |= ENETC_TBMR_VIH;
1172
1173 /* enable ring */
1174 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1175
1176 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1177 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1178 tx_ring->idr = hw->reg + ENETC_SITXIDR;
1179 }
1180
1181 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1182 {
1183 int idx = rx_ring->index;
1184 u32 rbmr;
1185
1186 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1187 lower_32_bits(rx_ring->bd_dma_base));
1188
1189 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1190 upper_32_bits(rx_ring->bd_dma_base));
1191
1192 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1193 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1194 ENETC_RTBLENR_LEN(rx_ring->bd_count));
1195
1196 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1197
1198 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1199
1200 /* enable Rx ints by setting pkt thr to 1 */
1201 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
1202
1203 rbmr = ENETC_RBMR_EN;
1204
1205 if (rx_ring->ext_en)
1206 rbmr |= ENETC_RBMR_BDS;
1207
1208 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1209 rbmr |= ENETC_RBMR_VTE;
1210
1211 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1212 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1213
1214 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1215
1216 /* enable ring */
1217 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1218 }
1219
1220 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1221 {
1222 int i;
1223
1224 for (i = 0; i < priv->num_tx_rings; i++)
1225 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1226
1227 for (i = 0; i < priv->num_rx_rings; i++)
1228 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1229 }
1230
1231 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1232 {
1233 int idx = rx_ring->index;
1234
1235 /* disable EN bit on ring */
1236 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1237 }
1238
1239 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1240 {
1241 int delay = 8, timeout = 100;
1242 int idx = tx_ring->index;
1243
1244 /* disable EN bit on ring */
1245 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1246
1247 /* wait for busy to clear */
1248 while (delay < timeout &&
1249 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1250 msleep(delay);
1251 delay *= 2;
1252 }
1253
1254 if (delay >= timeout)
1255 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1256 idx);
1257 }
1258
1259 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1260 {
1261 int i;
1262
1263 for (i = 0; i < priv->num_tx_rings; i++)
1264 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1265
1266 for (i = 0; i < priv->num_rx_rings; i++)
1267 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1268
1269 udelay(1);
1270 }
1271
1272 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1273 {
1274 struct pci_dev *pdev = priv->si->pdev;
1275 cpumask_t cpu_mask;
1276 int i, j, err;
1277
1278 for (i = 0; i < priv->bdr_int_num; i++) {
1279 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1280 struct enetc_int_vector *v = priv->int_vector[i];
1281 int entry = ENETC_BDR_INT_BASE_IDX + i;
1282 struct enetc_hw *hw = &priv->si->hw;
1283
1284 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1285 priv->ndev->name, i);
1286 err = request_irq(irq, enetc_msix, 0, v->name, v);
1287 if (err) {
1288 dev_err(priv->dev, "request_irq() failed!\n");
1289 goto irq_err;
1290 }
1291 disable_irq(irq);
1292
1293 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1294 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1295 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
1296
1297 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1298
1299 for (j = 0; j < v->count_tx_rings; j++) {
1300 int idx = v->tx_ring[j].index;
1301
1302 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1303 }
1304 cpumask_clear(&cpu_mask);
1305 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1306 irq_set_affinity_hint(irq, &cpu_mask);
1307 }
1308
1309 return 0;
1310
1311 irq_err:
1312 while (i--) {
1313 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1314
1315 irq_set_affinity_hint(irq, NULL);
1316 free_irq(irq, priv->int_vector[i]);
1317 }
1318
1319 return err;
1320 }
1321
1322 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1323 {
1324 struct pci_dev *pdev = priv->si->pdev;
1325 int i;
1326
1327 for (i = 0; i < priv->bdr_int_num; i++) {
1328 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1329
1330 irq_set_affinity_hint(irq, NULL);
1331 free_irq(irq, priv->int_vector[i]);
1332 }
1333 }
1334
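/* Program per-ring interrupt coalescing: packet and time thresholds chosen
 * according to ic_mode, then enable the Rx and Tx ring interrupt sources.
 */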
1335 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
1336 {
1337 struct enetc_hw *hw = &priv->si->hw;
1338 u32 icpt, ictt;
1339 int i;
1340
1341 /* enable Tx & Rx event indication */
1342 if (priv->ic_mode &
1343 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
1344 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
1345 /* init to non-0 minimum, will be adjusted later */
1346 ictt = 0x1;
1347 } else {
1348 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
1349 ictt = 0;
1350 }
1351
1352 for (i = 0; i < priv->num_rx_rings; i++) {
1353 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
1354 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
1355 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1356 }
1357
1358 if (priv->ic_mode & ENETC_IC_TX_MANUAL)
1359 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
1360 else
1361 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
1362
1363 for (i = 0; i < priv->num_tx_rings; i++) {
1364 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
1365 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
1366 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
1367 }
1368 }
1369
1370 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
1371 {
1372 int i;
1373
1374 for (i = 0; i < priv->num_tx_rings; i++)
1375 enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1376
1377 for (i = 0; i < priv->num_rx_rings; i++)
1378 enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1379 }
1380
1381 static int enetc_phylink_connect(struct net_device *ndev)
1382 {
1383 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1384 struct ethtool_eee edata;
1385 int err;
1386
1387 if (!priv->phylink)
1388 return 0; /* phy-less mode */
1389
1390 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
1391 if (err) {
1392 dev_err(&ndev->dev, "could not attach to PHY\n");
1393 return err;
1394 }
1395
1396 /* disable EEE autoneg, until the ENETC driver supports it */
1397 memset(&edata, 0, sizeof(struct ethtool_eee));
1398 phylink_ethtool_set_eee(priv->phylink, &edata);
1399
1400 return 0;
1401 }
1402
1403 void enetc_start(struct net_device *ndev)
1404 {
1405 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1406 int i;
1407
1408 enetc_setup_interrupts(priv);
1409
1410 for (i = 0; i < priv->bdr_int_num; i++) {
1411 int irq = pci_irq_vector(priv->si->pdev,
1412 ENETC_BDR_INT_BASE_IDX + i);
1413
1414 napi_enable(&priv->int_vector[i]->napi);
1415 enable_irq(irq);
1416 }
1417
1418 if (priv->phylink)
1419 phylink_start(priv->phylink);
1420 else
1421 netif_carrier_on(ndev);
1422
1423 netif_tx_start_all_queues(ndev);
1424 }
1425
1426 int enetc_open(struct net_device *ndev)
1427 {
1428 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1429 int err;
1430
1431 err = enetc_setup_irqs(priv);
1432 if (err)
1433 return err;
1434
1435 err = enetc_phylink_connect(ndev);
1436 if (err)
1437 goto err_phy_connect;
1438
1439 err = enetc_alloc_tx_resources(priv);
1440 if (err)
1441 goto err_alloc_tx;
1442
1443 err = enetc_alloc_rx_resources(priv);
1444 if (err)
1445 goto err_alloc_rx;
1446
1447 err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1448 if (err)
1449 goto err_set_queues;
1450
1451 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1452 if (err)
1453 goto err_set_queues;
1454
1455 enetc_setup_bdrs(priv);
1456 enetc_start(ndev);
1457
1458 return 0;
1459
1460 err_set_queues:
1461 enetc_free_rx_resources(priv);
1462 err_alloc_rx:
1463 enetc_free_tx_resources(priv);
1464 err_alloc_tx:
1465 if (priv->phylink)
1466 phylink_disconnect_phy(priv->phylink);
1467 err_phy_connect:
1468 enetc_free_irqs(priv);
1469
1470 return err;
1471 }
1472
1473 void enetc_stop(struct net_device *ndev)
1474 {
1475 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1476 int i;
1477
1478 netif_tx_stop_all_queues(ndev);
1479
1480 for (i = 0; i < priv->bdr_int_num; i++) {
1481 int irq = pci_irq_vector(priv->si->pdev,
1482 ENETC_BDR_INT_BASE_IDX + i);
1483
1484 disable_irq(irq);
1485 napi_synchronize(&priv->int_vector[i]->napi);
1486 napi_disable(&priv->int_vector[i]->napi);
1487 }
1488
1489 if (priv->phylink)
1490 phylink_stop(priv->phylink);
1491 else
1492 netif_carrier_off(ndev);
1493
1494 enetc_clear_interrupts(priv);
1495 }
1496
1497 int enetc_close(struct net_device *ndev)
1498 {
1499 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1500
1501 enetc_stop(ndev);
1502 enetc_clear_bdrs(priv);
1503
1504 if (priv->phylink)
1505 phylink_disconnect_phy(priv->phylink);
1506 enetc_free_rxtx_rings(priv);
1507 enetc_free_rx_resources(priv);
1508 enetc_free_tx_resources(priv);
1509 enetc_free_irqs(priv);
1510
1511 return 0;
1512 }
1513
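/* mqprio offload: map each traffic class to exactly one Tx BD ring, with
 * ring i carrying priority i, and shrink the number of exposed netdev
 * queues to the TC count.  num_tc == 0 undoes the mapping.
 */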
1514 static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
1515 {
1516 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1517 struct tc_mqprio_qopt *mqprio = type_data;
1518 struct enetc_bdr *tx_ring;
1519 u8 num_tc;
1520 int i;
1521
1522 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1523 num_tc = mqprio->num_tc;
1524
1525 if (!num_tc) {
1526 netdev_reset_tc(ndev);
1527 netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1528
1529 /* Reset all ring priorities to 0 */
1530 for (i = 0; i < priv->num_tx_rings; i++) {
1531 tx_ring = priv->tx_ring[i];
1532 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
1533 }
1534
1535 return 0;
1536 }
1537
1538 /* Check if we have enough BD rings available to accommodate all TCs */
1539 if (num_tc > priv->num_tx_rings) {
1540 netdev_err(ndev, "Max %d traffic classes supported\n",
1541 priv->num_tx_rings);
1542 return -EINVAL;
1543 }
1544
1545 /* For the moment, we use only one BD ring per TC.
1546 *
1547 * Configure num_tc BD rings with increasing priorities.
1548 */
1549 for (i = 0; i < num_tc; i++) {
1550 tx_ring = priv->tx_ring[i];
1551 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
1552 }
1553
1554 /* Reset the number of netdev queues based on the TC count */
1555 netif_set_real_num_tx_queues(ndev, num_tc);
1556
1557 netdev_set_num_tc(ndev, num_tc);
1558
1559 /* Each TC is associated with one netdev queue */
1560 for (i = 0; i < num_tc; i++)
1561 netdev_set_tc_queue(ndev, i, 1, i);
1562
1563 return 0;
1564 }
1565
1566 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1567 void *type_data)
1568 {
1569 switch (type) {
1570 case TC_SETUP_QDISC_MQPRIO:
1571 return enetc_setup_tc_mqprio(ndev, type_data);
1572 case TC_SETUP_QDISC_TAPRIO:
1573 return enetc_setup_tc_taprio(ndev, type_data);
1574 case TC_SETUP_QDISC_CBS:
1575 return enetc_setup_tc_cbs(ndev, type_data);
1576 case TC_SETUP_QDISC_ETF:
1577 return enetc_setup_tc_txtime(ndev, type_data);
1578 case TC_SETUP_BLOCK:
1579 return enetc_setup_tc_psfp(ndev, type_data);
1580 default:
1581 return -EOPNOTSUPP;
1582 }
1583 }
1584
1585 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1586 {
1587 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1588 struct net_device_stats *stats = &ndev->stats;
1589 unsigned long packets = 0, bytes = 0;
1590 int i;
1591
1592 for (i = 0; i < priv->num_rx_rings; i++) {
1593 packets += priv->rx_ring[i]->stats.packets;
1594 bytes += priv->rx_ring[i]->stats.bytes;
1595 }
1596
1597 stats->rx_packets = packets;
1598 stats->rx_bytes = bytes;
1599 bytes = 0;
1600 packets = 0;
1601
1602 for (i = 0; i < priv->num_tx_rings; i++) {
1603 packets += priv->tx_ring[i]->stats.packets;
1604 bytes += priv->tx_ring[i]->stats.bytes;
1605 }
1606
1607 stats->tx_packets = packets;
1608 stats->tx_bytes = bytes;
1609
1610 return stats;
1611 }
1612
1613 static int enetc_set_rss(struct net_device *ndev, int en)
1614 {
1615 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1616 struct enetc_hw *hw = &priv->si->hw;
1617 u32 reg;
1618
1619 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
1620
1621 reg = enetc_rd(hw, ENETC_SIMR);
1622 reg &= ~ENETC_SIMR_RSSE;
1623 reg |= (en) ? ENETC_SIMR_RSSE : 0;
1624 enetc_wr(hw, ENETC_SIMR, reg);
1625
1626 return 0;
1627 }
1628
1629 static int enetc_set_psfp(struct net_device *ndev, int en)
1630 {
1631 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1632 int err;
1633
1634 if (en) {
1635 err = enetc_psfp_enable(priv);
1636 if (err)
1637 return err;
1638
1639 priv->active_offloads |= ENETC_F_QCI;
1640 return 0;
1641 }
1642
1643 err = enetc_psfp_disable(priv);
1644 if (err)
1645 return err;
1646
1647 priv->active_offloads &= ~ENETC_F_QCI;
1648
1649 return 0;
1650 }
1651
1652 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
1653 {
1654 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1655 int i;
1656
1657 for (i = 0; i < priv->num_rx_rings; i++)
1658 enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
1659 }
1660
1661 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
1662 {
1663 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1664 int i;
1665
1666 for (i = 0; i < priv->num_tx_rings; i++)
1667 enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
1668 }
1669
1670 int enetc_set_features(struct net_device *ndev,
1671 netdev_features_t features)
1672 {
1673 netdev_features_t changed = ndev->features ^ features;
1674 int err = 0;
1675
1676 if (changed & NETIF_F_RXHASH)
1677 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1678
1679 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1680 enetc_enable_rxvlan(ndev,
1681 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
1682
1683 if (changed & NETIF_F_HW_VLAN_CTAG_TX)
1684 enetc_enable_txvlan(ndev,
1685 !!(features & NETIF_F_HW_VLAN_CTAG_TX));
1686
1687 if (changed & NETIF_F_HW_TC)
1688 err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
1689
1690 return err;
1691 }
1692
1693 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1694 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
1695 {
1696 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1697 struct hwtstamp_config config;
1698 int ao;
1699
1700 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1701 return -EFAULT;
1702
1703 switch (config.tx_type) {
1704 case HWTSTAMP_TX_OFF:
1705 priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
1706 break;
1707 case HWTSTAMP_TX_ON:
1708 priv->active_offloads |= ENETC_F_TX_TSTAMP;
1709 break;
1710 default:
1711 return -ERANGE;
1712 }
1713
1714 ao = priv->active_offloads;
1715 switch (config.rx_filter) {
1716 case HWTSTAMP_FILTER_NONE:
1717 priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
1718 break;
1719 default:
1720 priv->active_offloads |= ENETC_F_RX_TSTAMP;
1721 config.rx_filter = HWTSTAMP_FILTER_ALL;
1722 }
1723
1724 if (netif_running(ndev) && ao != priv->active_offloads) {
1725 enetc_close(ndev);
1726 enetc_open(ndev);
1727 }
1728
1729 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1730 -EFAULT : 0;
1731 }
1732
1733 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
1734 {
1735 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1736 struct hwtstamp_config config;
1737
1738 config.flags = 0;
1739
1740 if (priv->active_offloads & ENETC_F_TX_TSTAMP)
1741 config.tx_type = HWTSTAMP_TX_ON;
1742 else
1743 config.tx_type = HWTSTAMP_TX_OFF;
1744
1745 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
1746 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1747
1748 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1749 -EFAULT : 0;
1750 }
1751 #endif
1752
1753 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1754 {
1755 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1756 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1757 if (cmd == SIOCSHWTSTAMP)
1758 return enetc_hwtstamp_set(ndev, rq);
1759 if (cmd == SIOCGHWTSTAMP)
1760 return enetc_hwtstamp_get(ndev, rq);
1761 #endif
1762
1763 if (!priv->phylink)
1764 return -EOPNOTSUPP;
1765
1766 return phylink_mii_ioctl(priv->phylink, rq, cmd);
1767 }
1768
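/* Allocate exactly nvec MSI-X vectors (SI messaging plus one per Rx/Tx
 * interrupt vector), give each interrupt vector its own Rx ring and an equal
 * share of the Tx rings, and register the NAPI instances.
 */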
1769 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1770 {
1771 struct pci_dev *pdev = priv->si->pdev;
1772 int v_tx_rings;
1773 int i, n, err, nvec;
1774
1775 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1776 /* allocate MSIX for both messaging and Rx/Tx interrupts */
1777 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1778
1779 if (n < 0)
1780 return n;
1781
1782 if (n != nvec)
1783 return -EPERM;
1784
1785 /* # of tx rings per int vector */
1786 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1787
1788 for (i = 0; i < priv->bdr_int_num; i++) {
1789 struct enetc_int_vector *v;
1790 struct enetc_bdr *bdr;
1791 int j;
1792
1793 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
1794 if (!v) {
1795 err = -ENOMEM;
1796 goto fail;
1797 }
1798
1799 priv->int_vector[i] = v;
1800
1801 /* init defaults for adaptive IC */
1802 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
1803 v->rx_ictt = 0x1;
1804 v->rx_dim_en = true;
1805 }
1806 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
1807 netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1808 NAPI_POLL_WEIGHT);
1809 v->count_tx_rings = v_tx_rings;
1810
1811 for (j = 0; j < v_tx_rings; j++) {
1812 int idx;
1813
1814 /* default tx ring mapping policy */
1815 if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1816 idx = 2 * j + i; /* 2 CPUs */
1817 else
1818 idx = j + i * v_tx_rings; /* default */
1819
1820 __set_bit(idx, &v->tx_rings_map);
1821 bdr = &v->tx_ring[j];
1822 bdr->index = idx;
1823 bdr->ndev = priv->ndev;
1824 bdr->dev = priv->dev;
1825 bdr->bd_count = priv->tx_bd_count;
1826 priv->tx_ring[idx] = bdr;
1827 }
1828
1829 bdr = &v->rx_ring;
1830 bdr->index = i;
1831 bdr->ndev = priv->ndev;
1832 bdr->dev = priv->dev;
1833 bdr->bd_count = priv->rx_bd_count;
1834 priv->rx_ring[i] = bdr;
1835 }
1836
1837 return 0;
1838
1839 fail:
1840 while (i--) {
1841 netif_napi_del(&priv->int_vector[i]->napi);
1842 cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
1843 kfree(priv->int_vector[i]);
1844 }
1845
1846 pci_free_irq_vectors(pdev);
1847
1848 return err;
1849 }
1850
1851 void enetc_free_msix(struct enetc_ndev_priv *priv)
1852 {
1853 int i;
1854
1855 for (i = 0; i < priv->bdr_int_num; i++) {
1856 struct enetc_int_vector *v = priv->int_vector[i];
1857
1858 netif_napi_del(&v->napi);
1859 cancel_work_sync(&v->rx_dim.work);
1860 }
1861
1862 for (i = 0; i < priv->num_rx_rings; i++)
1863 priv->rx_ring[i] = NULL;
1864
1865 for (i = 0; i < priv->num_tx_rings; i++)
1866 priv->tx_ring[i] = NULL;
1867
1868 for (i = 0; i < priv->bdr_int_num; i++) {
1869 kfree(priv->int_vector[i]);
1870 priv->int_vector[i] = NULL;
1871 }
1872
1873 /* disable all MSIX for this device */
1874 pci_free_irq_vectors(priv->si->pdev);
1875 }
1876
1877 static void enetc_kfree_si(struct enetc_si *si)
1878 {
1879 char *p = (char *)si - si->pad;
1880
1881 kfree(p);
1882 }
1883
1884 static void enetc_detect_errata(struct enetc_si *si)
1885 {
1886 if (si->pdev->revision == ENETC_REV1)
1887 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
1888 }
1889
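/* Common PCI probe: FLR and enable the device, set the DMA mask (64-bit with
 * a 32-bit fallback), claim the register BAR, and allocate the SI structure
 * over-sized so it can be aligned to ENETC_SI_ALIGN, recording the padding
 * for the matching kfree in enetc_kfree_si().
 */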
1890 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1891 {
1892 struct enetc_si *si, *p;
1893 struct enetc_hw *hw;
1894 size_t alloc_size;
1895 int err, len;
1896
1897 pcie_flr(pdev);
1898 err = pci_enable_device_mem(pdev);
1899 if (err) {
1900 dev_err(&pdev->dev, "device enable failed\n");
1901 return err;
1902 }
1903
1904 /* set up for high or low dma */
1905 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1906 if (err) {
1907 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1908 if (err) {
1909 dev_err(&pdev->dev,
1910 "DMA configuration failed: 0x%x\n", err);
1911 goto err_dma;
1912 }
1913 }
1914
1915 err = pci_request_mem_regions(pdev, name);
1916 if (err) {
1917 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
1918 goto err_pci_mem_reg;
1919 }
1920
1921 pci_set_master(pdev);
1922
1923 alloc_size = sizeof(struct enetc_si);
1924 if (sizeof_priv) {
1925 /* align priv to 32B */
1926 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1927 alloc_size += sizeof_priv;
1928 }
1929 /* force 32B alignment for enetc_si */
1930 alloc_size += ENETC_SI_ALIGN - 1;
1931
1932 p = kzalloc(alloc_size, GFP_KERNEL);
1933 if (!p) {
1934 err = -ENOMEM;
1935 goto err_alloc_si;
1936 }
1937
1938 si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1939 si->pad = (char *)si - (char *)p;
1940
1941 pci_set_drvdata(pdev, si);
1942 si->pdev = pdev;
1943 hw = &si->hw;
1944
1945 len = pci_resource_len(pdev, ENETC_BAR_REGS);
1946 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1947 if (!hw->reg) {
1948 err = -ENXIO;
1949 dev_err(&pdev->dev, "ioremap() failed\n");
1950 goto err_ioremap;
1951 }
1952 if (len > ENETC_PORT_BASE)
1953 hw->port = hw->reg + ENETC_PORT_BASE;
1954 if (len > ENETC_GLOBAL_BASE)
1955 hw->global = hw->reg + ENETC_GLOBAL_BASE;
1956
1957 enetc_detect_errata(si);
1958
1959 return 0;
1960
1961 err_ioremap:
1962 enetc_kfree_si(si);
1963 err_alloc_si:
1964 pci_release_mem_regions(pdev);
1965 err_pci_mem_reg:
1966 err_dma:
1967 pci_disable_device(pdev);
1968
1969 return err;
1970 }
1971
1972 void enetc_pci_remove(struct pci_dev *pdev)
1973 {
1974 struct enetc_si *si = pci_get_drvdata(pdev);
1975 struct enetc_hw *hw = &si->hw;
1976
1977 iounmap(hw->reg);
1978 enetc_kfree_si(si);
1979 pci_release_mem_regions(pdev);
1980 pci_disable_device(pdev);
1981 }