1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Freescale QUICC Engine HDLC Device Driver
4 * Copyright 2016 Freescale Semiconductor Inc.
7 #include <linux/delay.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/hdlc.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/netdevice.h>
17 #include <linux/of_address.h>
18 #include <linux/of_irq.h>
19 #include <linux/of_platform.h>
20 #include <linux/platform_device.h>
21 #include <linux/sched.h>
22 #include <linux/skbuff.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/stddef.h>
26 #include <soc/fsl/qe/qe_tdm.h>
27 #include <uapi/linux/if_arp.h>
29 #include "fsl_ucc_hdlc.h"
31 #define DRV_DESC "Freescale QE UCC HDLC Driver"
32 #define DRV_NAME "ucc_hdlc"
34 #define TDM_PPPOHT_SLIC_MAXIN
35 #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
37 static struct ucc_tdm_info utdm_primary_info
= {
52 .mode
= UCC_FAST_PROTOCOL_MODE_HDLC
,
53 .ttx_trx
= UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL
,
54 .tenc
= UCC_FAST_TX_ENCODING_NRZ
,
55 .renc
= UCC_FAST_RX_ENCODING_NRZ
,
56 .tcrc
= UCC_FAST_16_BIT_CRC
,
57 .synl
= UCC_FAST_SYNC_LEN_NOT_USED
,
61 #ifdef TDM_PPPOHT_SLIC_MAXIN
76 static struct ucc_tdm_info utdm_info
[UCC_MAX_NUM
];
78 static int uhdlc_init(struct ucc_hdlc_private
*priv
)
80 struct ucc_tdm_info
*ut_info
;
81 struct ucc_fast_info
*uf_info
;
86 dma_addr_t bd_dma_addr
;
91 ut_info
= priv
->ut_info
;
92 uf_info
= &ut_info
->uf_info
;
105 /* This sets HPM register in CMXUCR register which configures a
106 * open drain connected HDLC bus
109 uf_info
->brkpt_support
= 1;
111 uf_info
->uccm_mask
= ((UCC_HDLC_UCCE_RXB
| UCC_HDLC_UCCE_RXF
|
112 UCC_HDLC_UCCE_TXB
) << 16);
114 ret
= ucc_fast_init(uf_info
, &priv
->uccf
);
116 dev_err(priv
->dev
, "Failed to init uccf.");
120 priv
->uf_regs
= priv
->uccf
->uf_regs
;
121 ucc_fast_disable(priv
->uccf
, COMM_DIR_RX
| COMM_DIR_TX
);
124 if (priv
->loopback
) {
125 dev_info(priv
->dev
, "Loopback Mode\n");
126 /* use the same clock when work in loopback */
127 qe_setbrg(ut_info
->uf_info
.rx_clock
, 20000000, 1);
129 gumr
= ioread32be(&priv
->uf_regs
->gumr
);
130 gumr
|= (UCC_FAST_GUMR_LOOPBACK
| UCC_FAST_GUMR_CDS
|
132 gumr
&= ~(UCC_FAST_GUMR_CTSP
| UCC_FAST_GUMR_RSYN
);
133 iowrite32be(gumr
, &priv
->uf_regs
->gumr
);
138 ucc_tdm_init(priv
->utdm
, priv
->ut_info
);
140 /* Write to QE CECR, UCCx channel to Stop Transmission */
141 cecr_subblock
= ucc_fast_get_qe_cr_subblock(uf_info
->ucc_num
);
142 ret
= qe_issue_cmd(QE_STOP_TX
, cecr_subblock
,
143 QE_CR_PROTOCOL_UNSPECIFIED
, 0);
145 /* Set UPSMR normal mode (need fixed)*/
146 iowrite32be(0, &priv
->uf_regs
->upsmr
);
149 if (priv
->hdlc_bus
) {
152 dev_info(priv
->dev
, "HDLC bus Mode\n");
153 upsmr
= ioread32be(&priv
->uf_regs
->upsmr
);
155 /* bus mode and retransmit enable, with collision window
158 upsmr
|= UCC_HDLC_UPSMR_RTE
| UCC_HDLC_UPSMR_BUS
|
160 iowrite32be(upsmr
, &priv
->uf_regs
->upsmr
);
162 /* explicitly disable CDS & CTSP */
163 gumr
= ioread32be(&priv
->uf_regs
->gumr
);
164 gumr
&= ~(UCC_FAST_GUMR_CDS
| UCC_FAST_GUMR_CTSP
);
165 /* set automatic sync to explicitly ignore CD signal */
166 gumr
|= UCC_FAST_GUMR_SYNL_AUTO
;
167 iowrite32be(gumr
, &priv
->uf_regs
->gumr
);
170 priv
->rx_ring_size
= RX_BD_RING_LEN
;
171 priv
->tx_ring_size
= TX_BD_RING_LEN
;
173 priv
->rx_bd_base
= dma_alloc_coherent(priv
->dev
,
174 RX_BD_RING_LEN
* sizeof(struct qe_bd
),
175 &priv
->dma_rx_bd
, GFP_KERNEL
);
177 if (!priv
->rx_bd_base
) {
178 dev_err(priv
->dev
, "Cannot allocate MURAM memory for RxBDs\n");
184 priv
->tx_bd_base
= dma_alloc_coherent(priv
->dev
,
185 TX_BD_RING_LEN
* sizeof(struct qe_bd
),
186 &priv
->dma_tx_bd
, GFP_KERNEL
);
188 if (!priv
->tx_bd_base
) {
189 dev_err(priv
->dev
, "Cannot allocate MURAM memory for TxBDs\n");
194 /* Alloc parameter ram for ucc hdlc */
195 priv
->ucc_pram_offset
= qe_muram_alloc(sizeof(struct ucc_hdlc_param
),
196 ALIGNMENT_OF_UCC_HDLC_PRAM
);
198 if (IS_ERR_VALUE(priv
->ucc_pram_offset
)) {
199 dev_err(priv
->dev
, "Can not allocate MURAM for hdlc parameter.\n");
204 priv
->rx_skbuff
= kcalloc(priv
->rx_ring_size
,
205 sizeof(*priv
->rx_skbuff
),
207 if (!priv
->rx_skbuff
) {
212 priv
->tx_skbuff
= kcalloc(priv
->tx_ring_size
,
213 sizeof(*priv
->tx_skbuff
),
215 if (!priv
->tx_skbuff
) {
221 priv
->skb_dirtytx
= 0;
222 priv
->curtx_bd
= priv
->tx_bd_base
;
223 priv
->dirty_tx
= priv
->tx_bd_base
;
224 priv
->currx_bd
= priv
->rx_bd_base
;
225 priv
->currx_bdnum
= 0;
227 /* init parameter base */
228 cecr_subblock
= ucc_fast_get_qe_cr_subblock(uf_info
->ucc_num
);
229 ret
= qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE
, cecr_subblock
,
230 QE_CR_PROTOCOL_UNSPECIFIED
, priv
->ucc_pram_offset
);
232 priv
->ucc_pram
= (struct ucc_hdlc_param __iomem
*)
233 qe_muram_addr(priv
->ucc_pram_offset
);
235 /* Zero out parameter ram */
236 memset_io(priv
->ucc_pram
, 0, sizeof(struct ucc_hdlc_param
));
238 /* Alloc riptr, tiptr */
239 riptr
= qe_muram_alloc(32, 32);
240 if (IS_ERR_VALUE(riptr
)) {
241 dev_err(priv
->dev
, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
246 tiptr
= qe_muram_alloc(32, 32);
247 if (IS_ERR_VALUE(tiptr
)) {
248 dev_err(priv
->dev
, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
252 if (riptr
!= (u16
)riptr
|| tiptr
!= (u16
)tiptr
) {
253 dev_err(priv
->dev
, "MURAM allocation out of addressable range\n");
258 /* Set RIPTR, TIPTR */
259 iowrite16be(riptr
, &priv
->ucc_pram
->riptr
);
260 iowrite16be(tiptr
, &priv
->ucc_pram
->tiptr
);
263 iowrite16be(MAX_RX_BUF_LENGTH
, &priv
->ucc_pram
->mrblr
);
265 /* Set RBASE, TBASE */
266 iowrite32be(priv
->dma_rx_bd
, &priv
->ucc_pram
->rbase
);
267 iowrite32be(priv
->dma_tx_bd
, &priv
->ucc_pram
->tbase
);
269 /* Set RSTATE, TSTATE */
270 iowrite32be(BMR_GBL
| BMR_BIG_ENDIAN
, &priv
->ucc_pram
->rstate
);
271 iowrite32be(BMR_GBL
| BMR_BIG_ENDIAN
, &priv
->ucc_pram
->tstate
);
273 /* Set C_MASK, C_PRES for 16bit CRC */
274 iowrite32be(CRC_16BIT_MASK
, &priv
->ucc_pram
->c_mask
);
275 iowrite32be(CRC_16BIT_PRES
, &priv
->ucc_pram
->c_pres
);
277 iowrite16be(MAX_FRAME_LENGTH
, &priv
->ucc_pram
->mflr
);
278 iowrite16be(DEFAULT_RFTHR
, &priv
->ucc_pram
->rfthr
);
279 iowrite16be(DEFAULT_RFTHR
, &priv
->ucc_pram
->rfcnt
);
280 iowrite16be(priv
->hmask
, &priv
->ucc_pram
->hmask
);
281 iowrite16be(DEFAULT_HDLC_ADDR
, &priv
->ucc_pram
->haddr1
);
282 iowrite16be(DEFAULT_HDLC_ADDR
, &priv
->ucc_pram
->haddr2
);
283 iowrite16be(DEFAULT_HDLC_ADDR
, &priv
->ucc_pram
->haddr3
);
284 iowrite16be(DEFAULT_HDLC_ADDR
, &priv
->ucc_pram
->haddr4
);
287 bd_buffer
= dma_alloc_coherent(priv
->dev
,
288 (RX_BD_RING_LEN
+ TX_BD_RING_LEN
) * MAX_RX_BUF_LENGTH
,
289 &bd_dma_addr
, GFP_KERNEL
);
292 dev_err(priv
->dev
, "Could not allocate buffer descriptors\n");
297 priv
->rx_buffer
= bd_buffer
;
298 priv
->tx_buffer
= bd_buffer
+ RX_BD_RING_LEN
* MAX_RX_BUF_LENGTH
;
300 priv
->dma_rx_addr
= bd_dma_addr
;
301 priv
->dma_tx_addr
= bd_dma_addr
+ RX_BD_RING_LEN
* MAX_RX_BUF_LENGTH
;
303 for (i
= 0; i
< RX_BD_RING_LEN
; i
++) {
304 if (i
< (RX_BD_RING_LEN
- 1))
305 bd_status
= R_E_S
| R_I_S
;
307 bd_status
= R_E_S
| R_I_S
| R_W_S
;
309 iowrite16be(bd_status
, &priv
->rx_bd_base
[i
].status
);
310 iowrite32be(priv
->dma_rx_addr
+ i
* MAX_RX_BUF_LENGTH
,
311 &priv
->rx_bd_base
[i
].buf
);
314 for (i
= 0; i
< TX_BD_RING_LEN
; i
++) {
315 if (i
< (TX_BD_RING_LEN
- 1))
316 bd_status
= T_I_S
| T_TC_S
;
318 bd_status
= T_I_S
| T_TC_S
| T_W_S
;
320 iowrite16be(bd_status
, &priv
->tx_bd_base
[i
].status
);
321 iowrite32be(priv
->dma_tx_addr
+ i
* MAX_RX_BUF_LENGTH
,
322 &priv
->tx_bd_base
[i
].buf
);
328 qe_muram_free(tiptr
);
330 qe_muram_free(riptr
);
332 kfree(priv
->tx_skbuff
);
334 kfree(priv
->rx_skbuff
);
336 qe_muram_free(priv
->ucc_pram_offset
);
338 dma_free_coherent(priv
->dev
,
339 TX_BD_RING_LEN
* sizeof(struct qe_bd
),
340 priv
->tx_bd_base
, priv
->dma_tx_bd
);
342 dma_free_coherent(priv
->dev
,
343 RX_BD_RING_LEN
* sizeof(struct qe_bd
),
344 priv
->rx_bd_base
, priv
->dma_rx_bd
);
346 ucc_fast_free(priv
->uccf
);
351 static netdev_tx_t
ucc_hdlc_tx(struct sk_buff
*skb
, struct net_device
*dev
)
353 hdlc_device
*hdlc
= dev_to_hdlc(dev
);
354 struct ucc_hdlc_private
*priv
= (struct ucc_hdlc_private
*)hdlc
->priv
;
355 struct qe_bd __iomem
*bd
;
362 if (skb_headroom(skb
) < HDLC_HEAD_LEN
) {
363 dev
->stats
.tx_dropped
++;
365 netdev_err(dev
, "No enough space for hdlc head\n");
369 skb_push(skb
, HDLC_HEAD_LEN
);
371 proto_head
= (u16
*)skb
->data
;
372 *proto_head
= htons(DEFAULT_HDLC_HEAD
);
374 dev
->stats
.tx_bytes
+= skb
->len
;
378 proto_head
= (u16
*)skb
->data
;
379 if (*proto_head
!= htons(DEFAULT_PPP_HEAD
)) {
380 dev
->stats
.tx_dropped
++;
382 netdev_err(dev
, "Wrong ppp header\n");
386 dev
->stats
.tx_bytes
+= skb
->len
;
390 dev
->stats
.tx_bytes
+= skb
->len
;
394 dev
->stats
.tx_dropped
++;
398 netdev_sent_queue(dev
, skb
->len
);
399 spin_lock_irqsave(&priv
->lock
, flags
);
401 /* Start from the next BD that should be filled */
403 bd_status
= ioread16be(&bd
->status
);
404 /* Save the skb pointer so we can free it later */
405 priv
->tx_skbuff
[priv
->skb_curtx
] = skb
;
407 /* Update the current skb pointer (wrapping if this was the last) */
409 (priv
->skb_curtx
+ 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN
);
411 /* copy skb data to tx buffer for sdma processing */
412 memcpy(priv
->tx_buffer
+ (be32_to_cpu(bd
->buf
) - priv
->dma_tx_addr
),
413 skb
->data
, skb
->len
);
415 /* set bd status and length */
416 bd_status
= (bd_status
& T_W_S
) | T_R_S
| T_I_S
| T_L_S
| T_TC_S
;
418 iowrite16be(skb
->len
, &bd
->length
);
419 iowrite16be(bd_status
, &bd
->status
);
421 /* Move to next BD in the ring */
422 if (!(bd_status
& T_W_S
))
425 bd
= priv
->tx_bd_base
;
427 if (bd
== priv
->dirty_tx
) {
428 if (!netif_queue_stopped(dev
))
429 netif_stop_queue(dev
);
434 spin_unlock_irqrestore(&priv
->lock
, flags
);
439 static int hdlc_tx_restart(struct ucc_hdlc_private
*priv
)
444 ucc_fast_get_qe_cr_subblock(priv
->ut_info
->uf_info
.ucc_num
);
446 qe_issue_cmd(QE_RESTART_TX
, cecr_subblock
,
447 QE_CR_PROTOCOL_UNSPECIFIED
, 0);
451 static int hdlc_tx_done(struct ucc_hdlc_private
*priv
)
453 /* Start from the next BD that should be filled */
454 struct net_device
*dev
= priv
->ndev
;
455 unsigned int bytes_sent
= 0;
457 struct qe_bd
*bd
; /* BD pointer */
462 bd_status
= ioread16be(&bd
->status
);
464 /* Normal processing. */
465 while ((bd_status
& T_R_S
) == 0) {
468 if (bd_status
& T_UN_S
) { /* Underrun */
469 dev
->stats
.tx_fifo_errors
++;
472 if (bd_status
& T_CT_S
) { /* Carrier lost */
473 dev
->stats
.tx_carrier_errors
++;
477 /* BD contains already transmitted buffer. */
478 /* Handle the transmitted buffer and release */
479 /* the BD to be used with the current frame */
481 skb
= priv
->tx_skbuff
[priv
->skb_dirtytx
];
485 bytes_sent
+= skb
->len
;
486 dev
->stats
.tx_packets
++;
487 memset(priv
->tx_buffer
+
488 (be32_to_cpu(bd
->buf
) - priv
->dma_tx_addr
),
490 dev_consume_skb_irq(skb
);
492 priv
->tx_skbuff
[priv
->skb_dirtytx
] = NULL
;
495 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN
);
497 /* We freed a buffer, so now we can restart transmission */
498 if (netif_queue_stopped(dev
))
499 netif_wake_queue(dev
);
501 /* Advance the confirmation BD pointer */
502 if (!(bd_status
& T_W_S
))
505 bd
= priv
->tx_bd_base
;
506 bd_status
= ioread16be(&bd
->status
);
511 hdlc_tx_restart(priv
);
513 netdev_completed_queue(dev
, howmany
, bytes_sent
);
517 static int hdlc_rx_done(struct ucc_hdlc_private
*priv
, int rx_work_limit
)
519 struct net_device
*dev
= priv
->ndev
;
520 struct sk_buff
*skb
= NULL
;
521 hdlc_device
*hdlc
= dev_to_hdlc(dev
);
524 u16 length
, howmany
= 0;
528 bd_status
= ioread16be(&bd
->status
);
530 /* while there are received buffers and BD is full (~R_E) */
531 while (!((bd_status
& (R_E_S
)) || (--rx_work_limit
< 0))) {
532 if (bd_status
& (RX_BD_ERRORS
)) {
533 dev
->stats
.rx_errors
++;
535 if (bd_status
& R_CD_S
)
536 dev
->stats
.collisions
++;
537 if (bd_status
& R_OV_S
)
538 dev
->stats
.rx_fifo_errors
++;
539 if (bd_status
& R_CR_S
)
540 dev
->stats
.rx_crc_errors
++;
541 if (bd_status
& R_AB_S
)
542 dev
->stats
.rx_over_errors
++;
543 if (bd_status
& R_NO_S
)
544 dev
->stats
.rx_frame_errors
++;
545 if (bd_status
& R_LG_S
)
546 dev
->stats
.rx_length_errors
++;
550 bdbuffer
= priv
->rx_buffer
+
551 (priv
->currx_bdnum
* MAX_RX_BUF_LENGTH
);
552 length
= ioread16be(&bd
->length
);
556 bdbuffer
+= HDLC_HEAD_LEN
;
557 length
-= (HDLC_HEAD_LEN
+ HDLC_CRC_SIZE
);
559 skb
= dev_alloc_skb(length
);
561 dev
->stats
.rx_dropped
++;
565 skb_put(skb
, length
);
568 memcpy(skb
->data
, bdbuffer
, length
);
573 length
-= HDLC_CRC_SIZE
;
575 skb
= dev_alloc_skb(length
);
577 dev
->stats
.rx_dropped
++;
581 skb_put(skb
, length
);
584 memcpy(skb
->data
, bdbuffer
, length
);
588 dev
->stats
.rx_packets
++;
589 dev
->stats
.rx_bytes
+= skb
->len
;
592 skb
->protocol
= hdlc_type_trans(skb
, dev
);
593 netif_receive_skb(skb
);
596 iowrite16be((bd_status
& R_W_S
) | R_E_S
| R_I_S
, &bd
->status
);
598 /* update to point at the next bd */
599 if (bd_status
& R_W_S
) {
600 priv
->currx_bdnum
= 0;
601 bd
= priv
->rx_bd_base
;
603 if (priv
->currx_bdnum
< (RX_BD_RING_LEN
- 1))
604 priv
->currx_bdnum
+= 1;
606 priv
->currx_bdnum
= RX_BD_RING_LEN
- 1;
611 bd_status
= ioread16be(&bd
->status
);
618 static int ucc_hdlc_poll(struct napi_struct
*napi
, int budget
)
620 struct ucc_hdlc_private
*priv
= container_of(napi
,
621 struct ucc_hdlc_private
,
625 /* Tx event processing */
626 spin_lock(&priv
->lock
);
628 spin_unlock(&priv
->lock
);
631 howmany
+= hdlc_rx_done(priv
, budget
- howmany
);
633 if (howmany
< budget
) {
634 napi_complete_done(napi
, howmany
);
635 qe_setbits32(priv
->uccf
->p_uccm
,
636 (UCCE_HDLC_RX_EVENTS
| UCCE_HDLC_TX_EVENTS
) << 16);
642 static irqreturn_t
ucc_hdlc_irq_handler(int irq
, void *dev_id
)
644 struct ucc_hdlc_private
*priv
= (struct ucc_hdlc_private
*)dev_id
;
645 struct net_device
*dev
= priv
->ndev
;
646 struct ucc_fast_private
*uccf
;
647 struct ucc_tdm_info
*ut_info
;
651 ut_info
= priv
->ut_info
;
654 ucce
= ioread32be(uccf
->p_ucce
);
655 uccm
= ioread32be(uccf
->p_uccm
);
657 iowrite32be(ucce
, uccf
->p_ucce
);
661 if ((ucce
>> 16) & (UCCE_HDLC_RX_EVENTS
| UCCE_HDLC_TX_EVENTS
)) {
662 if (napi_schedule_prep(&priv
->napi
)) {
663 uccm
&= ~((UCCE_HDLC_RX_EVENTS
| UCCE_HDLC_TX_EVENTS
)
665 iowrite32be(uccm
, uccf
->p_uccm
);
666 __napi_schedule(&priv
->napi
);
670 /* Errors and other events */
671 if (ucce
>> 16 & UCC_HDLC_UCCE_BSY
)
672 dev
->stats
.rx_missed_errors
++;
673 if (ucce
>> 16 & UCC_HDLC_UCCE_TXE
)
674 dev
->stats
.tx_errors
++;
679 static int uhdlc_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
681 const size_t size
= sizeof(te1_settings
);
683 struct ucc_hdlc_private
*priv
= netdev_priv(dev
);
685 if (cmd
!= SIOCWANDEV
)
686 return hdlc_ioctl(dev
, ifr
, cmd
);
688 switch (ifr
->ifr_settings
.type
) {
690 ifr
->ifr_settings
.type
= IF_IFACE_E1
;
691 if (ifr
->ifr_settings
.size
< size
) {
692 ifr
->ifr_settings
.size
= size
; /* data size wanted */
695 memset(&line
, 0, sizeof(line
));
696 line
.clock_type
= priv
->clocking
;
698 if (copy_to_user(ifr
->ifr_settings
.ifs_ifsu
.sync
, &line
, size
))
703 return hdlc_ioctl(dev
, ifr
, cmd
);
707 static int uhdlc_open(struct net_device
*dev
)
710 hdlc_device
*hdlc
= dev_to_hdlc(dev
);
711 struct ucc_hdlc_private
*priv
= hdlc
->priv
;
712 struct ucc_tdm
*utdm
= priv
->utdm
;
714 if (priv
->hdlc_busy
!= 1) {
715 if (request_irq(priv
->ut_info
->uf_info
.irq
,
716 ucc_hdlc_irq_handler
, 0, "hdlc", priv
))
719 cecr_subblock
= ucc_fast_get_qe_cr_subblock(
720 priv
->ut_info
->uf_info
.ucc_num
);
722 qe_issue_cmd(QE_INIT_TX_RX
, cecr_subblock
,
723 QE_CR_PROTOCOL_UNSPECIFIED
, 0);
725 ucc_fast_enable(priv
->uccf
, COMM_DIR_RX
| COMM_DIR_TX
);
727 /* Enable the TDM port */
729 utdm
->si_regs
->siglmr1_h
|= (0x1 << utdm
->tdm_port
);
732 netif_device_attach(priv
->ndev
);
733 napi_enable(&priv
->napi
);
734 netdev_reset_queue(dev
);
735 netif_start_queue(dev
);
742 static void uhdlc_memclean(struct ucc_hdlc_private
*priv
)
744 qe_muram_free(priv
->ucc_pram
->riptr
);
745 qe_muram_free(priv
->ucc_pram
->tiptr
);
747 if (priv
->rx_bd_base
) {
748 dma_free_coherent(priv
->dev
,
749 RX_BD_RING_LEN
* sizeof(struct qe_bd
),
750 priv
->rx_bd_base
, priv
->dma_rx_bd
);
752 priv
->rx_bd_base
= NULL
;
756 if (priv
->tx_bd_base
) {
757 dma_free_coherent(priv
->dev
,
758 TX_BD_RING_LEN
* sizeof(struct qe_bd
),
759 priv
->tx_bd_base
, priv
->dma_tx_bd
);
761 priv
->tx_bd_base
= NULL
;
765 if (priv
->ucc_pram
) {
766 qe_muram_free(priv
->ucc_pram_offset
);
767 priv
->ucc_pram
= NULL
;
768 priv
->ucc_pram_offset
= 0;
771 kfree(priv
->rx_skbuff
);
772 priv
->rx_skbuff
= NULL
;
774 kfree(priv
->tx_skbuff
);
775 priv
->tx_skbuff
= NULL
;
778 iounmap(priv
->uf_regs
);
779 priv
->uf_regs
= NULL
;
783 ucc_fast_free(priv
->uccf
);
787 if (priv
->rx_buffer
) {
788 dma_free_coherent(priv
->dev
,
789 RX_BD_RING_LEN
* MAX_RX_BUF_LENGTH
,
790 priv
->rx_buffer
, priv
->dma_rx_addr
);
791 priv
->rx_buffer
= NULL
;
792 priv
->dma_rx_addr
= 0;
795 if (priv
->tx_buffer
) {
796 dma_free_coherent(priv
->dev
,
797 TX_BD_RING_LEN
* MAX_RX_BUF_LENGTH
,
798 priv
->tx_buffer
, priv
->dma_tx_addr
);
799 priv
->tx_buffer
= NULL
;
800 priv
->dma_tx_addr
= 0;
804 static int uhdlc_close(struct net_device
*dev
)
806 struct ucc_hdlc_private
*priv
= dev_to_hdlc(dev
)->priv
;
807 struct ucc_tdm
*utdm
= priv
->utdm
;
810 napi_disable(&priv
->napi
);
811 cecr_subblock
= ucc_fast_get_qe_cr_subblock(
812 priv
->ut_info
->uf_info
.ucc_num
);
814 qe_issue_cmd(QE_GRACEFUL_STOP_TX
, cecr_subblock
,
815 (u8
)QE_CR_PROTOCOL_UNSPECIFIED
, 0);
816 qe_issue_cmd(QE_CLOSE_RX_BD
, cecr_subblock
,
817 (u8
)QE_CR_PROTOCOL_UNSPECIFIED
, 0);
820 utdm
->si_regs
->siglmr1_h
&= ~(0x1 << utdm
->tdm_port
);
822 ucc_fast_disable(priv
->uccf
, COMM_DIR_RX
| COMM_DIR_TX
);
824 free_irq(priv
->ut_info
->uf_info
.irq
, priv
);
825 netif_stop_queue(dev
);
826 netdev_reset_queue(dev
);
832 static int ucc_hdlc_attach(struct net_device
*dev
, unsigned short encoding
,
833 unsigned short parity
)
835 struct ucc_hdlc_private
*priv
= dev_to_hdlc(dev
)->priv
;
837 if (encoding
!= ENCODING_NRZ
&&
838 encoding
!= ENCODING_NRZI
)
841 if (parity
!= PARITY_NONE
&&
842 parity
!= PARITY_CRC32_PR1_CCITT
&&
843 parity
!= PARITY_CRC16_PR0_CCITT
&&
844 parity
!= PARITY_CRC16_PR1_CCITT
)
847 priv
->encoding
= encoding
;
848 priv
->parity
= parity
;
854 static void store_clk_config(struct ucc_hdlc_private
*priv
)
856 struct qe_mux
*qe_mux_reg
= &qe_immr
->qmx
;
859 priv
->cmxsi1cr_h
= ioread32be(&qe_mux_reg
->cmxsi1cr_h
);
860 priv
->cmxsi1cr_l
= ioread32be(&qe_mux_reg
->cmxsi1cr_l
);
863 priv
->cmxsi1syr
= ioread32be(&qe_mux_reg
->cmxsi1syr
);
866 memcpy_fromio(priv
->cmxucr
, qe_mux_reg
->cmxucr
, 4 * sizeof(u32
));
869 static void resume_clk_config(struct ucc_hdlc_private
*priv
)
871 struct qe_mux
*qe_mux_reg
= &qe_immr
->qmx
;
873 memcpy_toio(qe_mux_reg
->cmxucr
, priv
->cmxucr
, 4 * sizeof(u32
));
875 iowrite32be(priv
->cmxsi1cr_h
, &qe_mux_reg
->cmxsi1cr_h
);
876 iowrite32be(priv
->cmxsi1cr_l
, &qe_mux_reg
->cmxsi1cr_l
);
878 iowrite32be(priv
->cmxsi1syr
, &qe_mux_reg
->cmxsi1syr
);
881 static int uhdlc_suspend(struct device
*dev
)
883 struct ucc_hdlc_private
*priv
= dev_get_drvdata(dev
);
884 struct ucc_tdm_info
*ut_info
;
885 struct ucc_fast __iomem
*uf_regs
;
890 if (!netif_running(priv
->ndev
))
893 netif_device_detach(priv
->ndev
);
894 napi_disable(&priv
->napi
);
896 ut_info
= priv
->ut_info
;
897 uf_regs
= priv
->uf_regs
;
899 /* backup gumr guemr*/
900 priv
->gumr
= ioread32be(&uf_regs
->gumr
);
901 priv
->guemr
= ioread8(&uf_regs
->guemr
);
903 priv
->ucc_pram_bak
= kmalloc(sizeof(*priv
->ucc_pram_bak
),
905 if (!priv
->ucc_pram_bak
)
908 /* backup HDLC parameter */
909 memcpy_fromio(priv
->ucc_pram_bak
, priv
->ucc_pram
,
910 sizeof(struct ucc_hdlc_param
));
912 /* store the clk configuration */
913 store_clk_config(priv
);
916 ucc_fast_disable(priv
->uccf
, COMM_DIR_RX
| COMM_DIR_TX
);
921 static int uhdlc_resume(struct device
*dev
)
923 struct ucc_hdlc_private
*priv
= dev_get_drvdata(dev
);
924 struct ucc_tdm
*utdm
;
925 struct ucc_tdm_info
*ut_info
;
926 struct ucc_fast __iomem
*uf_regs
;
927 struct ucc_fast_private
*uccf
;
928 struct ucc_fast_info
*uf_info
;
936 if (!netif_running(priv
->ndev
))
940 ut_info
= priv
->ut_info
;
941 uf_info
= &ut_info
->uf_info
;
942 uf_regs
= priv
->uf_regs
;
945 /* restore gumr guemr */
946 iowrite8(priv
->guemr
, &uf_regs
->guemr
);
947 iowrite32be(priv
->gumr
, &uf_regs
->gumr
);
949 /* Set Virtual Fifo registers */
950 iowrite16be(uf_info
->urfs
, &uf_regs
->urfs
);
951 iowrite16be(uf_info
->urfet
, &uf_regs
->urfet
);
952 iowrite16be(uf_info
->urfset
, &uf_regs
->urfset
);
953 iowrite16be(uf_info
->utfs
, &uf_regs
->utfs
);
954 iowrite16be(uf_info
->utfet
, &uf_regs
->utfet
);
955 iowrite16be(uf_info
->utftt
, &uf_regs
->utftt
);
956 /* utfb, urfb are offsets from MURAM base */
957 iowrite32be(uccf
->ucc_fast_tx_virtual_fifo_base_offset
, &uf_regs
->utfb
);
958 iowrite32be(uccf
->ucc_fast_rx_virtual_fifo_base_offset
, &uf_regs
->urfb
);
960 /* Rx Tx and sync clock routing */
961 resume_clk_config(priv
);
963 iowrite32be(uf_info
->uccm_mask
, &uf_regs
->uccm
);
964 iowrite32be(0xffffffff, &uf_regs
->ucce
);
966 ucc_fast_disable(priv
->uccf
, COMM_DIR_RX
| COMM_DIR_TX
);
970 ucc_tdm_init(priv
->utdm
, priv
->ut_info
);
972 /* Write to QE CECR, UCCx channel to Stop Transmission */
973 cecr_subblock
= ucc_fast_get_qe_cr_subblock(uf_info
->ucc_num
);
974 ret
= qe_issue_cmd(QE_STOP_TX
, cecr_subblock
,
975 (u8
)QE_CR_PROTOCOL_UNSPECIFIED
, 0);
977 /* Set UPSMR normal mode */
978 iowrite32be(0, &uf_regs
->upsmr
);
980 /* init parameter base */
981 cecr_subblock
= ucc_fast_get_qe_cr_subblock(uf_info
->ucc_num
);
982 ret
= qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE
, cecr_subblock
,
983 QE_CR_PROTOCOL_UNSPECIFIED
, priv
->ucc_pram_offset
);
985 priv
->ucc_pram
= (struct ucc_hdlc_param __iomem
*)
986 qe_muram_addr(priv
->ucc_pram_offset
);
988 /* restore ucc parameter */
989 memcpy_toio(priv
->ucc_pram
, priv
->ucc_pram_bak
,
990 sizeof(struct ucc_hdlc_param
));
991 kfree(priv
->ucc_pram_bak
);
993 /* rebuild BD entry */
994 for (i
= 0; i
< RX_BD_RING_LEN
; i
++) {
995 if (i
< (RX_BD_RING_LEN
- 1))
996 bd_status
= R_E_S
| R_I_S
;
998 bd_status
= R_E_S
| R_I_S
| R_W_S
;
1000 iowrite16be(bd_status
, &priv
->rx_bd_base
[i
].status
);
1001 iowrite32be(priv
->dma_rx_addr
+ i
* MAX_RX_BUF_LENGTH
,
1002 &priv
->rx_bd_base
[i
].buf
);
1005 for (i
= 0; i
< TX_BD_RING_LEN
; i
++) {
1006 if (i
< (TX_BD_RING_LEN
- 1))
1007 bd_status
= T_I_S
| T_TC_S
;
1009 bd_status
= T_I_S
| T_TC_S
| T_W_S
;
1011 iowrite16be(bd_status
, &priv
->tx_bd_base
[i
].status
);
1012 iowrite32be(priv
->dma_tx_addr
+ i
* MAX_RX_BUF_LENGTH
,
1013 &priv
->tx_bd_base
[i
].buf
);
1016 /* if hdlc is busy enable TX and RX */
1017 if (priv
->hdlc_busy
== 1) {
1018 cecr_subblock
= ucc_fast_get_qe_cr_subblock(
1019 priv
->ut_info
->uf_info
.ucc_num
);
1021 qe_issue_cmd(QE_INIT_TX_RX
, cecr_subblock
,
1022 (u8
)QE_CR_PROTOCOL_UNSPECIFIED
, 0);
1024 ucc_fast_enable(priv
->uccf
, COMM_DIR_RX
| COMM_DIR_TX
);
1026 /* Enable the TDM port */
1028 utdm
->si_regs
->siglmr1_h
|= (0x1 << utdm
->tdm_port
);
1031 napi_enable(&priv
->napi
);
1032 netif_device_attach(priv
->ndev
);
1037 static const struct dev_pm_ops uhdlc_pm_ops
= {
1038 .suspend
= uhdlc_suspend
,
1039 .resume
= uhdlc_resume
,
1040 .freeze
= uhdlc_suspend
,
1041 .thaw
= uhdlc_resume
,
1044 #define HDLC_PM_OPS (&uhdlc_pm_ops)
1048 #define HDLC_PM_OPS NULL
/* .ndo_tx_timeout: log only; the watchdog fires if Tx stalls for
 * dev->watchdog_timeo.
 */
static void uhdlc_tx_timeout(struct net_device *ndev)
{
	netdev_err(ndev, "%s\n", __func__);
}
1056 static const struct net_device_ops uhdlc_ops
= {
1057 .ndo_open
= uhdlc_open
,
1058 .ndo_stop
= uhdlc_close
,
1059 .ndo_start_xmit
= hdlc_start_xmit
,
1060 .ndo_do_ioctl
= uhdlc_ioctl
,
1061 .ndo_tx_timeout
= uhdlc_tx_timeout
,
1064 static int hdlc_map_iomem(char *name
, int init_flag
, void __iomem
**ptr
)
1066 struct device_node
*np
;
1067 struct platform_device
*pdev
;
1068 struct resource
*res
;
1069 static int siram_init_flag
;
1072 np
= of_find_compatible_node(NULL
, NULL
, name
);
1076 pdev
= of_find_device_by_node(np
);
1078 pr_err("%pOFn: failed to lookup pdev\n", np
);
1084 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1087 goto error_put_device
;
1089 *ptr
= ioremap(res
->start
, resource_size(res
));
1092 goto error_put_device
;
1095 /* We've remapped the addresses, and we don't need the device any
1096 * more, so we should release it.
1098 put_device(&pdev
->dev
);
1100 if (init_flag
&& siram_init_flag
== 0) {
1101 memset_io(*ptr
, 0, resource_size(res
));
1102 siram_init_flag
= 1;
1107 put_device(&pdev
->dev
);
1112 static int ucc_hdlc_probe(struct platform_device
*pdev
)
1114 struct device_node
*np
= pdev
->dev
.of_node
;
1115 struct ucc_hdlc_private
*uhdlc_priv
= NULL
;
1116 struct ucc_tdm_info
*ut_info
;
1117 struct ucc_tdm
*utdm
= NULL
;
1118 struct resource res
;
1119 struct net_device
*dev
;
1126 ret
= of_property_read_u32_index(np
, "cell-index", 0, &val
);
1128 dev_err(&pdev
->dev
, "Invalid ucc property\n");
1133 if (ucc_num
> (UCC_MAX_NUM
- 1) || ucc_num
< 0) {
1134 dev_err(&pdev
->dev
, ": Invalid UCC num\n");
1138 memcpy(&utdm_info
[ucc_num
], &utdm_primary_info
,
1139 sizeof(utdm_primary_info
));
1141 ut_info
= &utdm_info
[ucc_num
];
1142 ut_info
->uf_info
.ucc_num
= ucc_num
;
1144 sprop
= of_get_property(np
, "rx-clock-name", NULL
);
1146 ut_info
->uf_info
.rx_clock
= qe_clock_source(sprop
);
1147 if ((ut_info
->uf_info
.rx_clock
< QE_CLK_NONE
) ||
1148 (ut_info
->uf_info
.rx_clock
> QE_CLK24
)) {
1149 dev_err(&pdev
->dev
, "Invalid rx-clock-name property\n");
1153 dev_err(&pdev
->dev
, "Invalid rx-clock-name property\n");
1157 sprop
= of_get_property(np
, "tx-clock-name", NULL
);
1159 ut_info
->uf_info
.tx_clock
= qe_clock_source(sprop
);
1160 if ((ut_info
->uf_info
.tx_clock
< QE_CLK_NONE
) ||
1161 (ut_info
->uf_info
.tx_clock
> QE_CLK24
)) {
1162 dev_err(&pdev
->dev
, "Invalid tx-clock-name property\n");
1166 dev_err(&pdev
->dev
, "Invalid tx-clock-name property\n");
1170 ret
= of_address_to_resource(np
, 0, &res
);
1174 ut_info
->uf_info
.regs
= res
.start
;
1175 ut_info
->uf_info
.irq
= irq_of_parse_and_map(np
, 0);
1177 uhdlc_priv
= kzalloc(sizeof(*uhdlc_priv
), GFP_KERNEL
);
1182 dev_set_drvdata(&pdev
->dev
, uhdlc_priv
);
1183 uhdlc_priv
->dev
= &pdev
->dev
;
1184 uhdlc_priv
->ut_info
= ut_info
;
1186 if (of_get_property(np
, "fsl,tdm-interface", NULL
))
1187 uhdlc_priv
->tsa
= 1;
1189 if (of_get_property(np
, "fsl,ucc-internal-loopback", NULL
))
1190 uhdlc_priv
->loopback
= 1;
1192 if (of_get_property(np
, "fsl,hdlc-bus", NULL
))
1193 uhdlc_priv
->hdlc_bus
= 1;
1195 if (uhdlc_priv
->tsa
== 1) {
1196 utdm
= kzalloc(sizeof(*utdm
), GFP_KERNEL
);
1199 dev_err(&pdev
->dev
, "No mem to alloc ucc tdm data\n");
1200 goto free_uhdlc_priv
;
1202 uhdlc_priv
->utdm
= utdm
;
1203 ret
= ucc_of_parse_tdm(np
, utdm
, ut_info
);
1207 ret
= hdlc_map_iomem("fsl,t1040-qe-si", 0,
1208 (void __iomem
**)&utdm
->si_regs
);
1211 ret
= hdlc_map_iomem("fsl,t1040-qe-siram", 1,
1212 (void __iomem
**)&utdm
->siram
);
1217 if (of_property_read_u16(np
, "fsl,hmask", &uhdlc_priv
->hmask
))
1218 uhdlc_priv
->hmask
= DEFAULT_ADDR_MASK
;
1220 ret
= uhdlc_init(uhdlc_priv
);
1222 dev_err(&pdev
->dev
, "Failed to init uhdlc\n");
1223 goto undo_uhdlc_init
;
1226 dev
= alloc_hdlcdev(uhdlc_priv
);
1229 pr_err("ucc_hdlc: unable to allocate memory\n");
1230 goto undo_uhdlc_init
;
1233 uhdlc_priv
->ndev
= dev
;
1234 hdlc
= dev_to_hdlc(dev
);
1235 dev
->tx_queue_len
= 16;
1236 dev
->netdev_ops
= &uhdlc_ops
;
1237 dev
->watchdog_timeo
= 2 * HZ
;
1238 hdlc
->attach
= ucc_hdlc_attach
;
1239 hdlc
->xmit
= ucc_hdlc_tx
;
1240 netif_napi_add(dev
, &uhdlc_priv
->napi
, ucc_hdlc_poll
, 32);
1241 if (register_hdlc_device(dev
)) {
1243 pr_err("ucc_hdlc: unable to register hdlc device\n");
1252 iounmap(utdm
->siram
);
1254 iounmap(utdm
->si_regs
);
1256 if (uhdlc_priv
->tsa
)
1263 static int ucc_hdlc_remove(struct platform_device
*pdev
)
1265 struct ucc_hdlc_private
*priv
= dev_get_drvdata(&pdev
->dev
);
1267 uhdlc_memclean(priv
);
1269 if (priv
->utdm
->si_regs
) {
1270 iounmap(priv
->utdm
->si_regs
);
1271 priv
->utdm
->si_regs
= NULL
;
1274 if (priv
->utdm
->siram
) {
1275 iounmap(priv
->utdm
->siram
);
1276 priv
->utdm
->siram
= NULL
;
1280 dev_info(&pdev
->dev
, "UCC based hdlc module removed\n");
1285 static const struct of_device_id fsl_ucc_hdlc_of_match
[] = {
1287 .compatible
= "fsl,ucc-hdlc",
1292 MODULE_DEVICE_TABLE(of
, fsl_ucc_hdlc_of_match
);
1294 static struct platform_driver ucc_hdlc_driver
= {
1295 .probe
= ucc_hdlc_probe
,
1296 .remove
= ucc_hdlc_remove
,
1300 .of_match_table
= fsl_ucc_hdlc_of_match
,
1304 module_platform_driver(ucc_hdlc_driver
);
1305 MODULE_LICENSE("GPL");