/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];

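/* Set up one UCC in HDLC mode: configure the UCC fast controller, allocate
 * the Rx/Tx buffer descriptor rings and data buffers in DMA-coherent memory,
 * allocate the parameter RAM in MURAM and program it (BD ring bases, MRBLR,
 * CRC preset/mask, address filter), then prime every BD as empty.
 */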
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
		uf_info->cds = 1;
		uf_info->ctss = 1;
	} else {
		uf_info->cds = 0;
		uf_info->ctsp = 0;
		uf_info->ctss = 0;
	}

	/* This sets the HPM field in the CMXUCR register, which configures an
	 * open-drain-connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.\n");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
				UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);

	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);

	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

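/* Transmit handler (installed as hdlc->xmit): validate or prepend the
 * protocol header, copy the frame into the next free Tx buffer, mark that
 * BD ready, and stop the queue once the ring wraps around to the first
 * not-yet-reclaimed BD.
 */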
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return -ENOMEM;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return -ENOMEM;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}

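/* Reclaim transmitted BDs (called under priv->lock from the poll handler):
 * walk the ring from dirty_tx while the Ready bit is clear, account errors,
 * free the corresponding skbs, and restart the transmitter after an
 * underrun or lost carrier.
 */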
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;	/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* The BD contains an already-transmitted buffer. Handle that
		 * buffer and release the BD to be used with the current frame.
		 */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}

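/* NAPI receive: consume full BDs up to rx_work_limit, copy each frame out
 * of the DMA buffer into a fresh skb (stripping the HDLC head and/or CRC as
 * the device type requires), hand it to the stack, then recycle the BD as
 * empty. Returns the number of frames processed.
 */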
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

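/* NAPI poll: reap completed Tx BDs, receive up to 'budget' frames, and
 * re-enable the Rx/Tx event interrupts in UCCM once the work is done.
 */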
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

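/* Interrupt handler: acknowledge the enabled UCCE events, mask Rx/Tx events
 * while NAPI is scheduled to handle them, and count busy and Tx-error
 * conditions directly.
 */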
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

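/* SIOCWANDEV handler: report an E1 interface with the configured clocking
 * via IF_GET_IFACE; everything else is deferred to the generic hdlc_ioctl().
 */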
static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

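/* Bring the interface up: request the UCC interrupt, issue QE_INIT_TX_RX,
 * enable the fast controller (and the TDM port in TSA mode), then start
 * NAPI and the Tx queue. hdlc_busy guards against double initialization.
 */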
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

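/* Undo uhdlc_init(): release the MURAM allocations (riptr/tiptr and the
 * parameter RAM), free both BD rings and data buffers, the skb arrays, the
 * register mapping, and the UCC fast private data.
 */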
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	/* riptr/tiptr live in big-endian parameter RAM, so read them back
	 * through the accessor before freeing the MURAM they point to
	 */
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

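/* Bring the interface down: stop NAPI, issue a graceful Tx stop and close
 * the Rx BDs via QE commands, disable the TDM port and the fast controller,
 * and release the interrupt.
 */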
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

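/* Power management: suspend snapshots GUMR/GUEMR, the HDLC parameter RAM
 * and the QE clock routing so that uhdlc_resume() can reprogram them and
 * rebuild the BD rings after a sleep state.
 */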
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* back up gumr and guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr and guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static void uhdlc_tx_timeout(struct net_device *ndev)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open = uhdlc_open,
	.ndo_stop = uhdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl = uhdlc_ioctl,
	.ndo_tx_timeout = uhdlc_tx_timeout,
};

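/* Look up a platform device by compatible string and ioremap its first MEM
 * resource; the SIRAM is cleared once, on the first mapping that passes
 * init_flag.
 */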
static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}
	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}

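/* Probe: read cell-index and the Rx/Tx clock names from the device tree,
 * optionally set up the TDM interface (SI/SIRAM mappings), run uhdlc_init(),
 * and register the resulting HDLC net device.
 */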
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;
		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	/* utdm is only allocated and mapped when the TDM interface is used,
	 * so guard against dereferencing a NULL pointer on this path
	 */
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	/* utdm is NULL unless the TDM interface was configured */
	if (priv->utdm && priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm && priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}
	kfree(priv);

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe = ucc_hdlc_probe,
	.remove = ucc_hdlc_remove,
	.driver = {
		.name = DRV_NAME,
		.pm = HDLC_PM_OPS,
		.of_match_table = fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");