// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
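/* Per-descriptor receive error bits: carrier detect lost (R_CD_S),
 * overrun (R_OV_S), CRC error (R_CR_S), abort sequence (R_AB_S),
 * non-octet-aligned frame (R_NO_S) and frame-length violation (R_LG_S).
 * hdlc_rx_done() maps each of these to the matching netdev statistic.
 */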
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

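/* One mutable copy of the template above per UCC; ucc_hdlc_probe() copies
 * utdm_primary_info into the slot for the probed UCC before customizing it.
 */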
static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];

static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;
	s32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
		uf_info->cds = 1;
		uf_info->ctss = 1;
	} else {
		uf_info->cds = 0;
		uf_info->ctsp = 0;
		uf_info->ctss = 0;
	}

	/* This sets the HPM bit in the CMXUCR register, which configures
	 * an open-drain-connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf\n");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR to normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);

	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate DMA memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);

	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate DMA memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

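	/* Ring bookkeeping: curtx_bd/skb_curtx are the producer cursors
	 * advanced by ucc_hdlc_tx(), dirty_tx/skb_dirtytx are the consumer
	 * cursors advanced by hdlc_tx_done(), and currx_bd/currx_bdnum track
	 * the next RX BD to be examined by hdlc_rx_done().
	 */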
	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}
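	/* The riptr/tiptr fields in the parameter RAM are only 16 bits wide,
	 * so a MURAM allocation whose offset does not fit in a u16 would be
	 * silently truncated; reject it instead.
	 */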
	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate BD data buffers\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

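	/* One coherent region backs both rings: the first
	 * RX_BD_RING_LEN * MAX_RX_BUF_LENGTH bytes hold the RX slots and the
	 * rest holds the TX slots, with BD i pointing at a fixed
	 * MAX_RX_BUF_LENGTH-sized slot.  The last BD in each ring carries the
	 * wrap bit (R_W_S/T_W_S) so the QE cycles back to the ring base.
	 */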
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for HDLC header\n");
			return -ENOMEM;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong PPP header\n");
			return -ENOMEM;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

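	/* The ring is full once the producer catches up with the oldest
	 * BD still awaiting confirmation; stop the queue until
	 * hdlc_tx_done() reclaims a BD and wakes it again.
	 */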
	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}

static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the oldest BD that is still awaiting confirmation */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;	/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}

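/* Process up to rx_work_limit completed RX BDs: count errors, copy each
 * good frame into a freshly allocated skb, hand it to the stack and hand
 * the BD back to the QE (R_E_S set again).  Returns the number of frames
 * delivered, or -ENOMEM if an skb allocation fails.
 */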
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers (R_E clear means the BD is full) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

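/* NAPI poll: reclaim completed TX BDs first (under the lock shared with
 * ucc_hdlc_tx()), then receive up to the remaining budget.  RX/TX event
 * interrupts are re-enabled only when the budget was not exhausted.
 */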
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits_be32(priv->uccf->p_uccm,
				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

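/* Interrupt handler: acknowledge the unmasked events, then mask further
 * RX/TX event interrupts and defer their processing to NAPI.  BSY (frame
 * discarded because no RX BD was free) and TXE are counted here directly.
 */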
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	u32 ucce;
	u32 uccm;

	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

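/* Bring the interface up: request the UCC interrupt, ask the QE to
 * initialize the TX/RX parameters, enable the fast UCC and (when a TDM
 * interface is used) the TDM port.  hdlc_busy guards against double open.
 */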
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

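/* Release everything uhdlc_init() allocated, roughly in reverse order;
 * most resources are guarded by NULL checks so partially initialized
 * devices can be cleaned up too.
 */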
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

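/* Suspend/resume support: register state, parameter RAM and clock routing
 * are saved to memory on suspend and written back on resume, presumably
 * because the QE block can lose this state in deep sleep.  The BD rings
 * are re-initialized from scratch on resume.
 */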
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	uf_regs = priv->uf_regs;

	/* back up gumr and guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open       = uhdlc_open,
	.ndo_stop       = uhdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = uhdlc_ioctl,
	.ndo_tx_timeout = uhdlc_tx_timeout,
};

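/* Map the registers of the platform device matching @name (a compatible
 * string): look the device up in the device tree, ioremap its first MEM
 * resource, and zero the SI RAM exactly once (tracked by a static flag)
 * when @init_flag is set.
 */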
static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}
	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}

static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;
		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	/* utdm is only allocated when a TDM interface is used */
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}
	kfree(priv);

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);