hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* Freescale QUICC Engine HDLC Device Driver
  *
  * Copyright 2016 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
  */
 
 #include <linux/delay.h>
@@ -36,6 +32,7 @@
 #define DRV_NAME "ucc_hdlc"
 
 #define TDM_PPPOHT_SLIC_MAXIN
+#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
 
 static struct ucc_tdm_info utdm_primary_info = {
 	.uf_info = {
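
Note: RX_BD_ERRORS gathers every receive-side error bit a buffer descriptor can report (carrier detect lost, overrun, CRC, abort, non-octet alignment, length violation) into one mask, so the receive loop further down can detect "any error" with a single test before attributing it to a specific counter.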
@@ -87,8 +84,8 @@
 	int ret, i;
 	void *bd_buffer;
 	dma_addr_t bd_dma_addr;
-	u32 riptr;
-	u32 tiptr;
+	s32 riptr;
+	s32 tiptr;
 	u32 gumr;
 
 	ut_info = priv->ut_info;
@@ -97,6 +94,12 @@
 	if (priv->tsa) {
 		uf_info->tsa = 1;
 		uf_info->ctsp = 1;
+		uf_info->cds = 1;
+		uf_info->ctss = 1;
+	} else {
+		uf_info->cds = 0;
+		uf_info->ctsp = 0;
+		uf_info->ctss = 0;
 	}
 
 	/* This sets HPM register in CMXUCR register which configures a
@@ -192,7 +195,7 @@
 	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
 					       ALIGNMENT_OF_UCC_HDLC_PRAM);
 
-	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
+	if (priv->ucc_pram_offset < 0) {
 		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
 		ret = -ENOMEM;
 		goto free_tx_bd;
@@ -234,14 +237,14 @@
 
 	/* Alloc riptr, tiptr */
 	riptr = qe_muram_alloc(32, 32);
-	if (IS_ERR_VALUE(riptr)) {
+	if (riptr < 0) {
 		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
 		ret = -ENOMEM;
 		goto free_tx_skbuff;
 	}
 
 	tiptr = qe_muram_alloc(32, 32);
-	if (IS_ERR_VALUE(tiptr)) {
+	if (tiptr < 0) {
 		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
 		ret = -ENOMEM;
 		goto free_riptr;
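
Note: the switch from IS_ERR_VALUE() to a plain `< 0` test, together with the u32 → s32 change above, matters because qe_muram_alloc() reports failure as a negative errno, and a negative errno stored in a u32 does not land in the top-4095 error window IS_ERR_VALUE() checks on 64-bit builds. A minimal userspace sketch, not from the driver, showing why the old check could never fire there:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095
	/* Same test the kernel's IS_ERR_VALUE() performs, on unsigned long. */
	#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		uint32_t riptr = (uint32_t)-12;	/* a failed alloc: -ENOMEM */

		/* On LP64 the u32 zero-extends to 0xFFFFFFF4, far below the
		 * top-4095 error window, so the failure goes undetected ... */
		printf("IS_ERR_VALUE(u32 riptr) = %d\n", IS_ERR_VALUE(riptr)); /* 0 */

		/* ... while a signed 32-bit comparison catches it directly. */
		printf("(s32)riptr < 0          = %d\n", (int32_t)riptr < 0); /* 1 */
		return 0;
	}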
@@ -274,17 +277,16 @@
 	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
 	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
 	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
-	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
+	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
 
 	/* Get BD buffer */
-	bd_buffer = dma_zalloc_coherent(priv->dev,
-					(RX_BD_RING_LEN + TX_BD_RING_LEN) *
-					MAX_RX_BUF_LENGTH,
-					&bd_dma_addr, GFP_KERNEL);
+	bd_buffer = dma_alloc_coherent(priv->dev,
+				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
+				       &bd_dma_addr, GFP_KERNEL);
 
 	if (!bd_buffer) {
 		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
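
Note: dma_zalloc_coherent() was removed from the kernel once dma_alloc_coherent() was guaranteed to return zeroed memory, so this hunk is a mechanical rename with no behavioral change. The hmask write above now honors a per-board mask (filled in by the `fsl,hmask` device-tree lookup added to the probe function further down) instead of the hard-coded DEFAULT_ADDR_MASK.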
@@ -384,11 +386,16 @@
 		dev->stats.tx_bytes += skb->len;
 		break;
 
+	case ARPHRD_ETHER:
+		dev->stats.tx_bytes += skb->len;
+		break;
+
 	default:
 		dev->stats.tx_dropped++;
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
+	netdev_sent_queue(dev, skb->len);
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Start from the next BD that should be filled */
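
Note: netdev_sent_queue() opens byte-queue-limits (BQL) accounting for every frame handed to the hardware; the matching completion-side call is added in hdlc_tx_done() below.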
@@ -429,12 +436,27 @@
 	return NETDEV_TX_OK;
 }
 
+static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
+{
+	u32 cecr_subblock;
+
+	cecr_subblock =
+		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
+
+	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
+		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
+	return 0;
+}
+
 static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 {
 	/* Start from the next BD that should be filled */
 	struct net_device *dev = priv->ndev;
+	unsigned int bytes_sent = 0;
+	int howmany = 0;
 	struct qe_bd *bd;		/* BD pointer */
 	u16 bd_status;
+	int tx_restart = 0;
 
 	bd = priv->dirty_tx;
 	bd_status = ioread16be(&bd->status);
@@ -443,6 +465,15 @@
 	while ((bd_status & T_R_S) == 0) {
 		struct sk_buff *skb;
 
+		if (bd_status & T_UN_S) { /* Underrun */
+			dev->stats.tx_fifo_errors++;
+			tx_restart = 1;
+		}
+		if (bd_status & T_CT_S) { /* Carrier lost */
+			dev->stats.tx_carrier_errors++;
+			tx_restart = 1;
+		}
+
 		/* BD contains already transmitted buffer. */
 		/* Handle the transmitted buffer and release */
 		/* the BD to be used with the current frame */
@@ -450,11 +481,13 @@
 		skb = priv->tx_skbuff[priv->skb_dirtytx];
 		if (!skb)
 			break;
+		howmany++;
+		bytes_sent += skb->len;
 		dev->stats.tx_packets++;
 		memset(priv->tx_buffer +
 		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
 		       0, skb->len);
-		dev_kfree_skb_irq(skb);
+		dev_consume_skb_irq(skb);
 
 		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
 		priv->skb_dirtytx =
@@ -474,6 +507,10 @@
 	}
 	priv->dirty_tx = bd;
 
+	if (tx_restart)
+		hdlc_tx_restart(priv);
+
+	netdev_completed_queue(dev, howmany, bytes_sent);
 	return 0;
 }
 
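Note: hdlc_tx_done() now does two extra things: when a BD reported underrun (T_UN_S) or lost carrier (T_CT_S) it nudges the controller back to life with a QE_RESTART_TX command, and it closes the BQL loop opened in the xmit path. The contract the netdev_*_queue() calls follow (these are the real netdevice.h API; the fragment is an illustrative sketch, not driver code):

	netdev_sent_queue(dev, skb->len);         /* xmit: bytes handed to HW  */
	netdev_completed_queue(dev, pkts, bytes); /* completion: bytes drained */
	netdev_reset_queue(dev);                  /* ring flush: rewind both   */

If the sent and completed counts diverge permanently, BQL throttles the queue; that is why uhdlc_open() and uhdlc_close() below also gain netdev_reset_queue() calls.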
@@ -492,11 +529,22 @@
 
 	/* while there are received buffers and BD is full (~R_E) */
 	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
-		if (bd_status & R_OV_S)
-			dev->stats.rx_over_errors++;
-		if (bd_status & R_CR_S) {
-			dev->stats.rx_crc_errors++;
-			dev->stats.rx_dropped++;
+		if (bd_status & (RX_BD_ERRORS)) {
+			dev->stats.rx_errors++;
+
+			if (bd_status & R_CD_S)
+				dev->stats.collisions++;
+			if (bd_status & R_OV_S)
+				dev->stats.rx_fifo_errors++;
+			if (bd_status & R_CR_S)
+				dev->stats.rx_crc_errors++;
+			if (bd_status & R_AB_S)
+				dev->stats.rx_over_errors++;
+			if (bd_status & R_NO_S)
+				dev->stats.rx_frame_errors++;
+			if (bd_status & R_LG_S)
+				dev->stats.rx_length_errors++;
+
 			goto recycle;
 		}
 		bdbuffer = priv->rx_buffer +
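
Note: each BD error bit now feeds the standard counter it corresponds to in this driver's accounting (R_CD_S → collisions, R_OV_S → rx_fifo_errors, R_CR_S → rx_crc_errors, R_AB_S → rx_over_errors, R_NO_S → rx_frame_errors, R_LG_S → rx_length_errors), with rx_errors as the aggregate, instead of folding everything into the overrun and CRC counters as before.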
@@ -521,6 +569,7 @@
 		break;
 
 	case ARPHRD_PPP:
+	case ARPHRD_ETHER:
 		length -= HDLC_CRC_SIZE;
 
 		skb = dev_alloc_skb(length);
@@ -544,7 +593,7 @@
 		netif_receive_skb(skb);
 
 recycle:
-		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
 
 		/* update to point at the next bd */
 		if (bd_status & R_W_S) {
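
Note: the recycle fix re-arms the BD keeping only the wrap bit; previously the whole status word, stale error bits included, was written back, so a BD that had once flagged an error kept flagging it on every pass around the ring. A small standalone sketch (the bit values follow the CPM/QE BD layout, but treat them as illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define R_E_S  0x8000	/* empty: BD owned by the QE   */
	#define R_W_S  0x2000	/* wrap: last BD in the ring   */
	#define R_I_S  0x1000	/* interrupt on completion     */
	#define R_CR_S 0x0004	/* CRC error (latched earlier) */

	int main(void)
	{
		uint16_t bd_status = R_W_S | R_CR_S; /* wrap BD that saw a CRC error */

		uint16_t old_way = bd_status | R_E_S | R_I_S;           /* 0xB004 */
		uint16_t new_way = (bd_status & R_W_S) | R_E_S | R_I_S; /* 0xB000 */

		/* The old write re-armed the BD with the stale CRC bit set;
		 * the new write preserves only the wrap bit. */
		printf("old 0x%04x  new 0x%04x\n", old_way, new_way);
		return 0;
	}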
@@ -583,8 +632,8 @@
 
 	if (howmany < budget) {
 		napi_complete_done(napi, howmany);
-		qe_setbits32(priv->uccf->p_uccm,
-			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+		qe_setbits_be32(priv->uccf->p_uccm,
+				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
 	}
 
 	return howmany;
@@ -595,11 +644,9 @@
 	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
 	struct net_device *dev = priv->ndev;
 	struct ucc_fast_private *uccf;
-	struct ucc_tdm_info *ut_info;
 	u32 ucce;
 	u32 uccm;
 
-	ut_info = priv->ut_info;
 	uccf = priv->uccf;
 
 	ucce = ioread32be(uccf->p_ucce);
@@ -620,7 +667,7 @@
 
 	/* Errors and other events */
 	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
-		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
 		dev->stats.tx_errors++;
 
@@ -682,6 +729,7 @@
 	priv->hdlc_busy = 1;
 	netif_device_attach(priv->ndev);
 	napi_enable(&priv->napi);
+	netdev_reset_queue(dev);
 	netif_start_queue(dev);
 	hdlc_open(dev);
 }
@@ -691,8 +739,8 @@
 
 static void uhdlc_memclean(struct ucc_hdlc_private *priv)
 {
-	qe_muram_free(priv->ucc_pram->riptr);
-	qe_muram_free(priv->ucc_pram->tiptr);
+	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
+	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
 
 	if (priv->rx_bd_base) {
 		dma_free_coherent(priv->dev,
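
Note: ucc_pram lives in MURAM behind an __iomem pointer, so riptr/tiptr must be read back with ioread16be() before being handed to qe_muram_free(); the old direct dereference was a raw MMIO access that sparse flags and that yields a byte-swapped value on little-endian hosts.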
@@ -773,6 +821,7 @@
 
 	free_irq(priv->ut_info->uf_info.irq, priv);
 	netif_stop_queue(dev);
+	netdev_reset_queue(dev);
 	priv->hdlc_busy = 0;
 
 	return 0;
@@ -789,6 +838,7 @@
 
 	if (parity != PARITY_NONE &&
 	    parity != PARITY_CRC32_PR1_CCITT &&
+	    parity != PARITY_CRC16_PR0_CCITT &&
 	    parity != PARITY_CRC16_PR1_CCITT)
 		return -EINVAL;
 
@@ -829,7 +879,6 @@
 static int uhdlc_suspend(struct device *dev)
 {
 	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
-	struct ucc_tdm_info *ut_info;
 	struct ucc_fast __iomem *uf_regs;
 
 	if (!priv)
@@ -841,7 +890,6 @@
 	netif_device_detach(priv->ndev);
 	napi_disable(&priv->napi);
 
-	ut_info = priv->ut_info;
 	uf_regs = priv->uf_regs;
 
 	/* backup gumr guemr*/
@@ -874,7 +922,7 @@
 	struct ucc_fast __iomem *uf_regs;
 	struct ucc_fast_private *uccf;
 	struct ucc_fast_info *uf_info;
-	int ret, i;
+	int i;
 	u32 cecr_subblock;
 	u16 bd_status;
 
@@ -919,16 +967,16 @@
 
 	/* Write to QE CECR, UCCx channel to Stop Transmission */
 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
-	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
-			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 
 	/* Set UPSMR normal mode */
 	iowrite32be(0, &uf_regs->upsmr);
 
 	/* init parameter base */
 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
-	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
-			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
 
 	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
 				qe_muram_addr(priv->ucc_pram_offset);
@@ -996,12 +1044,66 @@
 #define HDLC_PM_OPS NULL
 
 #endif
+static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+	netdev_err(ndev, "%s\n", __func__);
+}
+
 static const struct net_device_ops uhdlc_ops = {
 	.ndo_open       = uhdlc_open,
 	.ndo_stop       = uhdlc_close,
 	.ndo_start_xmit = hdlc_start_xmit,
 	.ndo_do_ioctl   = uhdlc_ioctl,
+	.ndo_tx_timeout = uhdlc_tx_timeout,
 };
+
+static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
+{
+	struct device_node *np;
+	struct platform_device *pdev;
+	struct resource *res;
+	static int siram_init_flag;
+	int ret = 0;
+
+	np = of_find_compatible_node(NULL, NULL, name);
+	if (!np)
+		return -EINVAL;
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		pr_err("%pOFn: failed to lookup pdev\n", np);
+		of_node_put(np);
+		return -EINVAL;
+	}
+
+	of_node_put(np);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EINVAL;
+		goto error_put_device;
+	}
+	*ptr = ioremap(res->start, resource_size(res));
+	if (!*ptr) {
+		ret = -ENOMEM;
+		goto error_put_device;
+	}
+
+	/* We've remapped the addresses, and we don't need the device any
+	 * more, so we should release it.
+	 */
+	put_device(&pdev->dev);
+
+	if (init_flag && siram_init_flag == 0) {
+		memset_io(*ptr, 0, resource_size(res));
+		siram_init_flag = 1;
+	}
+	return 0;
+
+error_put_device:
+	put_device(&pdev->dev);
+
+	return ret;
+}
 
 static int ucc_hdlc_probe(struct platform_device *pdev)
 {
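Note: hdlc_map_iomem() resolves a compatible string to its platform device, maps the device's first MEM resource, and, when init_flag is set, zeroes the region exactly once (guarded by the static siram_init_flag). The two call sites added to ucc_hdlc_probe() below use it to map the QE SI and SIRAM blocks needed for TDM/TSA operation.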
@@ -1024,7 +1126,7 @@
 	}
 
 	ucc_num = val - 1;
-	if ((ucc_num > 3) || (ucc_num < 0)) {
+	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
 		dev_err(&pdev->dev, ": Invalid UCC num\n");
 		return -EINVAL;
 	}
@@ -1097,12 +1199,24 @@
 		ret = ucc_of_parse_tdm(np, utdm, ut_info);
 		if (ret)
 			goto free_utdm;
+
+		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
+				     (void __iomem **)&utdm->si_regs);
+		if (ret)
+			goto free_utdm;
+		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
+				     (void __iomem **)&utdm->siram);
+		if (ret)
+			goto unmap_si_regs;
 	}
+
+	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
+		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
 
 	ret = uhdlc_init(uhdlc_priv);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to init uhdlc\n");
-		goto free_utdm;
+		goto undo_uhdlc_init;
 	}
 
 	dev = alloc_hdlcdev(uhdlc_priv);
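
Note: the optional 16-bit `fsl,hmask` device-tree property lets a board tune HDLC address filtering; its value ends up in the hmask parameter-RAM field written in uhdlc_init() above, and when the property is absent the driver falls back to the old DEFAULT_ADDR_MASK behavior.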
@@ -1116,6 +1230,7 @@
 	hdlc = dev_to_hdlc(dev);
 	dev->tx_queue_len = 16;
 	dev->netdev_ops = &uhdlc_ops;
+	dev->watchdog_timeo = 2 * HZ;
 	hdlc->attach = ucc_hdlc_attach;
 	hdlc->xmit = ucc_hdlc_tx;
 	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
@@ -1130,6 +1245,9 @@
 free_dev:
 	free_netdev(dev);
 undo_uhdlc_init:
+	iounmap(utdm->siram);
+unmap_si_regs:
+	iounmap(utdm->si_regs);
 free_utdm:
 	if (uhdlc_priv->tsa)
 		kfree(utdm);
@@ -1181,3 +1299,4 @@
 
 module_platform_driver(ucc_hdlc_driver);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);