2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
kernel/drivers/net/can/ti_hecc.c
@@ -5,6 +5,7 @@
  * specs for the same is available at <http://www.ti.com>
  *
  * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -34,6 +35,7 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 #include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
 
 #define DRV_NAME "ti_hecc"
 #define HECC_MODULE_VERSION "0.7"
@@ -44,8 +46,7 @@
 #define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
 #define MAX_TX_PRIO 0x3F /* hardware value - do not change */
 
-/*
- * Important Note: TX mailbox configuration
+/* Important Note: TX mailbox configuration
  * TX mailboxes should be restricted to the number of SKB buffers to avoid
  * maintaining SKB buffers separately. TX mailboxes should be a power of 2
  * for the mailbox logic to work. Top mailbox numbers are reserved for RX
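A note on how the masks in the next hunk are used: tx_head and tx_tail pack a descending transmit priority together with a mailbox index, which is what HECC_TX_PRIO_MASK, HECC_TX_MB_MASK and HECC_TX_MASK express. A minimal sketch of the decoding (the helper names match get_tx_head_mb()/get_tx_head_prio() used later in this file; the bodies here are reconstructed for illustration, not copied from the driver):

    static inline u32 get_tx_head_mb(struct ti_hecc_priv *priv)
    {
            /* low bits: which of the HECC_MAX_TX_MBOX transmit mailboxes */
            return priv->tx_head & HECC_TX_MB_MASK;
    }

    static inline u32 get_tx_head_prio(struct ti_hecc_priv *priv)
    {
            /* upper bits: hardware transmit priority; the highest value wins */
            return (priv->tx_head >> HECC_MB_TX_SHIFT) & MAX_TX_PRIO;
    }

Since tx_head starts at HECC_TX_MASK and is decremented for every queued frame, earlier frames carry the higher priority value and the controller sends them in submission order.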
@@ -63,29 +64,16 @@
 #define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
 #define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
 #define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
-#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1))
-#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX
 
-/*
- * Important Note: RX mailbox configuration
- * RX mailboxes are further logically split into two - main and buffer
- * mailboxes. The goal is to get all packets into main mailboxes as
- * driven by mailbox number and receive priority (higher to lower) and
- * buffer mailboxes are used to receive pkts while main mailboxes are being
- * processed. This ensures in-order packet reception.
+/* RX mailbox configuration
  *
- * Here are the recommended values for buffer mailbox. Note that RX mailboxes
- * start after TX mailboxes:
- *
- * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes
- * 28 12 8
- * 16 20 4
+ * The remaining mailboxes are used for reception and are delivered
+ * based on their timestamp, to avoid a hardware race when CANME is
+ * changed while CAN-bus traffic is being received.
  */
-
 #define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
-#define HECC_RX_BUFFER_MBOX 12 /* as per table above */
 #define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
-#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1))
+#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX)
 
 /* TI HECC module registers */
 #define HECC_CANME 0x0 /* Mailbox enable */
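The timestamp-based delivery mentioned in the new comment is provided by the generic CAN rx-offload helper rather than by driver-local buffering. The wiring happens further down in this patch, in ti_hecc_probe(); excerpted here for orientation:

    priv->offload.mailbox_read = ti_hecc_mailbox_read;
    priv->offload.mb_first = HECC_RX_FIRST_MBOX;
    priv->offload.mb_last = HECC_RX_LAST_MBOX;
    err = can_rx_offload_add_timestamp(ndev, &priv->offload);

With mb_first set to the highest mailbox and mb_last to the lowest RX mailbox, rx-offload scans the pending mailboxes downwards, reads each frame plus its CANMOTS timestamp through the mailbox_read callback, and hands the skbs to the stack sorted by that timestamp, so ordering no longer depends on reshuffling CANME while traffic is arriving.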
@@ -95,7 +83,7 @@
 #define HECC_CANTA 0x10 /* Transmission acknowledge */
 #define HECC_CANAA 0x14 /* Abort acknowledge */
 #define HECC_CANRMP 0x18 /* Receive message pending */
-#define HECC_CANRML 0x1C /* Remote message lost */
+#define HECC_CANRML 0x1C /* Receive message lost */
 #define HECC_CANRFP 0x20 /* Remote frame pending */
 #define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
 #define HECC_CANMC 0x28 /* Master control */
@@ -116,6 +104,9 @@
 #define HECC_CANTOS 0x64 /* HECC only: Time-out status */
 #define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
 #define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
+
+/* TI HECC RAM registers */
+#define HECC_CANMOTS 0x80 /* Message object time stamp */
 
 /* Mailbox registers */
 #define HECC_CANMID 0x0
@@ -159,6 +150,8 @@
 #define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
 HECC_CANES_CRCE | HECC_CANES_SE |\
 HECC_CANES_ACKE)
+#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\
+ HECC_CANES_EP | HECC_CANES_EW)
 
 #define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
 
@@ -193,7 +186,7 @@
 
 struct ti_hecc_priv {
 struct can_priv can; /* MUST be first member/field */
- struct napi_struct napi;
+ struct can_rx_offload offload;
 struct net_device *ndev;
 struct clk *clk;
 void __iomem *base;
@@ -203,7 +196,6 @@
 spinlock_t mbx_lock; /* CANME register needs protection */
 u32 tx_head;
 u32 tx_tail;
- u32 rx_next;
 struct regulator *reg_xceiver;
 };
 
@@ -227,8 +219,13 @@
 __raw_writel(val, priv->hecc_ram + mbxno * 4);
 }
 
+static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno)
+{
+ return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4);
+}
+
 static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
- u32 reg, u32 val)
+ u32 reg, u32 val)
 {
 __raw_writel(val, priv->mbx + mbxno * 0x10 + reg);
 }
@@ -249,13 +246,13 @@
 }
 
 static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
- u32 bit_mask)
+ u32 bit_mask)
 {
 hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
 }
 
 static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
- u32 bit_mask)
+ u32 bit_mask)
 {
 hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
 }
@@ -277,8 +274,8 @@
 if (bit_timing->brp > 4)
 can_btc |= HECC_CANBTC_SAM;
 else
- netdev_warn(priv->ndev, "WARN: Triple"
- "sampling not set due to h/w limitations");
+ netdev_warn(priv->ndev,
+ "WARN: Triple sampling not set due to h/w limitations");
 }
 can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
 can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
@@ -314,8 +311,7 @@
 /* Set change control request and wait till enabled */
 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
 
- /*
- * INFO: It has been observed that at times CCE bit may not be
+ /* INFO: It has been observed that at times CCE bit may not be
  * set and hw seems to be ok even if this bit is not set so
  * timing out with a timing of 1ms to respect the specs
  */
@@ -325,8 +321,7 @@
 udelay(10);
 }
 
- /*
- * Note: On HECC, BTC can be programmed only in initialization mode, so
+ /* Note: On HECC, BTC can be programmed only in initialization mode, so
  * it is expected that the can bittiming parameters are set via ip
  * utility before the device is opened
  */
@@ -335,13 +330,11 @@
 /* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
 hecc_write(priv, HECC_CANMC, 0);
 
- /*
- * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
+ /* INFO: CAN net stack handles bus off and hence disabling auto-bus-on
  * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
  */
 
- /*
- * INFO: It has been observed that at times CCE bit may not be
+ /* INFO: It has been observed that at times CCE bit may not be
  * set and hw seems to be ok even if this bit is not set so
  */
 cnt = HECC_CCE_WAIT_COUNT;
@@ -374,8 +367,8 @@
 /* put HECC in initialization mode and set btc */
 ti_hecc_reset(ndev);
 
- priv->tx_head = priv->tx_tail = HECC_TX_MASK;
- priv->rx_next = HECC_RX_FIRST_MBOX;
+ priv->tx_head = HECC_TX_MASK;
+ priv->tx_tail = HECC_TX_MASK;
 
 /* Enable local and global acceptance mask registers */
 hecc_write(priv, HECC_CANGAM, HECC_SET_REG);
@@ -392,8 +385,18 @@
 hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
 }
 
- /* Prevent message over-write & Enable interrupts */
- hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
+ /* Enable tx interrupts */
+ hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);
+
+ /* Prevent message over-write to create a rx fifo, but not for
+ * the lowest priority mailbox, since that allows detecting
+ * overflows instead of the hardware silently dropping the
+ * messages.
+ */
+ mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+ hecc_write(priv, HECC_CANOPC, mbx_mask);
+
+ /* Enable interrupts */
 if (priv->use_hecc1int) {
 hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
 hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
@@ -401,7 +404,7 @@
 } else {
 hecc_write(priv, HECC_CANMIL, 0);
 hecc_write(priv, HECC_CANGIM,
- HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
+ HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
 }
 priv->can.state = CAN_STATE_ERROR_ACTIVE;
 }
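Leaving overwrite protection (CANOPC) cleared for only the lowest-priority RX mailbox is what makes an overflow visible: when that mailbox is overwritten the controller flags it in HECC_CANRML, and the new mailbox-read path further down in this patch discards the possibly torn frame instead of passing on silently corrupted data:

    /* only the last RX mailbox may be overwritten; a set RML bit there
     * means the frame may have changed while being read, so drop it */
    if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
                 hecc_read(priv, HECC_CANRML) & mbx_mask))
            skb = ERR_PTR(-ENOBUFS);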
@@ -409,6 +412,9 @@
 static void ti_hecc_stop(struct net_device *ndev)
 {
 struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+ /* Disable the CPK; stop sending, erroring and acking */
+ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
 
 /* Disable interrupts and disable mailboxes */
 hecc_write(priv, HECC_CANGIM, 0);
@@ -435,7 +441,7 @@
 }
 
 static int ti_hecc_get_berr_counter(const struct net_device *ndev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
 {
 struct ti_hecc_priv *priv = netdev_priv(ndev);
 
@@ -445,11 +451,10 @@
 return 0;
 }
 
-/*
- * ti_hecc_xmit: HECC Transmit
+/* ti_hecc_xmit: HECC Transmit
  *
  * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
- * priority of the mailbox for tranmission is dependent upon priority setting
+ * priority of the mailbox for transmission is dependent upon priority setting
  * field in mailbox registers. The mailbox with highest value in priority field
  * is transmitted first. Only when two mailboxes have the same value in
  * priority field the highest numbered mailbox is transmitted first.
@@ -484,8 +489,8 @@
 spin_unlock_irqrestore(&priv->mbx_lock, flags);
 netif_stop_queue(ndev);
 netdev_err(priv->ndev,
- "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
- priv->tx_head, priv->tx_tail);
+ "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
+ priv->tx_head, priv->tx_tail);
 return NETDEV_TX_BUSY;
 }
 spin_unlock_irqrestore(&priv->mbx_lock, flags);
@@ -502,10 +507,10 @@
 data = (cf->can_id & CAN_SFF_MASK) << 18;
 hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
 hecc_write_mbx(priv, mbxno, HECC_CANMDL,
- be32_to_cpu(*(__be32 *)(cf->data)));
+ be32_to_cpu(*(__be32 *)(cf->data)));
 if (cf->can_dlc > 4)
 hecc_write_mbx(priv, mbxno, HECC_CANMDH,
- be32_to_cpu(*(__be32 *)(cf->data + 4)));
+ be32_to_cpu(*(__be32 *)(cf->data + 4)));
 else
 *(u32 *)(cf->data + 4) = 0;
 can_put_echo_skb(skb, ndev, mbxno);
@@ -513,230 +518,159 @@
 spin_lock_irqsave(&priv->mbx_lock, flags);
 --priv->tx_head;
 if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
- (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
+ (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
 netif_stop_queue(ndev);
 }
 hecc_set_bit(priv, HECC_CANME, mbx_mask);
 spin_unlock_irqrestore(&priv->mbx_lock, flags);
 
- hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
- hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
 hecc_write(priv, HECC_CANTRS, mbx_mask);
 
 return NETDEV_TX_OK;
 }
 
-static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
+static inline
+struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
 {
- struct net_device_stats *stats = &priv->ndev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- u32 data, mbx_mask;
- unsigned long flags;
+ return container_of(offload, struct ti_hecc_priv, offload);
+}
 
- skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb) {
- if (printk_ratelimit())
- netdev_err(priv->ndev,
- "ti_hecc_rx_pkt: alloc_can_skb() failed\n");
- return -ENOMEM;
- }
+static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
+ unsigned int mbxno, u32 *timestamp,
+ bool drop)
+{
+ struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ u32 data, mbx_mask;
 
 mbx_mask = BIT(mbxno);
+
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb)) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
 data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
 if (data & HECC_CANMID_IDE)
 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
 else
 cf->can_id = (data >> 18) & CAN_SFF_MASK;
+
 data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
 if (data & HECC_CANMCF_RTR)
 cf->can_id |= CAN_RTR_FLAG;
 cf->can_dlc = get_can_dlc(data & 0xF);
+
 data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
 *(__be32 *)(cf->data) = cpu_to_be32(data);
 if (cf->can_dlc > 4) {
 data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
 *(__be32 *)(cf->data + 4) = cpu_to_be32(data);
 }
- spin_lock_irqsave(&priv->mbx_lock, flags);
- hecc_clear_bit(priv, HECC_CANME, mbx_mask);
+
+ *timestamp = hecc_read_stamp(priv, mbxno);
+
+ /* Check for FIFO overrun.
+ *
+ * All but the last RX mailbox have activated overwrite
+ * protection. So skip check for overrun, if we're not
+ * handling the last RX mailbox.
+ *
+ * As the overwrite protection for the last RX mailbox is
+ * disabled, the CAN core might update while we're reading
+ * it. This means the skb might be inconsistent.
+ *
+ * Return an error to let rx-offload discard this CAN frame.
+ */
+ if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
+ hecc_read(priv, HECC_CANRML) & mbx_mask))
+ skb = ERR_PTR(-ENOBUFS);
+
+ mark_as_read:
 hecc_write(priv, HECC_CANRMP, mbx_mask);
- /* enable mailbox only if it is part of rx buffer mailboxes */
- if (priv->rx_next < HECC_RX_BUFFER_MBOX)
- hecc_set_bit(priv, HECC_CANME, mbx_mask);
- spin_unlock_irqrestore(&priv->mbx_lock, flags);
 
- stats->rx_bytes += cf->can_dlc;
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
- netif_receive_skb(skb);
- stats->rx_packets++;
-
- return 0;
-}
-
-/*
- * ti_hecc_rx_poll - HECC receive pkts
- *
- * The receive mailboxes start from highest numbered mailbox till last xmit
- * mailbox. On CAN frame reception the hardware places the data into highest
- * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
- * have same filtering (ALL CAN frames) packets will arrive in the highest
- * available RX mailbox and we need to ensure in-order packet reception.
- *
- * To ensure the packets are received in the right order we logically divide
- * the RX mailboxes into main and buffer mailboxes. Packets are received as per
- * mailbox priotity (higher to lower) in the main bank and once it is full we
- * disable further reception into main mailboxes. While the main mailboxes are
- * processed in NAPI, further packets are received in buffer mailboxes.
- *
- * We maintain a RX next mailbox counter to process packets and once all main
- * mailboxe packets are passed to the upper stack we enable all of them but
- * continue to process packets received in buffer mailboxes. With each packet
- * received from buffer mailbox we enable it immediately so as to handle the
- * overflow from higher mailboxes.
- */
-static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
-{
- struct net_device *ndev = napi->dev;
- struct ti_hecc_priv *priv = netdev_priv(ndev);
- u32 num_pkts = 0;
- u32 mbx_mask;
- unsigned long pending_pkts, flags;
-
- if (!netif_running(ndev))
- return 0;
-
- while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
- num_pkts < quota) {
- mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
- if (mbx_mask & pending_pkts) {
- if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
- return num_pkts;
- ++num_pkts;
- } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
- break; /* pkt not received yet */
- }
- --priv->rx_next;
- if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
- /* enable high bank mailboxes */
- spin_lock_irqsave(&priv->mbx_lock, flags);
- mbx_mask = hecc_read(priv, HECC_CANME);
- mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
- hecc_write(priv, HECC_CANME, mbx_mask);
- spin_unlock_irqrestore(&priv->mbx_lock, flags);
- } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
- priv->rx_next = HECC_RX_FIRST_MBOX;
- break;
- }
- }
-
- /* Enable packet interrupt if all pkts are handled */
- if (hecc_read(priv, HECC_CANRMP) == 0) {
- napi_complete(napi);
- /* Re-enable RX mailbox interrupts */
- mbx_mask = hecc_read(priv, HECC_CANMIM);
- mbx_mask |= HECC_TX_MBOX_MASK;
- hecc_write(priv, HECC_CANMIM, mbx_mask);
- } else {
- /* repoll is done only if whole budget is used */
- num_pkts = quota;
- }
-
- return num_pkts;
+ return skb;
 }
 
 static int ti_hecc_error(struct net_device *ndev, int int_status,
- int err_status)
+ int err_status)
 {
 struct ti_hecc_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
 struct can_frame *cf;
 struct sk_buff *skb;
-
- /* propagate the error condition to the can stack */
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
- if (printk_ratelimit())
- netdev_err(priv->ndev,
- "ti_hecc_error: alloc_can_err_skb() failed\n");
- return -ENOMEM;
- }
-
- if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
- if ((int_status & HECC_CANGIF_BOIF) == 0) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- ++priv->can.can_stats.error_warning;
- cf->can_id |= CAN_ERR_CRTL;
- if (hecc_read(priv, HECC_CANTEC) > 96)
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- if (hecc_read(priv, HECC_CANREC) > 96)
- cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- }
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
- netdev_dbg(priv->ndev, "Error Warning interrupt\n");
- hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- }
-
- if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
- if ((int_status & HECC_CANGIF_BOIF) == 0) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- ++priv->can.can_stats.error_passive;
- cf->can_id |= CAN_ERR_CRTL;
- if (hecc_read(priv, HECC_CANTEC) > 127)
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
- if (hecc_read(priv, HECC_CANREC) > 127)
- cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- }
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
- netdev_dbg(priv->ndev, "Error passive interrupt\n");
- hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- }
-
- /*
- * Need to check busoff condition in error status register too to
- * ensure warning interrupts don't hog the system
- */
- if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
- priv->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
- hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- /* Disable all interrupts in bus-off to avoid int hog */
- hecc_write(priv, HECC_CANGIM, 0);
- ++priv->can.can_stats.bus_off;
- can_bus_off(ndev);
- }
+ u32 timestamp;
+ int err;
 
 if (err_status & HECC_BUS_ERROR) {
+ /* propagate the error condition to the can stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ if (net_ratelimit())
+ netdev_err(priv->ndev,
+ "%s: alloc_can_err_skb() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
 ++priv->can.can_stats.bus_error;
 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
- if (err_status & HECC_CANES_FE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
+ if (err_status & HECC_CANES_FE)
 cf->data[2] |= CAN_ERR_PROT_FORM;
- }
- if (err_status & HECC_CANES_BE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
+ if (err_status & HECC_CANES_BE)
 cf->data[2] |= CAN_ERR_PROT_BIT;
- }
- if (err_status & HECC_CANES_SE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
+ if (err_status & HECC_CANES_SE)
 cf->data[2] |= CAN_ERR_PROT_STUFF;
- }
- if (err_status & HECC_CANES_CRCE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+ if (err_status & HECC_CANES_CRCE)
 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- }
- if (err_status & HECC_CANES_ACKE) {
- hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+ if (err_status & HECC_CANES_ACKE)
 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
- }
+
+ timestamp = hecc_read(priv, HECC_CANLNT);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb,
+ timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
 }
 
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);
 
 return 0;
+}
+
+static void ti_hecc_change_state(struct net_device *ndev,
+ enum can_state rx_state,
+ enum can_state tx_state)
+{
+ struct ti_hecc_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ skb = alloc_can_err_skb(priv->ndev, &cf);
+ if (unlikely(!skb)) {
+ priv->can.state = max(tx_state, rx_state);
+ return;
+ }
+
+ can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+ if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+ cf->data[6] = hecc_read(priv, HECC_CANTEC);
+ cf->data[7] = hecc_read(priv, HECC_CANREC);
+ }
+
+ timestamp = hecc_read(priv, HECC_CANLNT);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
 }
 
 static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
@@ -744,19 +678,71 @@
 struct net_device *ndev = (struct net_device *)dev_id;
 struct ti_hecc_priv *priv = netdev_priv(ndev);
 struct net_device_stats *stats = &ndev->stats;
- u32 mbxno, mbx_mask, int_status, err_status;
- unsigned long ack, flags;
+ u32 mbxno, mbx_mask, int_status, err_status, stamp;
+ unsigned long flags, rx_pending;
+ u32 handled = 0;
 
 int_status = hecc_read(priv,
- (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0);
+ priv->use_hecc1int ?
+ HECC_CANGIF1 : HECC_CANGIF0);
 
 if (!int_status)
 return IRQ_NONE;
 
 err_status = hecc_read(priv, HECC_CANES);
- if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
- HECC_CANES_EP | HECC_CANES_EW))
- ti_hecc_error(ndev, int_status, err_status);
+ if (unlikely(err_status & HECC_CANES_FLAGS))
+ ti_hecc_error(ndev, int_status, err_status);
+
+ if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
+ enum can_state rx_state, tx_state;
+ u32 rec = hecc_read(priv, HECC_CANREC);
+ u32 tec = hecc_read(priv, HECC_CANTEC);
+
+ if (int_status & HECC_CANGIF_WLIF) {
+ handled |= HECC_CANGIF_WLIF;
+ rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
+ tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
+ netdev_dbg(priv->ndev, "Error Warning interrupt\n");
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+
+ if (int_status & HECC_CANGIF_EPIF) {
+ handled |= HECC_CANGIF_EPIF;
+ rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+ tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+ netdev_dbg(priv->ndev, "Error passive interrupt\n");
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+
+ if (int_status & HECC_CANGIF_BOIF) {
+ handled |= HECC_CANGIF_BOIF;
+ rx_state = CAN_STATE_BUS_OFF;
+ tx_state = CAN_STATE_BUS_OFF;
+ netdev_dbg(priv->ndev, "Bus off interrupt\n");
+
+ /* Disable all interrupts */
+ hecc_write(priv, HECC_CANGIM, 0);
+ can_bus_off(ndev);
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+ } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+ enum can_state new_state, tx_state, rx_state;
+ u32 rec = hecc_read(priv, HECC_CANREC);
+ u32 tec = hecc_read(priv, HECC_CANTEC);
+
+ if (rec >= 128 || tec >= 128)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (rec >= 96 || tec >= 96)
+ new_state = CAN_STATE_ERROR_WARNING;
+ else
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (new_state < priv->can.state) {
+ rx_state = rec >= tec ? new_state : 0;
+ tx_state = rec <= tec ? new_state : 0;
+ ti_hecc_change_state(ndev, rx_state, tx_state);
+ }
+ }
 
 if (int_status & HECC_CANGIF_GMIF) {
 while (priv->tx_tail - priv->tx_head > 0) {
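The rec/tec handling added above follows the usual CAN error-counter thresholds. A compact restatement of the mapping (a readability sketch, not driver code; the helper name is made up):

    /* 0..95 -> error active, 96..127 -> error warning, >= 128 -> error passive */
    static enum can_state hecc_state_from_counters(u32 rec, u32 tec)
    {
            u32 worst = max(rec, tec);

            if (worst >= 128)
                    return CAN_STATE_ERROR_PASSIVE;
            if (worst >= 96)
                    return CAN_STATE_ERROR_WARNING;
            return CAN_STATE_ERROR_ACTIVE;
    }

The larger counter decides which side is reported as degraded: rec >= tec advances the rx state, rec <= tec the tx state, and when both counters are equal both states move together, which is why the two conditions are deliberately not mutually exclusive.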
@@ -764,41 +750,39 @@
 mbx_mask = BIT(mbxno);
 if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
 break;
- hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
 hecc_write(priv, HECC_CANTA, mbx_mask);
 spin_lock_irqsave(&priv->mbx_lock, flags);
 hecc_clear_bit(priv, HECC_CANME, mbx_mask);
 spin_unlock_irqrestore(&priv->mbx_lock, flags);
- stats->tx_bytes += hecc_read_mbx(priv, mbxno,
- HECC_CANMCF) & 0xF;
+ stamp = hecc_read_stamp(priv, mbxno);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ mbxno, stamp);
 stats->tx_packets++;
 can_led_event(ndev, CAN_LED_EVENT_TX);
- can_get_echo_skb(ndev, mbxno);
 --priv->tx_tail;
 }
 
 /* restart queue if wrap-up or if queue stalled on last pkt */
- if (((priv->tx_head == priv->tx_tail) &&
- ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
- (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
- ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
+ if ((priv->tx_head == priv->tx_tail &&
+ ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
+ (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
+ ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
 netif_wake_queue(ndev);
 
- /* Disable RX mailbox interrupts and let NAPI reenable them */
- if (hecc_read(priv, HECC_CANRMP)) {
- ack = hecc_read(priv, HECC_CANMIM);
- ack &= BIT(HECC_MAX_TX_MBOX) - 1;
- hecc_write(priv, HECC_CANMIM, ack);
- napi_schedule(&priv->napi);
+ /* offload RX mailboxes and let NAPI deliver them */
+ while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
+ can_rx_offload_irq_offload_timestamp(&priv->offload,
+ rx_pending);
 }
 }
 
 /* clear all interrupt conditions - read back to avoid spurious ints */
 if (priv->use_hecc1int) {
- hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
+ hecc_write(priv, HECC_CANGIF1, handled);
 int_status = hecc_read(priv, HECC_CANGIF1);
 } else {
- hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
+ hecc_write(priv, HECC_CANGIF0, handled);
 int_status = hecc_read(priv, HECC_CANGIF0);
 }
 
@@ -811,7 +795,7 @@
 int err;
 
 err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
- ndev->name, ndev);
+ ndev->name, ndev);
 if (err) {
 netdev_err(ndev, "error requesting interrupt\n");
 return err;
@@ -831,7 +815,7 @@
 can_led_event(ndev, CAN_LED_EVENT_OPEN);
 
 ti_hecc_start(ndev);
- napi_enable(&priv->napi);
+ can_rx_offload_enable(&priv->offload);
 netif_start_queue(ndev);
 
 return 0;
@@ -842,7 +826,7 @@
 struct ti_hecc_priv *priv = netdev_priv(ndev);
 
 netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ can_rx_offload_disable(&priv->offload);
 ti_hecc_stop(ndev);
 free_irq(ndev->irq, ndev);
 close_candev(ndev);
@@ -873,7 +857,7 @@
 struct net_device *ndev = (struct net_device *)0;
 struct ti_hecc_priv *priv;
 struct device_node *np = pdev->dev.of_node;
- struct resource *res, *irq;
+ struct resource *irq;
 struct regulator *reg_xceiver;
 int err = -ENODEV;
 
@@ -894,13 +878,7 @@
 priv = netdev_priv(ndev);
 
 /* handle hecc memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n");
- return -EINVAL;
- }
-
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
 if (IS_ERR(priv->base)) {
 dev_err(&pdev->dev, "hecc ioremap failed\n");
 err = PTR_ERR(priv->base);
@@ -908,13 +886,8 @@
 }
 
 /* handle hecc-ram memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n");
- return -EINVAL;
- }
-
- priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
+ priv->hecc_ram = devm_platform_ioremap_resource_byname(pdev,
+ "hecc-ram");
 if (IS_ERR(priv->hecc_ram)) {
 dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
 err = PTR_ERR(priv->hecc_ram);
@@ -922,13 +895,7 @@
 }
 
 /* handle mbx memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n");
- return -EINVAL;
- }
-
- priv->mbx = devm_ioremap_resource(&pdev->dev, res);
+ priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
 if (IS_ERR(priv->mbx)) {
 dev_err(&pdev->dev, "mbx ioremap failed\n");
 err = PTR_ERR(priv->mbx);
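The three conversions above lean on devm_platform_ioremap_resource_byname() being a one-call replacement for the removed lookup-then-map pattern; for the "mbx" case the old sequence was equivalent to:

    struct resource *res;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx");
    priv->mbx = devm_ioremap_resource(&pdev->dev, res);

devm_ioremap_resource() already prints an error and returns an ERR_PTR when the resource is missing or the mapping fails, so the explicit !res checks and their dev_err() calls can go away without losing diagnostics.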
@@ -965,29 +932,40 @@
 goto probe_exit_candev;
 }
 priv->can.clock.freq = clk_get_rate(priv->clk);
- netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
- HECC_DEF_NAPI_WEIGHT);
 
 err = clk_prepare_enable(priv->clk);
 if (err) {
 dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_release_clk;
+ }
+
+ priv->offload.mailbox_read = ti_hecc_mailbox_read;
+ priv->offload.mb_first = HECC_RX_FIRST_MBOX;
+ priv->offload.mb_last = HECC_RX_LAST_MBOX;
+ err = can_rx_offload_add_timestamp(ndev, &priv->offload);
+ if (err) {
+ dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
+ goto probe_exit_disable_clk;
 }
 
 err = register_candev(ndev);
 if (err) {
 dev_err(&pdev->dev, "register_candev() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_offload;
 }
 
 devm_can_led_init(ndev);
 
 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
- priv->base, (u32) ndev->irq);
+ priv->base, (u32)ndev->irq);
 
 return 0;
 
-probe_exit_clk:
+probe_exit_offload:
+ can_rx_offload_del(&priv->offload);
+probe_exit_disable_clk:
+ clk_disable_unprepare(priv->clk);
+probe_exit_release_clk:
 clk_put(priv->clk);
 probe_exit_candev:
 free_candev(ndev);
@@ -1003,6 +981,7 @@
 unregister_candev(ndev);
 clk_disable_unprepare(priv->clk);
 clk_put(priv->clk);
+ can_rx_offload_del(&priv->offload);
 free_candev(ndev);
 
 return 0;