2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for Xilinx TEMAC Ethernet device
  *
@@ -21,7 +22,6 @@
  *
  * TODO:
  * - Factor out locallink DMA code into separate driver
- * - Fix multicast assignment.
  * - Fix support for hardware checksumming.
  * - Testing. Lots and lots of testing.
  *
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/if_ether.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -50,92 +51,187 @@
5051 #include <linux/ip.h>
5152 #include <linux/slab.h>
5253 #include <linux/interrupt.h>
54
+#include <linux/workqueue.h>
5355 #include <linux/dma-mapping.h>
56
+#include <linux/processor.h>
57
+#include <linux/platform_data/xilinx-ll-temac.h>
5458
5559 #include "ll_temac.h"
5660
57
-#define TX_BD_NUM 64
58
-#define RX_BD_NUM 128
61
+/* Descriptors defines for Tx and Rx DMA */
62
+#define TX_BD_NUM_DEFAULT 64
63
+#define RX_BD_NUM_DEFAULT 1024
64
+#define TX_BD_NUM_MAX 4096
65
+#define RX_BD_NUM_MAX 4096
5966
6067 /* ---------------------------------------------------------------------
6168 * Low level register access functions
6269 */
6370
64
-u32 temac_ior(struct temac_local *lp, int offset)
71
+static u32 _temac_ior_be(struct temac_local *lp, int offset)
6572 {
66
- return in_be32(lp->regs + offset);
73
+ return ioread32be(lp->regs + offset);
6774 }
6875
69
-void temac_iow(struct temac_local *lp, int offset, u32 value)
76
+static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
7077 {
71
- out_be32(lp->regs + offset, value);
78
+ return iowrite32be(value, lp->regs + offset);
7279 }
7380
81
+static u32 _temac_ior_le(struct temac_local *lp, int offset)
82
+{
83
+ return ioread32(lp->regs + offset);
84
+}
85
+
86
+static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
87
+{
88
+ return iowrite32(value, lp->regs + offset);
89
+}
90
+
91
+static bool hard_acs_rdy(struct temac_local *lp)
92
+{
93
+ return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
94
+}
95
+
96
+static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
97
+{
98
+ ktime_t cur = ktime_get();
99
+
100
+ return hard_acs_rdy(lp) || ktime_after(cur, timeout);
101
+}
102
+
103
+/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
104
+ * that was used before, and should cover MDIO bus speed down to 3200
105
+ * Hz.
106
+ */
107
+#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
108
+
109
+/*
110
+ * temac_indirect_busywait - Wait for current indirect register access
111
+ * to complete.
112
+ */
74113 int temac_indirect_busywait(struct temac_local *lp)
75114 {
76
- unsigned long end = jiffies + 2;
115
+ ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);
77116
78
- while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
79
- if (time_before_eq(end, jiffies)) {
80
- WARN_ON(1);
81
- return -ETIMEDOUT;
82
- }
83
- msleep(1);
84
- }
85
- return 0;
117
+ spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
118
+ if (WARN_ON(!hard_acs_rdy(lp)))
119
+ return -ETIMEDOUT;
120
+ else
121
+ return 0;
86122 }
87123
88
-/**
89
- * temac_indirect_in32
90
- *
91
- * lp->indirect_mutex must be held when calling this function
124
+/*
125
+ * temac_indirect_in32 - Indirect register read access. This function
126
+ * must be called without lp->indirect_lock being held.
92127 */
93128 u32 temac_indirect_in32(struct temac_local *lp, int reg)
94129 {
95
- u32 val;
130
+ unsigned long flags;
131
+ int val;
96132
97
- if (temac_indirect_busywait(lp))
98
- return -ETIMEDOUT;
99
- temac_iow(lp, XTE_CTL0_OFFSET, reg);
100
- if (temac_indirect_busywait(lp))
101
- return -ETIMEDOUT;
102
- val = temac_ior(lp, XTE_LSW0_OFFSET);
103
-
133
+ spin_lock_irqsave(lp->indirect_lock, flags);
134
+ val = temac_indirect_in32_locked(lp, reg);
135
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
104136 return val;
105137 }
106138
107
-/**
108
- * temac_indirect_out32
109
- *
110
- * lp->indirect_mutex must be held when calling this function
139
+/*
140
+ * temac_indirect_in32_locked - Indirect register read access. This
141
+ * function must be called with lp->indirect_lock being held. Use
142
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
143
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
144
+ * registers.
145
+ */
146
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
147
+{
148
+ /* This initial wait should normally not spin, as we always
149
+ * try to wait for indirect access to complete before
150
+ * releasing the indirect_lock.
151
+ */
152
+ if (WARN_ON(temac_indirect_busywait(lp)))
153
+ return -ETIMEDOUT;
154
+ /* Initiate read from indirect register */
155
+ temac_iow(lp, XTE_CTL0_OFFSET, reg);
156
+ /* Wait for indirect register access to complete. We really
157
+ * should not see timeouts, and could even end up causing
158
+ * problem for following indirect access, so let's make a bit
159
+ * of WARN noise.
160
+ */
161
+ if (WARN_ON(temac_indirect_busywait(lp)))
162
+ return -ETIMEDOUT;
163
+ /* Value is ready now */
164
+ return temac_ior(lp, XTE_LSW0_OFFSET);
165
+}
166
+
167
+/*
168
+ * temac_indirect_out32 - Indirect register write access. This function
169
+ * must be called without lp->indirect_lock being held.
111170 */
112171 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
113172 {
114
- if (temac_indirect_busywait(lp))
173
+ unsigned long flags;
174
+
175
+ spin_lock_irqsave(lp->indirect_lock, flags);
176
+ temac_indirect_out32_locked(lp, reg, value);
177
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
178
+}
179
+
180
+/*
181
+ * temac_indirect_out32_locked - Indirect register write access. This
182
+ * function must be called with lp->indirect_lock being held. Use
183
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
184
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
185
+ * registers.
186
+ */
187
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
188
+{
189
+ /* As in temac_indirect_in32_locked(), we should normally not
190
+ * spin here. And if it happens, we actually end up silently
191
+ * ignoring the write request. Ouch.
192
+ */
193
+ if (WARN_ON(temac_indirect_busywait(lp)))
115194 return;
195
+ /* Initiate write to indirect register */
116196 temac_iow(lp, XTE_LSW0_OFFSET, value);
117197 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
118
- temac_indirect_busywait(lp);
198
+ /* As in temac_indirect_in32_locked(), we should not see timeouts
199
+ * here. And if it happens, we continue before the write has
200
+ * completed. Not good.
201
+ */
202
+ WARN_ON(temac_indirect_busywait(lp));
119203 }
120204
121
-/**
122
- * temac_dma_in32 - Memory mapped DMA read, this function expects a
123
- * register input that is based on DCR word addresses which
124
- * are then converted to memory mapped byte addresses
205
+/*
206
+ * temac_dma_in32_* - Memory mapped DMA read, these function expects a
207
+ * register input that is based on DCR word addresses which are then
208
+ * converted to memory mapped byte addresses. To be assigned to
209
+ * lp->dma_in32.
125210 */
126
-static u32 temac_dma_in32(struct temac_local *lp, int reg)
211
+static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
127212 {
128
- return in_be32(lp->sdma_regs + (reg << 2));
213
+ return ioread32be(lp->sdma_regs + (reg << 2));
129214 }
130215
131
-/**
132
- * temac_dma_out32 - Memory mapped DMA read, this function expects a
133
- * register input that is based on DCR word addresses which
134
- * are then converted to memory mapped byte addresses
135
- */
136
-static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
216
+static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
137217 {
138
- out_be32(lp->sdma_regs + (reg << 2), value);
218
+ return ioread32(lp->sdma_regs + (reg << 2));
219
+}
220
+
221
+/*
222
+ * temac_dma_out32_* - Memory mapped DMA read, these function expects
223
+ * a register input that is based on DCR word addresses which are then
224
+ * converted to memory mapped byte addresses. To be assigned to
225
+ * lp->dma_out32.
226
+ */
227
+static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
228
+{
229
+ iowrite32be(value, lp->sdma_regs + (reg << 2));
230
+}
231
+
232
+static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
233
+{
234
+ iowrite32(value, lp->sdma_regs + (reg << 2));
139235 }
140236
141237 /* DMA register access functions can be DCR based or memory mapped.
@@ -144,7 +240,7 @@
  */
 #ifdef CONFIG_PPC_DCR

-/**
+/*
  * temac_dma_dcr_in32 - DCR based DMA read
  */
 static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
@@ -152,7 +248,7 @@
 	return dcr_read(lp->sdma_dcrs, reg);
 }

-/**
+/*
  * temac_dma_dcr_out32 - DCR based DMA write
  */
 static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
@@ -160,7 +256,7 @@
 	dcr_write(lp->sdma_dcrs, reg, value);
 }

-/**
+/*
  * temac_dcr_setup - If the DMA is DCR based, then setup the address and
  * I/O functions
  */
@@ -187,7 +283,7 @@

 /*
  * temac_dcr_setup - This is a stub for when DCR is not supported,
- * such as with MicroBlaze
+ * such as with MicroBlaze and x86
  */
 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
 			   struct device_node *np)
@@ -197,7 +293,7 @@

 #endif

-/**
+/*
  * temac_dma_bd_release - Release buffer descriptor rings
  */
 static void temac_dma_bd_release(struct net_device *ndev)
@@ -208,7 +304,7 @@
 	/* Reset Local Link (DMA) */
 	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		if (!lp->rx_skb[i])
 			break;
 		else {
@@ -219,50 +315,51 @@
219315 }
220316 if (lp->rx_bd_v)
221317 dma_free_coherent(ndev->dev.parent,
222
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
223
- lp->rx_bd_v, lp->rx_bd_p);
318
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
319
+ lp->rx_bd_v, lp->rx_bd_p);
224320 if (lp->tx_bd_v)
225321 dma_free_coherent(ndev->dev.parent,
226
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
227
- lp->tx_bd_v, lp->tx_bd_p);
228
- kfree(lp->rx_skb);
322
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
323
+ lp->tx_bd_v, lp->tx_bd_p);
229324 }
230325
231
-/**
326
+/*
232327 * temac_dma_bd_init - Setup buffer descriptor rings
233328 */
234329 static int temac_dma_bd_init(struct net_device *ndev)
235330 {
236331 struct temac_local *lp = netdev_priv(ndev);
237332 struct sk_buff *skb;
333
+ dma_addr_t skb_dma_addr;
238334 int i;
239335
240
- lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
336
+ lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
337
+ sizeof(*lp->rx_skb), GFP_KERNEL);
241338 if (!lp->rx_skb)
242339 goto out;
243340
244341 /* allocate the tx and rx ring buffer descriptors. */
245342 /* returns a virtual address and a physical address. */
246
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
247
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
248
- &lp->tx_bd_p, GFP_KERNEL);
343
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
344
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
345
+ &lp->tx_bd_p, GFP_KERNEL);
249346 if (!lp->tx_bd_v)
250347 goto out;
251348
252
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
253
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
254
- &lp->rx_bd_p, GFP_KERNEL);
349
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
350
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
351
+ &lp->rx_bd_p, GFP_KERNEL);
255352 if (!lp->rx_bd_v)
256353 goto out;
257354
258
- for (i = 0; i < TX_BD_NUM; i++) {
259
- lp->tx_bd_v[i].next = lp->tx_bd_p +
260
- sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
355
+ for (i = 0; i < lp->tx_bd_num; i++) {
356
+ lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
357
+ + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
261358 }
262359
263
- for (i = 0; i < RX_BD_NUM; i++) {
264
- lp->rx_bd_v[i].next = lp->rx_bd_p +
265
- sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
360
+ for (i = 0; i < lp->rx_bd_num; i++) {
361
+ lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
362
+ + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
266363
267364 skb = netdev_alloc_skb_ip_align(ndev,
268365 XTE_MAX_JUMBO_FRAME_SIZE);
@@ -271,37 +368,42 @@
271368
272369 lp->rx_skb[i] = skb;
273370 /* returns physical address of skb->data */
274
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
275
- skb->data,
276
- XTE_MAX_JUMBO_FRAME_SIZE,
277
- DMA_FROM_DEVICE);
278
- lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
279
- lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
371
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
372
+ XTE_MAX_JUMBO_FRAME_SIZE,
373
+ DMA_FROM_DEVICE);
374
+ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
375
+ goto out;
376
+ lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
377
+ lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
378
+ lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
280379 }
281380
282
- lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
283
- CHNL_CTRL_IRQ_EN |
284
- CHNL_CTRL_IRQ_DLY_EN |
285
- CHNL_CTRL_IRQ_COAL_EN);
286
- /* 0x10220483 */
287
- /* 0x00100483 */
288
- lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
289
- CHNL_CTRL_IRQ_EN |
290
- CHNL_CTRL_IRQ_DLY_EN |
291
- CHNL_CTRL_IRQ_COAL_EN |
292
- CHNL_CTRL_IRQ_IOE);
293
- /* 0xff010283 */
294
-
295
- lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
296
- lp->dma_out(lp, RX_TAILDESC_PTR,
297
- lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
298
- lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
381
+ /* Configure DMA channel (irq setup) */
382
+ lp->dma_out(lp, TX_CHNL_CTRL,
383
+ lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
384
+ 0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
385
+ CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
386
+ CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
387
+ lp->dma_out(lp, RX_CHNL_CTRL,
388
+ lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
389
+ CHNL_CTRL_IRQ_IOE |
390
+ CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
391
+ CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
299392
300393 /* Init descriptor indexes */
301394 lp->tx_bd_ci = 0;
302
- lp->tx_bd_next = 0;
303395 lp->tx_bd_tail = 0;
304396 lp->rx_bd_ci = 0;
397
+ lp->rx_bd_tail = lp->rx_bd_num - 1;
398
+
399
+ /* Enable RX DMA transfers */
400
+ wmb();
401
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
402
+ lp->dma_out(lp, RX_TAILDESC_PTR,
403
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
404
+
405
+ /* Prepare for TX DMA transfer */
406
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
305407
306408 return 0;
307409
@@ -317,25 +419,26 @@
 static void temac_do_set_mac_address(struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
+	unsigned long flags;

 	/* set up unicast MAC address filter set its mac address */
-	mutex_lock(&lp->indirect_mutex);
-	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
-			     (ndev->dev_addr[0]) |
-			     (ndev->dev_addr[1] << 8) |
-			     (ndev->dev_addr[2] << 16) |
-			     (ndev->dev_addr[3] << 24));
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
+				    (ndev->dev_addr[0]) |
+				    (ndev->dev_addr[1] << 8) |
+				    (ndev->dev_addr[2] << 16) |
+				    (ndev->dev_addr[3] << 24));
 	/* There are reserved bits in EUAW1
 	 * so don't affect them Set MAC bits [47:32] in EUAW1 */
-	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
-			     (ndev->dev_addr[4] & 0x000000ff) |
-			     (ndev->dev_addr[5] << 8));
-	mutex_unlock(&lp->indirect_mutex);
+	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
+				    (ndev->dev_addr[4] & 0x000000ff) |
+				    (ndev->dev_addr[5] << 8));
+	spin_unlock_irqrestore(lp->indirect_lock, flags);
 }

 static int temac_init_mac_address(struct net_device *ndev, const void *address)
 {
-	memcpy(ndev->dev_addr, address, ETH_ALEN);
+	ether_addr_copy(ndev->dev_addr, address);
 	if (!is_valid_ether_addr(ndev->dev_addr))
 		eth_hw_addr_random(ndev);
 	temac_do_set_mac_address(ndev);
@@ -356,49 +459,58 @@
356459 static void temac_set_multicast_list(struct net_device *ndev)
357460 {
358461 struct temac_local *lp = netdev_priv(ndev);
359
- u32 multi_addr_msw, multi_addr_lsw, val;
360
- int i;
462
+ u32 multi_addr_msw, multi_addr_lsw;
463
+ int i = 0;
464
+ unsigned long flags;
465
+ bool promisc_mode_disabled = false;
361466
362
- mutex_lock(&lp->indirect_mutex);
363
- if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
364
- netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
365
- /*
366
- * We must make the kernel realise we had to move
367
- * into promisc mode or we start all out war on
368
- * the cable. If it was a promisc request the
369
- * flag is already set. If not we assert it.
370
- */
371
- ndev->flags |= IFF_PROMISC;
467
+ if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
468
+ (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
372469 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
373470 dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
374
- } else if (!netdev_mc_empty(ndev)) {
471
+ return;
472
+ }
473
+
474
+ spin_lock_irqsave(lp->indirect_lock, flags);
475
+
476
+ if (!netdev_mc_empty(ndev)) {
375477 struct netdev_hw_addr *ha;
376478
377
- i = 0;
378479 netdev_for_each_mc_addr(ha, ndev) {
379
- if (i >= MULTICAST_CAM_TABLE_NUM)
480
+ if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
380481 break;
381482 multi_addr_msw = ((ha->addr[3] << 24) |
382483 (ha->addr[2] << 16) |
383484 (ha->addr[1] << 8) |
384485 (ha->addr[0]));
385
- temac_indirect_out32(lp, XTE_MAW0_OFFSET,
386
- multi_addr_msw);
486
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
487
+ multi_addr_msw);
387488 multi_addr_lsw = ((ha->addr[5] << 8) |
388489 (ha->addr[4]) | (i << 16));
389
- temac_indirect_out32(lp, XTE_MAW1_OFFSET,
390
- multi_addr_lsw);
490
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
491
+ multi_addr_lsw);
391492 i++;
392493 }
393
- } else {
394
- val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
395
- temac_indirect_out32(lp, XTE_AFM_OFFSET,
396
- val & ~XTE_AFM_EPPRM_MASK);
397
- temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
398
- temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
399
- dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
400494 }
401
- mutex_unlock(&lp->indirect_mutex);
495
+
496
+ /* Clear all or remaining/unused address table entries */
497
+ while (i < MULTICAST_CAM_TABLE_NUM) {
498
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
499
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
500
+ i++;
501
+ }
502
+
503
+ /* Enable address filter block if currently disabled */
504
+ if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
505
+ & XTE_AFM_EPPRM_MASK) {
506
+ temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
507
+ promisc_mode_disabled = true;
508
+ }
509
+
510
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
511
+
512
+ if (promisc_mode_disabled)
513
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
402514 }
403515
404516 static struct temac_option {
@@ -481,7 +593,7 @@
 	{}
 };

-/**
+/*
  * temac_setoptions
  */
 static u32 temac_setoptions(struct net_device *ndev, u32 options)
@@ -489,17 +601,19 @@
 	struct temac_local *lp = netdev_priv(ndev);
 	struct temac_option *tp = &temac_options[0];
 	int reg;
+	unsigned long flags;

-	mutex_lock(&lp->indirect_mutex);
+	spin_lock_irqsave(lp->indirect_lock, flags);
 	while (tp->opt) {
-		reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
-		if (options & tp->opt)
+		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
+		if (options & tp->opt) {
 			reg |= tp->m_or;
-		temac_indirect_out32(lp, tp->reg, reg);
+			temac_indirect_out32_locked(lp, tp->reg, reg);
+		}
 		tp++;
 	}
+	spin_unlock_irqrestore(lp->indirect_lock, flags);
 	lp->options |= options;
-	mutex_unlock(&lp->indirect_mutex);

 	return 0;
 }
@@ -510,6 +624,7 @@
 	struct temac_local *lp = netdev_priv(ndev);
 	u32 timeout;
 	u32 val;
+	unsigned long flags;

 	/* Perform a software reset */

@@ -518,7 +633,6 @@

 	dev_dbg(&ndev->dev, "%s()\n", __func__);

-	mutex_lock(&lp->indirect_mutex);
 	/* Reset the receiver and wait for it to finish reset */
 	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
 	timeout = 1000;
@@ -544,8 +658,11 @@
 	}

 	/* Disable the receiver */
-	val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
-	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
+	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
+				    val & ~XTE_RXC1_RXEN_MASK);
+	spin_unlock_irqrestore(lp->indirect_lock, flags);

 	/* Reset Local Link (DMA) */
 	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
@@ -565,12 +682,12 @@
 			"temac_device_reset descriptor allocation failed\n");
 	}

-	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
-	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
-	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
-	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
-
-	mutex_unlock(&lp->indirect_mutex);
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
+	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
+	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
+	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+	spin_unlock_irqrestore(lp->indirect_lock, flags);

 	/* Sync default options with HW
 	 * but leave receiver and transmitter disabled. */
@@ -594,13 +711,14 @@
 	struct phy_device *phy = ndev->phydev;
 	u32 mii_speed;
 	int link_state;
+	unsigned long flags;

 	/* hash together the state values to decide if something has changed */
 	link_state = phy->speed | (phy->duplex << 1) | phy->link;

-	mutex_lock(&lp->indirect_mutex);
 	if (lp->last_link != link_state) {
-		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
+		spin_lock_irqsave(lp->indirect_lock, flags);
+		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
 		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

 		switch (phy->speed) {
@@ -610,43 +728,85 @@
610728 }
611729
612730 /* Write new speed setting out to TEMAC */
613
- temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
731
+ temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
732
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
733
+
614734 lp->last_link = link_state;
615735 phy_print_status(phy);
616736 }
617
- mutex_unlock(&lp->indirect_mutex);
618737 }
738
+
739
+#ifdef CONFIG_64BIT
740
+
741
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
742
+{
743
+ bd->app3 = (u32)(((u64)p) >> 32);
744
+ bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
745
+}
746
+
747
+static void *ptr_from_txbd(struct cdmac_bd *bd)
748
+{
749
+ return (void *)(((u64)(bd->app3) << 32) | bd->app4);
750
+}
751
+
752
+#else
753
+
754
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
755
+{
756
+ bd->app4 = (u32)p;
757
+}
758
+
759
+static void *ptr_from_txbd(struct cdmac_bd *bd)
760
+{
761
+ return (void *)(bd->app4);
762
+}
763
+
764
+#endif
619765
620766 static void temac_start_xmit_done(struct net_device *ndev)
621767 {
622768 struct temac_local *lp = netdev_priv(ndev);
623769 struct cdmac_bd *cur_p;
624770 unsigned int stat = 0;
771
+ struct sk_buff *skb;
625772
626773 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
627
- stat = cur_p->app0;
774
+ stat = be32_to_cpu(cur_p->app0);
628775
629776 while (stat & STS_CTRL_APP0_CMPLT) {
630
- dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
631
- DMA_TO_DEVICE);
632
- if (cur_p->app4)
633
- dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
634
- cur_p->app0 = 0;
777
+ /* Make sure that the other fields are read after bd is
778
+ * released by dma
779
+ */
780
+ rmb();
781
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
782
+ be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
783
+ skb = (struct sk_buff *)ptr_from_txbd(cur_p);
784
+ if (skb)
785
+ dev_consume_skb_irq(skb);
635786 cur_p->app1 = 0;
636787 cur_p->app2 = 0;
637788 cur_p->app3 = 0;
638789 cur_p->app4 = 0;
639790
640791 ndev->stats.tx_packets++;
641
- ndev->stats.tx_bytes += cur_p->len;
792
+ ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
793
+
794
+ /* app0 must be visible last, as it is used to flag
795
+ * availability of the bd
796
+ */
797
+ smp_mb();
798
+ cur_p->app0 = 0;
642799
643800 lp->tx_bd_ci++;
644
- if (lp->tx_bd_ci >= TX_BD_NUM)
801
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
645802 lp->tx_bd_ci = 0;
646803
647804 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
648
- stat = cur_p->app0;
805
+ stat = be32_to_cpu(cur_p->app0);
649806 }
807
+
808
+ /* Matches barrier in temac_start_xmit */
809
+ smp_mb();
650810
651811 netif_wake_queue(ndev);
652812 }
@@ -663,8 +823,11 @@
 	if (cur_p->app0)
 		return NETDEV_TX_BUSY;

+	/* Make sure to read next bd app0 after this one */
+	rmb();
+
 	tail++;
-	if (tail >= TX_BD_NUM)
+	if (tail >= lp->tx_bd_num)
 		tail = 0;

 	cur_p = &lp->tx_bd_v[tail];
@@ -679,20 +842,29 @@
 {
 	struct temac_local *lp = netdev_priv(ndev);
 	struct cdmac_bd *cur_p;
-	dma_addr_t start_p, tail_p;
+	dma_addr_t tail_p, skb_dma_addr;
 	int ii;
 	unsigned long num_frag;
 	skb_frag_t *frag;

 	num_frag = skb_shinfo(skb)->nr_frags;
 	frag = &skb_shinfo(skb)->frags[0];
-	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

-	if (temac_check_tx_bd_space(lp, num_frag)) {
-		if (!netif_queue_stopped(ndev))
-			netif_stop_queue(ndev);
-		return NETDEV_TX_BUSY;
+	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
+		if (netif_queue_stopped(ndev))
+			return NETDEV_TX_BUSY;
+
+		netif_stop_queue(ndev);
+
+		/* Matches barrier in temac_start_xmit_done */
+		smp_mb();
+
+		/* Space might have just been freed - check again */
+		if (temac_check_tx_bd_space(lp, num_frag + 1))
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(ndev);
 	}

 	cur_p->app0 = 0;
@@ -700,40 +872,74 @@
700872 unsigned int csum_start_off = skb_checksum_start_offset(skb);
701873 unsigned int csum_index_off = csum_start_off + skb->csum_offset;
702874
703
- cur_p->app0 |= 1; /* TX Checksum Enabled */
704
- cur_p->app1 = (csum_start_off << 16) | csum_index_off;
875
+ cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
876
+ cur_p->app1 = cpu_to_be32((csum_start_off << 16)
877
+ | csum_index_off);
705878 cur_p->app2 = 0; /* initial checksum seed */
706879 }
707880
708
- cur_p->app0 |= STS_CTRL_APP0_SOP;
709
- cur_p->len = skb_headlen(skb);
710
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
711
- skb_headlen(skb), DMA_TO_DEVICE);
712
- cur_p->app4 = (unsigned long)skb;
881
+ cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
882
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
883
+ skb_headlen(skb), DMA_TO_DEVICE);
884
+ cur_p->len = cpu_to_be32(skb_headlen(skb));
885
+ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
886
+ dev_kfree_skb_any(skb);
887
+ ndev->stats.tx_dropped++;
888
+ return NETDEV_TX_OK;
889
+ }
890
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
713891
714892 for (ii = 0; ii < num_frag; ii++) {
715
- lp->tx_bd_tail++;
716
- if (lp->tx_bd_tail >= TX_BD_NUM)
893
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
717894 lp->tx_bd_tail = 0;
718895
719896 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
720
- cur_p->phys = dma_map_single(ndev->dev.parent,
721
- skb_frag_address(frag),
722
- skb_frag_size(frag), DMA_TO_DEVICE);
723
- cur_p->len = skb_frag_size(frag);
897
+ skb_dma_addr = dma_map_single(ndev->dev.parent,
898
+ skb_frag_address(frag),
899
+ skb_frag_size(frag),
900
+ DMA_TO_DEVICE);
901
+ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
902
+ if (--lp->tx_bd_tail < 0)
903
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
904
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
905
+ while (--ii >= 0) {
906
+ --frag;
907
+ dma_unmap_single(ndev->dev.parent,
908
+ be32_to_cpu(cur_p->phys),
909
+ skb_frag_size(frag),
910
+ DMA_TO_DEVICE);
911
+ if (--lp->tx_bd_tail < 0)
912
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
913
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
914
+ }
915
+ dma_unmap_single(ndev->dev.parent,
916
+ be32_to_cpu(cur_p->phys),
917
+ skb_headlen(skb), DMA_TO_DEVICE);
918
+ dev_kfree_skb_any(skb);
919
+ ndev->stats.tx_dropped++;
920
+ return NETDEV_TX_OK;
921
+ }
922
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
923
+ cur_p->len = cpu_to_be32(skb_frag_size(frag));
724924 cur_p->app0 = 0;
725925 frag++;
726926 }
727
- cur_p->app0 |= STS_CTRL_APP0_EOP;
927
+ cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
928
+
929
+ /* Mark last fragment with skb address, so it can be consumed
930
+ * in temac_start_xmit_done()
931
+ */
932
+ ptr_to_txbd((void *)skb, cur_p);
728933
729934 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
730935 lp->tx_bd_tail++;
731
- if (lp->tx_bd_tail >= TX_BD_NUM)
936
+ if (lp->tx_bd_tail >= lp->tx_bd_num)
732937 lp->tx_bd_tail = 0;
733938
734939 skb_tx_timestamp(skb);
735940
736941 /* Kick off the transfer */
942
+ wmb();
737943 lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
738944
739945 if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
@@ -742,31 +948,56 @@
742948 return NETDEV_TX_OK;
743949 }
744950
951
+static int ll_temac_recv_buffers_available(struct temac_local *lp)
952
+{
953
+ int available;
954
+
955
+ if (!lp->rx_skb[lp->rx_bd_ci])
956
+ return 0;
957
+ available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
958
+ if (available <= 0)
959
+ available += lp->rx_bd_num;
960
+ return available;
961
+}
745962
746963 static void ll_temac_recv(struct net_device *ndev)
747964 {
748965 struct temac_local *lp = netdev_priv(ndev);
749
- struct sk_buff *skb, *new_skb;
750
- unsigned int bdstat;
751
- struct cdmac_bd *cur_p;
752
- dma_addr_t tail_p;
753
- int length;
754966 unsigned long flags;
967
+ int rx_bd;
968
+ bool update_tail = false;
755969
756970 spin_lock_irqsave(&lp->rx_lock, flags);
757971
758
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
759
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
972
+ /* Process all received buffers, passing them on network
973
+ * stack. After this, the buffer descriptors will be in an
974
+ * un-allocated stage, where no skb is allocated for it, and
975
+ * they are therefore not available for TEMAC/DMA.
976
+ */
977
+ do {
978
+ struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
979
+ struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
980
+ unsigned int bdstat = be32_to_cpu(bd->app0);
981
+ int length;
760982
761
- bdstat = cur_p->app0;
762
- while ((bdstat & STS_CTRL_APP0_CMPLT)) {
983
+ /* While this should not normally happen, we can end
984
+ * here when GFP_ATOMIC allocations fail, and we
985
+ * therefore have un-allocated buffers.
986
+ */
987
+ if (!skb)
988
+ break;
763989
764
- skb = lp->rx_skb[lp->rx_bd_ci];
765
- length = cur_p->app4 & 0x3FFF;
990
+ /* Loop over all completed buffer descriptors */
991
+ if (!(bdstat & STS_CTRL_APP0_CMPLT))
992
+ break;
766993
767
- dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
768
- DMA_FROM_DEVICE);
994
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
995
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
996
+ /* The buffer is not valid for DMA anymore */
997
+ bd->phys = 0;
998
+ bd->len = 0;
769999
1000
+ length = be32_to_cpu(bd->app4) & 0x3FFF;
7701001 skb_put(skb, length);
7711002 skb->protocol = eth_type_trans(skb, ndev);
7721003 skb_checksum_none_assert(skb);
@@ -776,40 +1007,105 @@
7761007 (skb->protocol == htons(ETH_P_IP)) &&
7771008 (skb->len > 64)) {
7781009
779
- skb->csum = cur_p->app3 & 0xFFFF;
1010
+ /* Convert from device endianness (be32) to cpu
1011
+ * endiannes, and if necessary swap the bytes
1012
+ * (back) for proper IP checksum byte order
1013
+ * (be16).
1014
+ */
1015
+ skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
7801016 skb->ip_summed = CHECKSUM_COMPLETE;
7811017 }
7821018
7831019 if (!skb_defer_rx_timestamp(skb))
7841020 netif_rx(skb);
1021
+ /* The skb buffer is now owned by network stack above */
1022
+ lp->rx_skb[lp->rx_bd_ci] = NULL;
7851023
7861024 ndev->stats.rx_packets++;
7871025 ndev->stats.rx_bytes += length;
7881026
789
- new_skb = netdev_alloc_skb_ip_align(ndev,
790
- XTE_MAX_JUMBO_FRAME_SIZE);
791
- if (!new_skb) {
792
- spin_unlock_irqrestore(&lp->rx_lock, flags);
793
- return;
1027
+ rx_bd = lp->rx_bd_ci;
1028
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
1029
+ lp->rx_bd_ci = 0;
1030
+ } while (rx_bd != lp->rx_bd_tail);
1031
+
1032
+ /* DMA operations will halt when the last buffer descriptor is
1033
+ * processed (ie. the one pointed to by RX_TAILDESC_PTR).
1034
+ * When that happens, no more interrupt events will be
1035
+ * generated. No IRQ_COAL or IRQ_DLY, and not even an
1036
+ * IRQ_ERR. To avoid stalling, we schedule a delayed work
1037
+ * when there is a potential risk of that happening. The work
1038
+ * will call this function, and thus re-schedule itself until
1039
+ * enough buffers are available again.
1040
+ */
1041
+ if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
1042
+ schedule_delayed_work(&lp->restart_work, HZ / 1000);
1043
+
1044
+ /* Allocate new buffers for those buffer descriptors that were
1045
+ * passed to network stack. Note that GFP_ATOMIC allocations
1046
+ * can fail (e.g. when a larger burst of GFP_ATOMIC
1047
+ * allocations occurs), so while we try to allocate all
1048
+ * buffers in the same interrupt where they were processed, we
1049
+ * continue with what we could get in case of allocation
1050
+ * failure. Allocation of remaining buffers will be retried
1051
+ * in following calls.
1052
+ */
1053
+ while (1) {
1054
+ struct sk_buff *skb;
1055
+ struct cdmac_bd *bd;
1056
+ dma_addr_t skb_dma_addr;
1057
+
1058
+ rx_bd = lp->rx_bd_tail + 1;
1059
+ if (rx_bd >= lp->rx_bd_num)
1060
+ rx_bd = 0;
1061
+ bd = &lp->rx_bd_v[rx_bd];
1062
+
1063
+ if (bd->phys)
1064
+ break; /* All skb's allocated */
1065
+
1066
+ skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
1067
+ if (!skb) {
1068
+ dev_warn(&ndev->dev, "skb alloc failed\n");
1069
+ break;
7941070 }
7951071
796
- cur_p->app0 = STS_CTRL_APP0_IRQONEND;
797
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
798
- XTE_MAX_JUMBO_FRAME_SIZE,
799
- DMA_FROM_DEVICE);
800
- cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
801
- lp->rx_skb[lp->rx_bd_ci] = new_skb;
1072
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
1073
+ XTE_MAX_JUMBO_FRAME_SIZE,
1074
+ DMA_FROM_DEVICE);
1075
+ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
1076
+ skb_dma_addr))) {
1077
+ dev_kfree_skb_any(skb);
1078
+ break;
1079
+ }
8021080
803
- lp->rx_bd_ci++;
804
- if (lp->rx_bd_ci >= RX_BD_NUM)
805
- lp->rx_bd_ci = 0;
1081
+ bd->phys = cpu_to_be32(skb_dma_addr);
1082
+ bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
1083
+ bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
1084
+ lp->rx_skb[rx_bd] = skb;
8061085
807
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
808
- bdstat = cur_p->app0;
1086
+ lp->rx_bd_tail = rx_bd;
1087
+ update_tail = true;
8091088 }
810
- lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
1089
+
1090
+ /* Move tail pointer when buffers have been allocated */
1091
+ if (update_tail) {
1092
+ lp->dma_out(lp, RX_TAILDESC_PTR,
1093
+ lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
1094
+ }
8111095
8121096 spin_unlock_irqrestore(&lp->rx_lock, flags);
1097
+}
1098
+
1099
+/* Function scheduled to ensure a restart in case of DMA halt
1100
+ * condition caused by running out of buffer descriptors.
1101
+ */
1102
+static void ll_temac_restart_work_func(struct work_struct *work)
1103
+{
1104
+ struct temac_local *lp = container_of(work, struct temac_local,
1105
+ restart_work.work);
1106
+ struct net_device *ndev = lp->ndev;
1107
+
1108
+ ll_temac_recv(ndev);
8131109 }
8141110
8151111 static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
@@ -823,8 +1119,10 @@

 	if (status & (IRQ_COAL | IRQ_DLY))
 		temac_start_xmit_done(lp->ndev);
-	if (status & 0x080)
-		dev_err(&ndev->dev, "DMA error 0x%x\n", status);
+	if (status & (IRQ_ERR | IRQ_DMAERR))
+		dev_err_ratelimited(&ndev->dev,
+				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
+				    status, lp->dma_in(lp, TX_CHNL_STS));

 	return IRQ_HANDLED;
 }
@@ -841,6 +1139,10 @@

 	if (status & (IRQ_COAL | IRQ_DLY))
 		ll_temac_recv(lp->ndev);
+	if (status & (IRQ_ERR | IRQ_DMAERR))
+		dev_err_ratelimited(&ndev->dev,
+				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
+				    status, lp->dma_in(lp, RX_CHNL_STS));

 	return IRQ_HANDLED;
 }
@@ -860,7 +1162,14 @@
 			dev_err(lp->dev, "of_phy_connect() failed\n");
 			return -ENODEV;
 		}
-
+		phy_start(phydev);
+	} else if (strlen(lp->phy_name) > 0) {
+		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
+				     lp->phy_interface);
+		if (IS_ERR(phydev)) {
+			dev_err(lp->dev, "phy_connect() failed\n");
+			return PTR_ERR(phydev);
+		}
 		phy_start(phydev);
 	}

@@ -891,6 +1200,8 @@

 	dev_dbg(&ndev->dev, "temac_close()\n");

+	cancel_delayed_work_sync(&lp->restart_work);
+
 	free_irq(lp->tx_irq, ndev);
 	free_irq(lp->rx_irq, ndev);

@@ -919,24 +1230,14 @@
 }
 #endif

-static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
-	if (!netif_running(ndev))
-		return -EINVAL;
-
-	if (!ndev->phydev)
-		return -EINVAL;
-
-	return phy_mii_ioctl(ndev->phydev, rq, cmd);
-}
-
 static const struct net_device_ops temac_netdev_ops = {
 	.ndo_open = temac_open,
 	.ndo_stop = temac_stop,
 	.ndo_start_xmit = temac_start_xmit,
+	.ndo_set_rx_mode = temac_set_multicast_list,
 	.ndo_set_mac_address = temac_set_mac_address,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_do_ioctl = temac_ioctl,
+	.ndo_do_ioctl = phy_do_ioctl_running,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = temac_poll_controller,
 #endif
@@ -971,32 +1272,116 @@
9711272 .attrs = temac_device_attrs,
9721273 };
9731274
974
-/* ethtool support */
1275
+/* ---------------------------------------------------------------------
1276
+ * ethtool support
1277
+ */
1278
+
1279
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
1280
+ struct ethtool_ringparam *ering)
1281
+{
1282
+ struct temac_local *lp = netdev_priv(ndev);
1283
+
1284
+ ering->rx_max_pending = RX_BD_NUM_MAX;
1285
+ ering->rx_mini_max_pending = 0;
1286
+ ering->rx_jumbo_max_pending = 0;
1287
+ ering->tx_max_pending = TX_BD_NUM_MAX;
1288
+ ering->rx_pending = lp->rx_bd_num;
1289
+ ering->rx_mini_pending = 0;
1290
+ ering->rx_jumbo_pending = 0;
1291
+ ering->tx_pending = lp->tx_bd_num;
1292
+}
1293
+
1294
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
1295
+ struct ethtool_ringparam *ering)
1296
+{
1297
+ struct temac_local *lp = netdev_priv(ndev);
1298
+
1299
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
1300
+ ering->rx_mini_pending ||
1301
+ ering->rx_jumbo_pending ||
1302
+ ering->rx_pending > TX_BD_NUM_MAX)
1303
+ return -EINVAL;
1304
+
1305
+ if (netif_running(ndev))
1306
+ return -EBUSY;
1307
+
1308
+ lp->rx_bd_num = ering->rx_pending;
1309
+ lp->tx_bd_num = ering->tx_pending;
1310
+ return 0;
1311
+}
1312
+
1313
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
1314
+ struct ethtool_coalesce *ec)
1315
+{
1316
+ struct temac_local *lp = netdev_priv(ndev);
1317
+
1318
+ ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
1319
+ ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
1320
+ ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
1321
+ ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
1322
+ return 0;
1323
+}
1324
+
1325
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
1326
+ struct ethtool_coalesce *ec)
1327
+{
1328
+ struct temac_local *lp = netdev_priv(ndev);
1329
+
1330
+ if (netif_running(ndev)) {
1331
+ netdev_err(ndev,
1332
+ "Please stop netif before applying configuration\n");
1333
+ return -EFAULT;
1334
+ }
1335
+
1336
+ if (ec->rx_max_coalesced_frames)
1337
+ lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
1338
+ if (ec->tx_max_coalesced_frames)
1339
+ lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
1340
+ /* With typical LocalLink clock speed of 200 MHz and
1341
+ * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
1342
+ */
1343
+ if (ec->rx_coalesce_usecs)
1344
+ lp->coalesce_delay_rx =
1345
+ min(255U, (ec->rx_coalesce_usecs * 100) / 512);
1346
+ if (ec->tx_coalesce_usecs)
1347
+ lp->coalesce_delay_tx =
1348
+ min(255U, (ec->tx_coalesce_usecs * 100) / 512);
1349
+
1350
+ return 0;
1351
+}
1352
+
9751353 static const struct ethtool_ops temac_ethtool_ops = {
1354
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1355
+ ETHTOOL_COALESCE_MAX_FRAMES,
9761356 .nway_reset = phy_ethtool_nway_reset,
9771357 .get_link = ethtool_op_get_link,
9781358 .get_ts_info = ethtool_op_get_ts_info,
9791359 .get_link_ksettings = phy_ethtool_get_link_ksettings,
9801360 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1361
+ .get_ringparam = ll_temac_ethtools_get_ringparam,
1362
+ .set_ringparam = ll_temac_ethtools_set_ringparam,
1363
+ .get_coalesce = ll_temac_ethtools_get_coalesce,
1364
+ .set_coalesce = ll_temac_ethtools_set_coalesce,
9811365 };
9821366
983
-static int temac_of_probe(struct platform_device *op)
1367
+static int temac_probe(struct platform_device *pdev)
9841368 {
985
- struct device_node *np;
1369
+ struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
1370
+ struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
9861371 struct temac_local *lp;
9871372 struct net_device *ndev;
9881373 const void *addr;
9891374 __be32 *p;
1375
+ bool little_endian;
9901376 int rc = 0;
9911377
9921378 /* Init network device structure */
993
- ndev = alloc_etherdev(sizeof(*lp));
1379
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
9941380 if (!ndev)
9951381 return -ENOMEM;
9961382
997
- platform_set_drvdata(op, ndev);
998
- SET_NETDEV_DEV(ndev, &op->dev);
999
- ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1383
+ platform_set_drvdata(pdev, ndev);
1384
+ SET_NETDEV_DEV(ndev, &pdev->dev);
10001385 ndev->features = NETIF_F_SG;
10011386 ndev->netdev_ops = &temac_netdev_ops;
10021387 ndev->ethtool_ops = &temac_ethtool_ops;
@@ -1017,89 +1402,196 @@
10171402 /* setup temac private info structure */
10181403 lp = netdev_priv(ndev);
10191404 lp->ndev = ndev;
1020
- lp->dev = &op->dev;
1405
+ lp->dev = &pdev->dev;
10211406 lp->options = XTE_OPTION_DEFAULTS;
1407
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1408
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
10221409 spin_lock_init(&lp->rx_lock);
1023
- mutex_init(&lp->indirect_mutex);
1410
+ INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
1411
+
1412
+ /* Setup mutex for synchronization of indirect register access */
1413
+ if (pdata) {
1414
+ if (!pdata->indirect_lock) {
1415
+ dev_err(&pdev->dev,
1416
+ "indirect_lock missing in platform_data\n");
1417
+ return -EINVAL;
1418
+ }
1419
+ lp->indirect_lock = pdata->indirect_lock;
1420
+ } else {
1421
+ lp->indirect_lock = devm_kmalloc(&pdev->dev,
1422
+ sizeof(*lp->indirect_lock),
1423
+ GFP_KERNEL);
1424
+ if (!lp->indirect_lock)
1425
+ return -ENOMEM;
1426
+ spin_lock_init(lp->indirect_lock);
1427
+ }
10241428
10251429 /* map device registers */
1026
- lp->regs = of_iomap(op->dev.of_node, 0);
1027
- if (!lp->regs) {
1028
- dev_err(&op->dev, "could not map temac regs.\n");
1029
- rc = -ENOMEM;
1030
- goto nodev;
1430
+ lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
1431
+ if (IS_ERR(lp->regs)) {
1432
+ dev_err(&pdev->dev, "could not map TEMAC registers\n");
1433
+ return -ENOMEM;
1434
+ }
1435
+
1436
+ /* Select register access functions with the specified
1437
+ * endianness mode. Default for OF devices is big-endian.
1438
+ */
1439
+ little_endian = false;
1440
+ if (temac_np) {
1441
+ if (of_get_property(temac_np, "little-endian", NULL))
1442
+ little_endian = true;
1443
+ } else if (pdata) {
1444
+ little_endian = pdata->reg_little_endian;
1445
+ }
1446
+ if (little_endian) {
1447
+ lp->temac_ior = _temac_ior_le;
1448
+ lp->temac_iow = _temac_iow_le;
1449
+ } else {
1450
+ lp->temac_ior = _temac_ior_be;
1451
+ lp->temac_iow = _temac_iow_be;
10311452 }
10321453
10331454 /* Setup checksum offload, but default to off if not specified */
10341455 lp->temac_features = 0;
1035
- p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1036
- if (p && be32_to_cpu(*p)) {
1037
- lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1456
+ if (temac_np) {
1457
+ p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
1458
+ if (p && be32_to_cpu(*p))
1459
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1460
+ p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
1461
+ if (p && be32_to_cpu(*p))
1462
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
1463
+ } else if (pdata) {
1464
+ if (pdata->txcsum)
1465
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1466
+ if (pdata->rxcsum)
1467
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
1468
+ }
1469
+ if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
10381470 /* Can checksum TCP/UDP over IPv4. */
10391471 ndev->features |= NETIF_F_IP_CSUM;
1040
- }
1041
- p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1042
- if (p && be32_to_cpu(*p))
1043
- lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
10441472
1045
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1046
- np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
1047
- if (!np) {
1048
- dev_err(&op->dev, "could not find DMA node\n");
1049
- rc = -ENODEV;
1050
- goto err_iounmap;
1051
- }
1473
+ /* Defaults for IRQ delay/coalescing setup. These are
1474
+ * configuration values, so does not belong in device-tree.
1475
+ */
1476
+ lp->coalesce_delay_tx = 0x10;
1477
+ lp->coalesce_count_tx = 0x22;
1478
+ lp->coalesce_delay_rx = 0xff;
1479
+ lp->coalesce_count_rx = 0x07;
10521480
1053
- /* Setup the DMA register accesses, could be DCR or memory mapped */
1054
- if (temac_dcr_setup(lp, op, np)) {
1481
+ /* Setup LocalLink DMA */
1482
+ if (temac_np) {
1483
+ /* Find the DMA node, map the DMA registers, and
1484
+ * decode the DMA IRQs.
1485
+ */
1486
+ dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
1487
+ if (!dma_np) {
1488
+ dev_err(&pdev->dev, "could not find DMA node\n");
1489
+ return -ENODEV;
1490
+ }
10551491
1056
- /* no DCR in the device tree, try non-DCR */
1057
- lp->sdma_regs = of_iomap(np, 0);
1058
- if (lp->sdma_regs) {
1059
- lp->dma_in = temac_dma_in32;
1060
- lp->dma_out = temac_dma_out32;
1061
- dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
1492
+ /* Setup the DMA register accesses, could be DCR or
1493
+ * memory mapped.
1494
+ */
1495
+ if (temac_dcr_setup(lp, pdev, dma_np)) {
1496
+ /* no DCR in the device tree, try non-DCR */
1497
+ lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
1498
+ NULL);
1499
+ if (IS_ERR(lp->sdma_regs)) {
1500
+ dev_err(&pdev->dev,
1501
+ "unable to map DMA registers\n");
1502
+ of_node_put(dma_np);
1503
+ return PTR_ERR(lp->sdma_regs);
1504
+ }
1505
+ if (of_get_property(dma_np, "little-endian", NULL)) {
1506
+ lp->dma_in = temac_dma_in32_le;
1507
+ lp->dma_out = temac_dma_out32_le;
1508
+ } else {
1509
+ lp->dma_in = temac_dma_in32_be;
1510
+ lp->dma_out = temac_dma_out32_be;
1511
+ }
1512
+ dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
1513
+ }
1514
+
1515
+ /* Get DMA RX and TX interrupts */
1516
+ lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
1517
+ lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
1518
+
1519
+ /* Finished with the DMA node; drop the reference */
1520
+ of_node_put(dma_np);
1521
+ } else if (pdata) {
1522
+ /* 2nd memory resource specifies DMA registers */
1523
+ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
1524
+ if (IS_ERR(lp->sdma_regs)) {
1525
+ dev_err(&pdev->dev,
1526
+ "could not map DMA registers\n");
1527
+ return PTR_ERR(lp->sdma_regs);
1528
+ }
1529
+ if (pdata->dma_little_endian) {
1530
+ lp->dma_in = temac_dma_in32_le;
1531
+ lp->dma_out = temac_dma_out32_le;
10621532 } else {
1063
- dev_err(&op->dev, "unable to map DMA registers\n");
1064
- of_node_put(np);
1065
- goto err_iounmap;
1533
+ lp->dma_in = temac_dma_in32_be;
1534
+ lp->dma_out = temac_dma_out32_be;
1535
+ }
1536
+
1537
+ /* Get DMA RX and TX interrupts */
1538
+ lp->rx_irq = platform_get_irq(pdev, 0);
1539
+ lp->tx_irq = platform_get_irq(pdev, 1);
1540
+
1541
+ /* IRQ delay/coalescing setup */
1542
+ if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
1543
+ lp->coalesce_delay_tx = pdata->tx_irq_timeout;
1544
+ lp->coalesce_count_tx = pdata->tx_irq_count;
1545
+ }
1546
+ if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
1547
+ lp->coalesce_delay_rx = pdata->rx_irq_timeout;
1548
+ lp->coalesce_count_rx = pdata->rx_irq_count;
10661549 }
10671550 }
10681551
1069
- lp->rx_irq = irq_of_parse_and_map(np, 0);
1070
- lp->tx_irq = irq_of_parse_and_map(np, 1);
1071
-
1072
- of_node_put(np); /* Finished with the DMA node; drop the reference */
1073
-
1074
- if (!lp->rx_irq || !lp->tx_irq) {
1075
- dev_err(&op->dev, "could not determine irqs\n");
1076
- rc = -ENOMEM;
1077
- goto err_iounmap_2;
1552
+ /* Error handle returned DMA RX and TX interrupts */
1553
+ if (lp->rx_irq < 0) {
1554
+ if (lp->rx_irq != -EPROBE_DEFER)
1555
+ dev_err(&pdev->dev, "could not get DMA RX irq\n");
1556
+ return lp->rx_irq;
1557
+ }
1558
+ if (lp->tx_irq < 0) {
1559
+ if (lp->tx_irq != -EPROBE_DEFER)
1560
+ dev_err(&pdev->dev, "could not get DMA TX irq\n");
1561
+ return lp->tx_irq;
10781562 }
10791563
1080
-
1081
- /* Retrieve the MAC address */
1082
- addr = of_get_mac_address(op->dev.of_node);
1083
- if (!addr) {
1084
- dev_err(&op->dev, "could not find MAC address\n");
1085
- rc = -ENODEV;
1086
- goto err_iounmap_2;
1564
+ if (temac_np) {
1565
+ /* Retrieve the MAC address */
1566
+ addr = of_get_mac_address(temac_np);
1567
+ if (IS_ERR(addr)) {
1568
+ dev_err(&pdev->dev, "could not find MAC address\n");
1569
+ return -ENODEV;
1570
+ }
1571
+ temac_init_mac_address(ndev, addr);
1572
+ } else if (pdata) {
1573
+ temac_init_mac_address(ndev, pdata->mac_addr);
10871574 }
1088
- temac_init_mac_address(ndev, addr);
10891575
1090
- rc = temac_mdio_setup(lp, op->dev.of_node);
1576
+ rc = temac_mdio_setup(lp, pdev);
10911577 if (rc)
1092
- dev_warn(&op->dev, "error registering MDIO bus\n");
1578
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
10931579
1094
- lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1095
- if (lp->phy_node)
1096
- dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np);
1580
+ if (temac_np) {
1581
+ lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
1582
+ if (lp->phy_node)
1583
+ dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
1584
+ } else if (pdata) {
1585
+ snprintf(lp->phy_name, sizeof(lp->phy_name),
1586
+ PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
1587
+ lp->phy_interface = pdata->phy_interface;
1588
+ }
10971589
10981590 /* Add the device attributes */
10991591 rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
11001592 if (rc) {
11011593 dev_err(lp->dev, "Error creating sysfs files\n");
1102
- goto err_iounmap_2;
1594
+ goto err_sysfs_create;
11031595 }
11041596
11051597 rc = register_netdev(lp->ndev);
@@ -1110,33 +1602,25 @@

 	return 0;

- err_register_ndev:
+err_register_ndev:
 	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
- err_iounmap_2:
-	if (lp->sdma_regs)
-		iounmap(lp->sdma_regs);
- err_iounmap:
-	iounmap(lp->regs);
- nodev:
-	free_netdev(ndev);
-	ndev = NULL;
+err_sysfs_create:
+	if (lp->phy_node)
+		of_node_put(lp->phy_node);
+	temac_mdio_teardown(lp);
 	return rc;
 }

-static int temac_of_remove(struct platform_device *op)
+static int temac_remove(struct platform_device *pdev)
 {
-	struct net_device *ndev = platform_get_drvdata(op);
+	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct temac_local *lp = netdev_priv(ndev);

-	temac_mdio_teardown(lp);
 	unregister_netdev(ndev);
 	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
-	of_node_put(lp->phy_node);
-	lp->phy_node = NULL;
-	iounmap(lp->regs);
-	if (lp->sdma_regs)
-		iounmap(lp->sdma_regs);
-	free_netdev(ndev);
+	if (lp->phy_node)
+		of_node_put(lp->phy_node);
+	temac_mdio_teardown(lp);
 	return 0;
 }

@@ -1149,16 +1633,16 @@
 };
 MODULE_DEVICE_TABLE(of, temac_of_match);

-static struct platform_driver temac_of_driver = {
-	.probe = temac_of_probe,
-	.remove = temac_of_remove,
+static struct platform_driver temac_driver = {
+	.probe = temac_probe,
+	.remove = temac_remove,
 	.driver = {
 		.name = "xilinx_temac",
 		.of_match_table = temac_of_match,
 	},
 };

-module_platform_driver(temac_of_driver);
+module_platform_driver(temac_driver);

 MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
 MODULE_AUTHOR("Yoshio Kashiwagi");