...
+// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
...
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
- * - Fix multicast assignment.
 * - Fix support for hardware checksumming.
 * - Testing. Lots and lots of testing.
 *
...
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
...
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
+#include <linux/processor.h>
+#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptor defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

-u32 temac_ior(struct temac_local *lp, int offset)
+static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
-	return in_be32(lp->regs + offset);
+	return ioread32be(lp->regs + offset);
}

-void temac_iow(struct temac_local *lp, int offset, u32 value)
+static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
-	out_be32(lp->regs + offset, value);
+	return iowrite32be(value, lp->regs + offset);
}

+static u32 _temac_ior_le(struct temac_local *lp, int offset)
+{
+	return ioread32(lp->regs + offset);
+}
+
+static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
+{
+	return iowrite32(value, lp->regs + offset);
+}
+
+static bool hard_acs_rdy(struct temac_local *lp)
+{
+	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
+}
+
+static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
+{
+	ktime_t cur = ktime_get();
+
+	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
+}
+
+/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
+ * that was used before, and should cover MDIO bus speed down to 3200
+ * Hz.
+ */
+#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
+
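A quick sanity check on that bound (an aside, assuming a standard Clause 22 MDIO transaction of 64 MDC bit times including preamble): 64 bits / 3200 Hz = 20 ms, so one full transaction at the slowest covered bus speed fits exactly in the poll window.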
+/*
+ * temac_indirect_busywait - Wait for current indirect register access
+ * to complete.
+ */
int temac_indirect_busywait(struct temac_local *lp)
{
-	unsigned long end = jiffies + 2;
+	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

-	while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
-		if (time_before_eq(end, jiffies)) {
-			WARN_ON(1);
-			return -ETIMEDOUT;
-		}
-		msleep(1);
-	}
-	return 0;
+	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
+	if (WARN_ON(!hard_acs_rdy(lp)))
+		return -ETIMEDOUT;
+	else
+		return 0;
}

-/**
- * temac_indirect_in32
- *
- * lp->indirect_mutex must be held when calling this function
+/*
+ * temac_indirect_in32 - Indirect register read access. This function
+ * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
-	u32 val;
+	unsigned long flags;
+	int val;

-	if (temac_indirect_busywait(lp))
-		return -ETIMEDOUT;
-	temac_iow(lp, XTE_CTL0_OFFSET, reg);
-	if (temac_indirect_busywait(lp))
-		return -ETIMEDOUT;
-	val = temac_ior(lp, XTE_LSW0_OFFSET);
-
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	val = temac_indirect_in32_locked(lp, reg);
+	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}

-/**
- * temac_indirect_out32
- *
- * lp->indirect_mutex must be held when calling this function
+/*
+ * temac_indirect_in32_locked - Indirect register read access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
+{
+	/* This initial wait should normally not spin, as we always
+	 * try to wait for indirect access to complete before
+	 * releasing the indirect_lock.
+	 */
+	if (WARN_ON(temac_indirect_busywait(lp)))
+		return -ETIMEDOUT;
+	/* Initiate read from indirect register */
+	temac_iow(lp, XTE_CTL0_OFFSET, reg);
+	/* Wait for indirect register access to complete. We really
+	 * should not see timeouts, and could even end up causing
+	 * problems for the following indirect access, so let's make
+	 * a bit of WARN noise.
+	 */
+	if (WARN_ON(temac_indirect_busywait(lp)))
+		return -ETIMEDOUT;
+	/* Value is ready now */
+	return temac_ior(lp, XTE_LSW0_OFFSET);
+}
+
+/*
+ * temac_indirect_out32 - Indirect register write access. This function
+ * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
-	if (temac_indirect_busywait(lp))
+	unsigned long flags;
+
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	temac_indirect_out32_locked(lp, reg, value);
+	spin_unlock_irqrestore(lp->indirect_lock, flags);
+}
+
+/*
+ * temac_indirect_out32_locked - Indirect register write access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
+{
+	/* As in temac_indirect_in32_locked(), we should normally not
+	 * spin here. And if it happens, we actually end up silently
+	 * ignoring the write request. Ouch.
+	 */
+	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
+	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
-	temac_indirect_busywait(lp);
+	/* As in temac_indirect_in32_locked(), we should not see timeouts
+	 * here. And if it happens, we continue before the write has
+	 * completed. Not good.
+	 */
+	WARN_ON(temac_indirect_busywait(lp));
}
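The intended usage pattern for the *_locked accessors is worth spelling out; a minimal sketch follows (the function name is illustrative, and the body mirrors the receiver-disable sequence this patch adds to temac_device_reset()): take the lock once and batch the indirect accesses under it.

static void temac_rx_disable_sketch(struct temac_local *lp)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	/* Read-modify-write of one indirect register in a single lock hold */
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

Holding the lock across the pair also guarantees that the read and the write-back go through the CTL0/LSW0 window without another context interleaving its own indirect access.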

-/**
- * temac_dma_in32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
+/*
+ * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
+ * register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_in.
 */
-static u32 temac_dma_in32(struct temac_local *lp, int reg)
+static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
-	return in_be32(lp->sdma_regs + (reg << 2));
+	return ioread32be(lp->sdma_regs + (reg << 2));
}

-/**
- * temac_dma_out32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
- */
-static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
+static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
-	out_be32(lp->sdma_regs + (reg << 2), value);
+	return ioread32(lp->sdma_regs + (reg << 2));
+}
+
+/*
+ * temac_dma_out32_* - Memory mapped DMA write, these functions expect
+ * a register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_out.
+ */
+static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
+{
+	iowrite32be(value, lp->sdma_regs + (reg << 2));
+}
+
+static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
+{
+	iowrite32(value, lp->sdma_regs + (reg << 2));
}

/* DMA register access functions can be DCR based or memory mapped.
...
 */
#ifdef CONFIG_PPC_DCR

-/**
+/*
 * temac_dma_dcr_in32 - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
...
	return dcr_read(lp->sdma_dcrs, reg);
}

-/**
+/*
 * temac_dma_dcr_out32 - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
...
	dcr_write(lp->sdma_dcrs, reg, value);
}

-/**
+/*
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
...

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
- * such as with MicroBlaze
+ * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
...

#endif

-/**
+/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
...
	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;
		else {
...
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-				  lp->rx_bd_v, lp->rx_bd_p);
+				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-				  lp->tx_bd_v, lp->tx_bd_p);
-	kfree(lp->rx_skb);
+				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+				  lp->tx_bd_v, lp->tx_bd_p);
}

-/**
+/*
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
+	dma_addr_t skb_dma_addr;
	int i;

-	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
+	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
-	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-					  &lp->tx_bd_p, GFP_KERNEL);
+	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

-	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-					  &lp->rx_bd_p, GFP_KERNEL);
+	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

-	for (i = 0; i < TX_BD_NUM; i++) {
-		lp->tx_bd_v[i].next = lp->tx_bd_p +
-				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
+	for (i = 0; i < lp->tx_bd_num; i++) {
+		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
+			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

-	for (i = 0; i < RX_BD_NUM; i++) {
-		lp->rx_bd_v[i].next = lp->rx_bd_p +
-				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
+	for (i = 0; i < lp->rx_bd_num; i++) {
+		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
...

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
-		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
-						     skb->data,
-						     XTE_MAX_JUMBO_FRAME_SIZE,
-						     DMA_FROM_DEVICE);
-		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
-		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
+		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+					      XTE_MAX_JUMBO_FRAME_SIZE,
+					      DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
+			goto out;
+		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
+		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

-	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
-		    CHNL_CTRL_IRQ_EN |
-		    CHNL_CTRL_IRQ_DLY_EN |
-		    CHNL_CTRL_IRQ_COAL_EN);
-	/* 0x10220483 */
-	/* 0x00100483 */
-	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
-		    CHNL_CTRL_IRQ_EN |
-		    CHNL_CTRL_IRQ_DLY_EN |
-		    CHNL_CTRL_IRQ_COAL_EN |
-		    CHNL_CTRL_IRQ_IOE);
-	/* 0xff010283 */
-
-	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
-	lp->dma_out(lp, RX_TAILDESC_PTR,
-		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
-	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+	/* Configure DMA channel (irq setup) */
+	lp->dma_out(lp, TX_CHNL_CTRL,
+		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
+		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
+		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
+	lp->dma_out(lp, RX_CHNL_CTRL,
+		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
+		    CHNL_CTRL_IRQ_IOE |
+		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
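A note on the bit layout implied by the shifts above: CHNL_CTRL packs the coalesce delay timer into bits 31:24 and the coalesce frame count into bits 23:16, which is also why the ethtool handlers further down cap both values at 255. The removed hard-coded constants decode the same way: 0x10220400 is delay 0x10 and count 0x22 plus the 0x00000400 counter-width bit, and 0xff070000 is delay 0xff and count 0x07.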

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
-	lp->tx_bd_next = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
+	lp->rx_bd_tail = lp->rx_bd_num - 1;
+
+	/* Enable RX DMA transfers */
+	wmb();
+	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+	lp->dma_out(lp, RX_TAILDESC_PTR,
+		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
+
+	/* Prepare for TX DMA transfer */
+	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

...
static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
+	unsigned long flags;

	/* Set up the unicast MAC address filter and set its MAC address */
-	mutex_lock(&lp->indirect_mutex);
-	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
-			     (ndev->dev_addr[0]) |
-			     (ndev->dev_addr[1] << 8) |
-			     (ndev->dev_addr[2] << 16) |
-			     (ndev->dev_addr[3] << 24));
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
+				    (ndev->dev_addr[0]) |
+				    (ndev->dev_addr[1] << 8) |
+				    (ndev->dev_addr[2] << 16) |
+				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1,
	 * so don't affect them. Set MAC bits [47:32] in EUAW1. */
-	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
-			     (ndev->dev_addr[4] & 0x000000ff) |
-			     (ndev->dev_addr[5] << 8));
-	mutex_unlock(&lp->indirect_mutex);
+	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
+				    (ndev->dev_addr[4] & 0x000000ff) |
+				    (ndev->dev_addr[5] << 8));
+	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
-	memcpy(ndev->dev_addr, address, ETH_ALEN);
+	ether_addr_copy(ndev->dev_addr, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
...
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
-	u32 multi_addr_msw, multi_addr_lsw, val;
-	int i;
+	u32 multi_addr_msw, multi_addr_lsw;
+	int i = 0;
+	unsigned long flags;
+	bool promisc_mode_disabled = false;

-	mutex_lock(&lp->indirect_mutex);
-	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
-	    netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
-		/*
-		 * We must make the kernel realise we had to move
-		 * into promisc mode or we start all out war on
-		 * the cable. If it was a promisc request the
-		 * flag is already set. If not we assert it.
-		 */
-		ndev->flags |= IFF_PROMISC;
+	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
-	} else if (!netdev_mc_empty(ndev)) {
+		return;
+	}
+
+	spin_lock_irqsave(lp->indirect_lock, flags);
+
+	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

-		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
-			if (i >= MULTICAST_CAM_TABLE_NUM)
+			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
-			temac_indirect_out32(lp, XTE_MAW0_OFFSET,
-					     multi_addr_msw);
+			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
+						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
-			temac_indirect_out32(lp, XTE_MAW1_OFFSET,
-					     multi_addr_lsw);
+			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
+						    multi_addr_lsw);
			i++;
		}
-	} else {
-		val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
-		temac_indirect_out32(lp, XTE_AFM_OFFSET,
-				     val & ~XTE_AFM_EPPRM_MASK);
-		temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
-		temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
-		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
-	mutex_unlock(&lp->indirect_mutex);
+
+	/* Clear all or remaining/unused address table entries */
+	while (i < MULTICAST_CAM_TABLE_NUM) {
+		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
+		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
+		i++;
+	}
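The i << 16 in the clearing loop matches the (i << 16) used when programming valid entries above: the CAM entry index is carried in the upper halfword of the XTE_MAW1 value (presumably bits 17:16, given the 4-entry table), so each pass addresses and zeroes one specific CAM slot rather than rewriting entry 0 repeatedly.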
+
+	/* Enable address filter block if currently disabled */
+	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
+	    & XTE_AFM_EPPRM_MASK) {
+		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
+		promisc_mode_disabled = true;
+	}
+
+	spin_unlock_irqrestore(lp->indirect_lock, flags);
+
+	if (promisc_mode_disabled)
+		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}

static struct temac_option {
...
	{}
};

-/**
+/*
 * temac_setoptions
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
...
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
+	unsigned long flags;

-	mutex_lock(&lp->indirect_mutex);
+	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
-		reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
-		if (options & tp->opt)
+		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
+		if (options & tp->opt) {
			reg |= tp->m_or;
-		temac_indirect_out32(lp, tp->reg, reg);
+			temac_indirect_out32_locked(lp, tp->reg, reg);
+		}
		tp++;
	}
+	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;
-	mutex_unlock(&lp->indirect_mutex);

	return 0;
}
...
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
+	unsigned long flags;

	/* Perform a software reset */

...

	dev_dbg(&ndev->dev, "%s()\n", __func__);

-	mutex_lock(&lp->indirect_mutex);
	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
...
	}

	/* Disable the receiver */
-	val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
-	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
+	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
+				    val & ~XTE_RXC1_RXEN_MASK);
+	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
...
		"temac_device_reset descriptor allocation failed\n");
	}

-	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
-	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
-	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
-	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
-
-	mutex_unlock(&lp->indirect_mutex);
+	spin_lock_irqsave(lp->indirect_lock, flags);
+	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
+	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
+	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
+	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled. */
...
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
+	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

-	mutex_lock(&lp->indirect_mutex);
	if (lp->last_link != link_state) {
-		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
+		spin_lock_irqsave(lp->indirect_lock, flags);
+		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
...
		}

		/* Write new speed setting out to TEMAC */
-		temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
+		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
+		spin_unlock_irqrestore(lp->indirect_lock, flags);
+
		lp->last_link = link_state;
		phy_print_status(phy);
	}
-	mutex_unlock(&lp->indirect_mutex);
}
+
+#ifdef CONFIG_64BIT
+
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+	bd->app3 = (u32)(((u64)p) >> 32);
+	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
+}
+
+static void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
+}
+
+#else
+
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+	bd->app4 = (u32)p;
+}
+
+static void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+	return (void *)(bd->app4);
+}
+
+#endif
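These helpers simply split a CPU pointer across the two 32-bit app words of the descriptor and join it again on completion. A self-contained round-trip sketch of the 64-bit variant (illustrative only; plain userspace C with hypothetical names, not driver code):

#include <assert.h>
#include <stdint.h>

/* Stand-in for the app3/app4 words of struct cdmac_bd */
struct bd_sketch {
	uint32_t app3, app4;
};

int main(void)
{
	struct bd_sketch bd;
	int x;
	void *p = &x;

	/* Store: high half in app3, low half in app4 */
	bd.app3 = (uint32_t)((uint64_t)(uintptr_t)p >> 32);
	bd.app4 = (uint32_t)((uint64_t)(uintptr_t)p & 0xFFFFFFFF);
	/* Load: reassemble the halves and verify the round trip */
	assert((void *)(uintptr_t)(((uint64_t)bd.app3 << 32) | bd.app4) == p);
	return 0;
}

On 32-bit builds the high half is simply zero, so the same store/load pair still round-trips, which is what the #else variant relies on.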

static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
+	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
-	stat = cur_p->app0;
+	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
-		dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
-				 DMA_TO_DEVICE);
-		if (cur_p->app4)
-			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
-		cur_p->app0 = 0;
+		/* Make sure that the other fields are read after bd is
+		 * released by dma
+		 */
+		rmb();
+		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
+		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
+		if (skb)
+			dev_consume_skb_irq(skb);
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += cur_p->len;
+		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
+
+		/* app0 must be visible last, as it is used to flag
+		 * availability of the bd
+		 */
+		smp_mb();
+		cur_p->app0 = 0;

		lp->tx_bd_ci++;
-		if (lp->tx_bd_ci >= TX_BD_NUM)
+		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
-		stat = cur_p->app0;
+		stat = be32_to_cpu(cur_p->app0);
	}
+
+	/* Matches barrier in temac_start_xmit */
+	smp_mb();

	netif_wake_queue(ndev);
}
...
	if (cur_p->app0)
		return NETDEV_TX_BUSY;

+	/* Make sure to read next bd app0 after this one */
+	rmb();
+
	tail++;
-	if (tail >= TX_BD_NUM)
+	if (tail >= lp->tx_bd_num)
		tail = 0;

	cur_p = &lp->tx_bd_v[tail];
...
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
-	dma_addr_t start_p, tail_p;
+	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
-	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

-	if (temac_check_tx_bd_space(lp, num_frag)) {
-		if (!netif_queue_stopped(ndev))
-			netif_stop_queue(ndev);
-		return NETDEV_TX_BUSY;
+	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
+		if (netif_queue_stopped(ndev))
+			return NETDEV_TX_BUSY;
+
+		netif_stop_queue(ndev);
+
+		/* Matches barrier in temac_start_xmit_done */
+		smp_mb();
+
+		/* Space might have just been freed - check again */
+		if (temac_check_tx_bd_space(lp, num_frag + 1))
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(ndev);
	}
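The stop/recheck/wake sequence above closes a classic lost-wakeup race. Consider the interleaving it guards against, assuming the recheck were absent: start_xmit sees the ring full; before it manages to stop the queue, the TX completion handler frees every descriptor and calls netif_wake_queue() (a no-op, since the queue is not stopped yet); start_xmit then stops the queue and returns NETDEV_TX_BUSY, and with no TX completions left in flight nothing ever wakes the queue again. The smp_mb() here, paired with the one in temac_start_xmit_done(), ensures each side observes the other's state update before making its final check.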

	cur_p->app0 = 0;
...
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

-		cur_p->app0 |= 1; /* TX Checksum Enabled */
-		cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
+		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
+					  | csum_index_off);
		cur_p->app2 = 0; /* initial checksum seed */
	}

-	cur_p->app0 |= STS_CTRL_APP0_SOP;
-	cur_p->len = skb_headlen(skb);
-	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
-				     skb_headlen(skb), DMA_TO_DEVICE);
-	cur_p->app4 = (unsigned long)skb;
+	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
+	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+				      skb_headlen(skb), DMA_TO_DEVICE);
+	cur_p->len = cpu_to_be32(skb_headlen(skb));
+	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
+		dev_kfree_skb_any(skb);
+		ndev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	cur_p->phys = cpu_to_be32(skb_dma_addr);

	for (ii = 0; ii < num_frag; ii++) {
-		lp->tx_bd_tail++;
-		if (lp->tx_bd_tail >= TX_BD_NUM)
+		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
-		cur_p->phys = dma_map_single(ndev->dev.parent,
-					     skb_frag_address(frag),
-					     skb_frag_size(frag), DMA_TO_DEVICE);
-		cur_p->len = skb_frag_size(frag);
+		skb_dma_addr = dma_map_single(ndev->dev.parent,
+					      skb_frag_address(frag),
+					      skb_frag_size(frag),
+					      DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
+			/* Mapping failed: unwind everything mapped so far */
+			if (--lp->tx_bd_tail < 0)
+				lp->tx_bd_tail = lp->tx_bd_num - 1;
+			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+			/* Walk back over the already-mapped fragments */
+			while (--ii >= 0) {
+				--frag;
+				dma_unmap_single(ndev->dev.parent,
+						 be32_to_cpu(cur_p->phys),
+						 skb_frag_size(frag),
+						 DMA_TO_DEVICE);
+				if (--lp->tx_bd_tail < 0)
+					lp->tx_bd_tail = lp->tx_bd_num - 1;
+				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+			}
+			/* Finally unmap the skb head mapped above */
+			dma_unmap_single(ndev->dev.parent,
+					 be32_to_cpu(cur_p->phys),
+					 skb_headlen(skb), DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
+			ndev->stats.tx_dropped++;
+			return NETDEV_TX_OK;
+		}
+		cur_p->phys = cpu_to_be32(skb_dma_addr);
+		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
-	cur_p->app0 |= STS_CTRL_APP0_EOP;
+	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
+
+	/* Mark last fragment with skb address, so it can be consumed
+	 * in temac_start_xmit_done()
+	 */
+	ptr_to_txbd((void *)skb, cur_p);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
-	if (lp->tx_bd_tail >= TX_BD_NUM)
+	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
+	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
...
	return NETDEV_TX_OK;
}

+static int ll_temac_recv_buffers_available(struct temac_local *lp)
+{
+	int available;
+
+	if (!lp->rx_skb[lp->rx_bd_ci])
+		return 0;
+	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
+	if (available <= 0)
+		available += lp->rx_bd_num;
+	return available;
+}
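A worked example of the wrap-around arithmetic, assuming rx_bd_num = 8: with rx_bd_ci = 6 and rx_bd_tail = 1, available = 1 + 1 - 6 = -4, which wraps to -4 + 8 = 4 (descriptors 6, 7, 0 and 1 are still armed for DMA); with rx_bd_ci = 2 and rx_bd_tail = 1, the whole ring is armed and the formula yields 1 + 1 - 2 + 8 = 8.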

static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
-	struct sk_buff *skb, *new_skb;
-	unsigned int bdstat;
-	struct cdmac_bd *cur_p;
-	dma_addr_t tail_p;
-	int length;
	unsigned long flags;
+	int rx_bd;
+	bool update_tail = false;

	spin_lock_irqsave(&lp->rx_lock, flags);

-	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+	/* Process all received buffers, passing them on to the
+	 * network stack. After this, the buffer descriptors will be
+	 * in an un-allocated state, where no skb is allocated for
+	 * them, and they are therefore not available for TEMAC/DMA.
+	 */
+	do {
+		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
+		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
+		unsigned int bdstat = be32_to_cpu(bd->app0);
+		int length;

-	bdstat = cur_p->app0;
-	while ((bdstat & STS_CTRL_APP0_CMPLT)) {
+		/* While this should not normally happen, we can end
+		 * here when GFP_ATOMIC allocations fail, and we
+		 * therefore have un-allocated buffers.
+		 */
+		if (!skb)
+			break;

-		skb = lp->rx_skb[lp->rx_bd_ci];
-		length = cur_p->app4 & 0x3FFF;
+		/* Loop over all completed buffer descriptors */
+		if (!(bdstat & STS_CTRL_APP0_CMPLT))
+			break;

-		dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
+				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+		/* The buffer is not valid for DMA anymore */
+		bd->phys = 0;
+		bd->len = 0;

+		length = be32_to_cpu(bd->app4) & 0x3FFF;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);
...
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {

-			skb->csum = cur_p->app3 & 0xFFFF;
+			/* Convert from device endianness (be32) to cpu
+			 * endianness, and if necessary swap the bytes
+			 * (back) for proper IP checksum byte order
+			 * (be16).
+			 */
+			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);
+		/* The skb buffer is now owned by network stack above */
+		lp->rx_skb[lp->rx_bd_ci] = NULL;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

-		new_skb = netdev_alloc_skb_ip_align(ndev,
-						    XTE_MAX_JUMBO_FRAME_SIZE);
-		if (!new_skb) {
-			spin_unlock_irqrestore(&lp->rx_lock, flags);
-			return;
+		rx_bd = lp->rx_bd_ci;
+		if (++lp->rx_bd_ci >= lp->rx_bd_num)
+			lp->rx_bd_ci = 0;
+	} while (rx_bd != lp->rx_bd_tail);
+
+	/* DMA operations will halt when the last buffer descriptor is
+	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
+	 * When that happens, no more interrupt events will be
+	 * generated. No IRQ_COAL or IRQ_DLY, and not even an
+	 * IRQ_ERR. To avoid stalling, we schedule a delayed work
+	 * when there is a potential risk of that happening. The work
+	 * will call this function, and thus re-schedule itself until
+	 * enough buffers are available again.
+	 */
+	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
+		schedule_delayed_work(&lp->restart_work, HZ / 1000);
+
+	/* Allocate new buffers for those buffer descriptors that were
+	 * passed to the network stack. Note that GFP_ATOMIC
+	 * allocations can fail (e.g. when a larger burst of
+	 * GFP_ATOMIC allocations occurs), so while we try to allocate
+	 * all buffers in the same interrupt where they were
+	 * processed, we continue with what we could get in case of
+	 * allocation failure. Allocation of remaining buffers will be
+	 * retried in following calls.
+	 */
+	while (1) {
+		struct sk_buff *skb;
+		struct cdmac_bd *bd;
+		dma_addr_t skb_dma_addr;
+
+		rx_bd = lp->rx_bd_tail + 1;
+		if (rx_bd >= lp->rx_bd_num)
+			rx_bd = 0;
+		bd = &lp->rx_bd_v[rx_bd];
+
+		if (bd->phys)
+			break;	/* All skb's allocated */
+
+		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
+		if (!skb) {
+			dev_warn(&ndev->dev, "skb alloc failed\n");
+			break;
		}

-		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
-		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
-					     XTE_MAX_JUMBO_FRAME_SIZE,
-					     DMA_FROM_DEVICE);
-		cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
-		lp->rx_skb[lp->rx_bd_ci] = new_skb;
+		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+					      XTE_MAX_JUMBO_FRAME_SIZE,
+					      DMA_FROM_DEVICE);
+		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
+						   skb_dma_addr))) {
+			dev_kfree_skb_any(skb);
+			break;
+		}

-		lp->rx_bd_ci++;
-		if (lp->rx_bd_ci >= RX_BD_NUM)
-			lp->rx_bd_ci = 0;
+		bd->phys = cpu_to_be32(skb_dma_addr);
+		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
+		lp->rx_skb[rx_bd] = skb;

-		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-		bdstat = cur_p->app0;
+		lp->rx_bd_tail = rx_bd;
+		update_tail = true;
	}
-	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
+
+	/* Move tail pointer when buffers have been allocated */
+	if (update_tail) {
+		lp->dma_out(lp, RX_TAILDESC_PTR,
+			    lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
+	}

	spin_unlock_irqrestore(&lp->rx_lock, flags);
+}
+
+/* Function scheduled to ensure a restart in case of DMA halt
+ * condition caused by running out of buffer descriptors.
+ */
+static void ll_temac_restart_work_func(struct work_struct *work)
+{
+	struct temac_local *lp = container_of(work, struct temac_local,
+					      restart_work.work);
+	struct net_device *ndev = lp->ndev;
+
+	ll_temac_recv(ndev);
}

static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
...

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
-	if (status & 0x080)
-		dev_err(&ndev->dev, "DMA error 0x%x\n", status);
+	if (status & (IRQ_ERR | IRQ_DMAERR))
+		dev_err_ratelimited(&ndev->dev,
+				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
+				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}
...

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
+	if (status & (IRQ_ERR | IRQ_DMAERR))
+		dev_err_ratelimited(&ndev->dev,
+				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
+				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}
...
		dev_err(lp->dev, "of_phy_connect() failed\n");
		return -ENODEV;
	}
-
+		phy_start(phydev);
+	} else if (strlen(lp->phy_name) > 0) {
+		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
+				     lp->phy_interface);
+		if (IS_ERR(phydev)) {
+			dev_err(lp->dev, "phy_connect() failed\n");
+			return PTR_ERR(phydev);
+		}
		phy_start(phydev);
	}

...

	dev_dbg(&ndev->dev, "temac_close()\n");

+	cancel_delayed_work_sync(&lp->restart_work);
+
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

...
}
#endif

-static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
-	if (!netif_running(ndev))
-		return -EINVAL;
-
-	if (!ndev->phydev)
-		return -EINVAL;
-
-	return phy_mii_ioctl(ndev->phydev, rq, cmd);
-}
-
static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
+	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
-	.ndo_do_ioctl = temac_ioctl,
+	.ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
...
	.attrs = temac_device_attrs,
};

-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+					    struct ethtool_ringparam *ering)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	ering->rx_max_pending = RX_BD_NUM_MAX;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->tx_max_pending = TX_BD_NUM_MAX;
+	ering->rx_pending = lp->rx_bd_num;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+	ering->tx_pending = lp->tx_bd_num;
+}
+
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+					   struct ethtool_ringparam *ering)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	if (ering->rx_pending > RX_BD_NUM_MAX ||
+	    ering->rx_mini_pending ||
+	    ering->rx_jumbo_pending ||
+	    ering->tx_pending > TX_BD_NUM_MAX)
+		return -EINVAL;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	lp->rx_bd_num = ering->rx_pending;
+	lp->tx_bd_num = ering->tx_pending;
+	return 0;
+}
+
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+					  struct ethtool_coalesce *ec)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+	return 0;
+}
+
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+					  struct ethtool_coalesce *ec)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netdev_err(ndev,
+			   "Please stop netif before applying configuration\n");
+		return -EFAULT;
+	}
+
+	if (ec->rx_max_coalesced_frames)
+		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+	if (ec->tx_max_coalesced_frames)
+		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+	/* With typical LocalLink clock speed of 200 MHz and
+	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+	 */
+	if (ec->rx_coalesce_usecs)
+		lp->coalesce_delay_rx =
+			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+	if (ec->tx_coalesce_usecs)
+		lp->coalesce_delay_tx =
+			min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+	return 0;
+}
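The 5.12 us figure follows directly from the stated assumptions: with C_PRESCALAR = 1023 each timer tick spans 1024 clock cycles, and 1024 / 200 MHz = 5.12 us. That is why both conversions above scale by 512/100 (= 5.12), and the min(255U, ...) clamp reflects the 8-bit delay field in CHNL_CTRL noted earlier.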
| 1352 | + |
---|
975 | 1353 | static const struct ethtool_ops temac_ethtool_ops = { |
---|
| 1354 | + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
---|
| 1355 | + ETHTOOL_COALESCE_MAX_FRAMES, |
---|
976 | 1356 | .nway_reset = phy_ethtool_nway_reset, |
---|
977 | 1357 | .get_link = ethtool_op_get_link, |
---|
978 | 1358 | .get_ts_info = ethtool_op_get_ts_info, |
---|
979 | 1359 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
---|
980 | 1360 | .set_link_ksettings = phy_ethtool_set_link_ksettings, |
---|
| 1361 | + .get_ringparam = ll_temac_ethtools_get_ringparam, |
---|
| 1362 | + .set_ringparam = ll_temac_ethtools_set_ringparam, |
---|
| 1363 | + .get_coalesce = ll_temac_ethtools_get_coalesce, |
---|
| 1364 | + .set_coalesce = ll_temac_ethtools_set_coalesce, |
---|
981 | 1365 | }; |
---|
982 | 1366 | |
---|
983 | | -static int temac_of_probe(struct platform_device *op) |
---|
| 1367 | +static int temac_probe(struct platform_device *pdev) |
---|
984 | 1368 | { |
---|
985 | | - struct device_node *np; |
---|
| 1369 | + struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev); |
---|
| 1370 | + struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; |
---|
986 | 1371 | struct temac_local *lp; |
---|
987 | 1372 | struct net_device *ndev; |
---|
988 | 1373 | const void *addr; |
---|
989 | 1374 | __be32 *p; |
---|
| 1375 | + bool little_endian; |
---|
990 | 1376 | int rc = 0; |
---|
991 | 1377 | |
---|
992 | 1378 | /* Init network device structure */ |
---|
993 | | - ndev = alloc_etherdev(sizeof(*lp)); |
---|
| 1379 | + ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp)); |
---|
994 | 1380 | if (!ndev) |
---|
995 | 1381 | return -ENOMEM; |
---|
996 | 1382 | |
---|
997 | | - platform_set_drvdata(op, ndev); |
---|
998 | | - SET_NETDEV_DEV(ndev, &op->dev); |
---|
999 | | - ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ |
---|
| 1383 | + platform_set_drvdata(pdev, ndev); |
---|
| 1384 | + SET_NETDEV_DEV(ndev, &pdev->dev); |
---|
1000 | 1385 | ndev->features = NETIF_F_SG; |
---|
1001 | 1386 | ndev->netdev_ops = &temac_netdev_ops; |
---|
1002 | 1387 | ndev->ethtool_ops = &temac_ethtool_ops; |
---|
.. | .. |
---|
1017 | 1402 | /* setup temac private info structure */ |
---|
1018 | 1403 | lp = netdev_priv(ndev); |
---|
1019 | 1404 | lp->ndev = ndev; |
---|
1020 | | - lp->dev = &op->dev; |
---|
| 1405 | + lp->dev = &pdev->dev; |
---|
1021 | 1406 | lp->options = XTE_OPTION_DEFAULTS; |
---|
| 1407 | + lp->rx_bd_num = RX_BD_NUM_DEFAULT; |
---|
| 1408 | + lp->tx_bd_num = TX_BD_NUM_DEFAULT; |
---|
1022 | 1409 | spin_lock_init(&lp->rx_lock); |
---|
1023 | | - mutex_init(&lp->indirect_mutex); |
---|
| 1410 | + INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); |
---|
| 1411 | + |
---|
| 1412 | + /* Setup mutex for synchronization of indirect register access */ |
---|
| 1413 | + if (pdata) { |
---|
| 1414 | + if (!pdata->indirect_lock) { |
---|
| 1415 | + dev_err(&pdev->dev, |
---|
| 1416 | + "indirect_lock missing in platform_data\n"); |
---|
| 1417 | + return -EINVAL; |
---|
| 1418 | + } |
---|
| 1419 | + lp->indirect_lock = pdata->indirect_lock; |
---|
| 1420 | + } else { |
---|
| 1421 | + lp->indirect_lock = devm_kmalloc(&pdev->dev, |
---|
| 1422 | + sizeof(*lp->indirect_lock), |
---|
| 1423 | + GFP_KERNEL); |
---|
| 1424 | + if (!lp->indirect_lock) |
---|
| 1425 | + return -ENOMEM; |
---|
| 1426 | + spin_lock_init(lp->indirect_lock); |
---|
| 1427 | + } |
---|
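|  | + /* Editorial note: the lock is passed in rather than always allocated
---|
|  | + * here because it may need to be shared, e.g. between the two Ethernet
---|
|  | + * channels of one TEMAC hard block that use a common indirect register
---|
|  | + * interface.
---|
|  | + */
---|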
1024 | 1428 | |
---|
1025 | 1429 | /* map device registers */ |
---|
1026 | | - lp->regs = of_iomap(op->dev.of_node, 0); |
---|
1027 | | - if (!lp->regs) { |
---|
1028 | | - dev_err(&op->dev, "could not map temac regs.\n"); |
---|
1029 | | - rc = -ENOMEM; |
---|
1030 | | - goto nodev; |
---|
| 1430 | + lp->regs = devm_platform_ioremap_resource(pdev, 0);
---|
| 1431 | + if (IS_ERR(lp->regs)) { |
---|
| 1432 | + dev_err(&pdev->dev, "could not map TEMAC registers\n"); |
---|
| 1433 | + return PTR_ERR(lp->regs);
---|
| 1434 | + } |
---|
| 1435 | + |
---|
| 1436 | + /* Select register access functions with the specified |
---|
| 1437 | + * endianness mode. Default for OF devices is big-endian. |
---|
| 1438 | + */ |
---|
| 1439 | + little_endian = false; |
---|
| 1440 | + if (temac_np) { |
---|
| 1441 | + if (of_get_property(temac_np, "little-endian", NULL)) |
---|
| 1442 | + little_endian = true; |
---|
| 1443 | + } else if (pdata) { |
---|
| 1444 | + little_endian = pdata->reg_little_endian; |
---|
| 1445 | + } |
---|
| 1446 | + if (little_endian) { |
---|
| 1447 | + lp->temac_ior = _temac_ior_le; |
---|
| 1448 | + lp->temac_iow = _temac_iow_le; |
---|
| 1449 | + } else { |
---|
| 1450 | + lp->temac_ior = _temac_ior_be; |
---|
| 1451 | + lp->temac_iow = _temac_iow_be; |
---|
1031 | 1452 | } |
---|
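|  | + /* From this point on, register access throughout the driver dispatches
---|
|  | + * through lp->temac_ior()/lp->temac_iow(), keeping the rest of the code
---|
|  | + * endianness-agnostic regardless of how the IP core was synthesized.
---|
|  | + */
---|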
1032 | 1453 | |
---|
1033 | 1454 | /* Setup checksum offload, but default to off if not specified */ |
---|
1034 | 1455 | lp->temac_features = 0; |
---|
1035 | | - p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); |
---|
1036 | | - if (p && be32_to_cpu(*p)) { |
---|
1037 | | - lp->temac_features |= TEMAC_FEATURE_TX_CSUM; |
---|
| 1456 | + if (temac_np) { |
---|
| 1457 | + p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL); |
---|
| 1458 | + if (p && be32_to_cpu(*p)) |
---|
| 1459 | + lp->temac_features |= TEMAC_FEATURE_TX_CSUM; |
---|
| 1460 | + p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL); |
---|
| 1461 | + if (p && be32_to_cpu(*p)) |
---|
| 1462 | + lp->temac_features |= TEMAC_FEATURE_RX_CSUM; |
---|
| 1463 | + } else if (pdata) { |
---|
| 1464 | + if (pdata->txcsum) |
---|
| 1465 | + lp->temac_features |= TEMAC_FEATURE_TX_CSUM; |
---|
| 1466 | + if (pdata->rxcsum) |
---|
| 1467 | + lp->temac_features |= TEMAC_FEATURE_RX_CSUM; |
---|
| 1468 | + } |
---|
| 1469 | + if (lp->temac_features & TEMAC_FEATURE_TX_CSUM) |
---|
1038 | 1470 | /* Can checksum TCP/UDP over IPv4. */ |
---|
1039 | 1471 | ndev->features |= NETIF_F_IP_CSUM; |
---|
1040 | | - } |
---|
1041 | | - p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); |
---|
1042 | | - if (p && be32_to_cpu(*p)) |
---|
1043 | | - lp->temac_features |= TEMAC_FEATURE_RX_CSUM; |
---|
1044 | 1472 | |
---|
1045 | | - /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ |
---|
1046 | | - np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); |
---|
1047 | | - if (!np) { |
---|
1048 | | - dev_err(&op->dev, "could not find DMA node\n"); |
---|
1049 | | - rc = -ENODEV; |
---|
1050 | | - goto err_iounmap; |
---|
1051 | | - } |
---|
| 1473 | + /* Defaults for IRQ delay/coalescing setup. These are |
---|
| 1474 | + * configuration values, so they do not belong in the device tree.
---|
| 1475 | + */ |
---|
| 1476 | + lp->coalesce_delay_tx = 0x10; |
---|
| 1477 | + lp->coalesce_count_tx = 0x22; |
---|
| 1478 | + lp->coalesce_delay_rx = 0xff; |
---|
| 1479 | + lp->coalesce_count_rx = 0x07; |
---|
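|  | + /* Illustrative arithmetic: at 5.12 us per delay count, the RX defaults
---|
|  | + * (0xff/0x07) mean an interrupt after 7 frames or ~1305 us, and the TX
---|
|  | + * defaults (0x10/0x22) after 34 frames or ~82 us.
---|
|  | + */
---|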
1052 | 1480 | |
---|
1053 | | - /* Setup the DMA register accesses, could be DCR or memory mapped */ |
---|
1054 | | - if (temac_dcr_setup(lp, op, np)) { |
---|
| 1481 | + /* Setup LocalLink DMA */ |
---|
| 1482 | + if (temac_np) { |
---|
| 1483 | + /* Find the DMA node, map the DMA registers, and |
---|
| 1484 | + * decode the DMA IRQs. |
---|
| 1485 | + */ |
---|
| 1486 | + dma_np = of_parse_phandle(temac_np, "llink-connected", 0); |
---|
| 1487 | + if (!dma_np) { |
---|
| 1488 | + dev_err(&pdev->dev, "could not find DMA node\n"); |
---|
| 1489 | + return -ENODEV; |
---|
| 1490 | + } |
---|
1055 | 1491 | |
---|
1056 | | - /* no DCR in the device tree, try non-DCR */ |
---|
1057 | | - lp->sdma_regs = of_iomap(np, 0); |
---|
1058 | | - if (lp->sdma_regs) { |
---|
1059 | | - lp->dma_in = temac_dma_in32; |
---|
1060 | | - lp->dma_out = temac_dma_out32; |
---|
1061 | | - dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); |
---|
| 1492 | + /* Setup the DMA register accesses, could be DCR or |
---|
| 1493 | + * memory mapped. |
---|
| 1494 | + */ |
---|
| 1495 | + if (temac_dcr_setup(lp, pdev, dma_np)) { |
---|
| 1496 | + /* no DCR in the device tree, try non-DCR */ |
---|
| 1497 | + lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0, |
---|
| 1498 | + NULL); |
---|
| 1499 | + if (IS_ERR(lp->sdma_regs)) { |
---|
| 1500 | + dev_err(&pdev->dev, |
---|
| 1501 | + "unable to map DMA registers\n"); |
---|
| 1502 | + of_node_put(dma_np); |
---|
| 1503 | + return PTR_ERR(lp->sdma_regs); |
---|
| 1504 | + } |
---|
| 1505 | + if (of_get_property(dma_np, "little-endian", NULL)) { |
---|
| 1506 | + lp->dma_in = temac_dma_in32_le; |
---|
| 1507 | + lp->dma_out = temac_dma_out32_le; |
---|
| 1508 | + } else { |
---|
| 1509 | + lp->dma_in = temac_dma_in32_be; |
---|
| 1510 | + lp->dma_out = temac_dma_out32_be; |
---|
| 1511 | + } |
---|
| 1512 | + dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs); |
---|
| 1513 | + } |
---|
| 1514 | + |
---|
| 1515 | + /* Get DMA RX and TX interrupts */ |
---|
| 1516 | + lp->rx_irq = irq_of_parse_and_map(dma_np, 0); |
---|
| 1517 | + lp->tx_irq = irq_of_parse_and_map(dma_np, 1); |
---|
| 1518 | + |
---|
| 1519 | + /* Finished with the DMA node; drop the reference */ |
---|
| 1520 | + of_node_put(dma_np); |
---|
| 1521 | + } else if (pdata) { |
---|
| 1522 | + /* 2nd memory resource specifies DMA registers */ |
---|
| 1523 | + lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1); |
---|
| 1524 | + if (IS_ERR(lp->sdma_regs)) { |
---|
| 1525 | + dev_err(&pdev->dev, |
---|
| 1526 | + "could not map DMA registers\n"); |
---|
| 1527 | + return PTR_ERR(lp->sdma_regs); |
---|
| 1528 | + } |
---|
| 1529 | + if (pdata->dma_little_endian) { |
---|
| 1530 | + lp->dma_in = temac_dma_in32_le; |
---|
| 1531 | + lp->dma_out = temac_dma_out32_le; |
---|
1062 | 1532 | } else { |
---|
1063 | | - dev_err(&op->dev, "unable to map DMA registers\n"); |
---|
1064 | | - of_node_put(np); |
---|
1065 | | - goto err_iounmap; |
---|
| 1533 | + lp->dma_in = temac_dma_in32_be; |
---|
| 1534 | + lp->dma_out = temac_dma_out32_be; |
---|
| 1535 | + } |
---|
| 1536 | + |
---|
| 1537 | + /* Get DMA RX and TX interrupts */ |
---|
| 1538 | + lp->rx_irq = platform_get_irq(pdev, 0); |
---|
| 1539 | + lp->tx_irq = platform_get_irq(pdev, 1); |
---|
| 1540 | + |
---|
| 1541 | + /* IRQ delay/coalescing setup */ |
---|
| 1542 | + if (pdata->tx_irq_timeout || pdata->tx_irq_count) { |
---|
| 1543 | + lp->coalesce_delay_tx = pdata->tx_irq_timeout; |
---|
| 1544 | + lp->coalesce_count_tx = pdata->tx_irq_count; |
---|
| 1545 | + } |
---|
| 1546 | + if (pdata->rx_irq_timeout || pdata->rx_irq_count) { |
---|
| 1547 | + lp->coalesce_delay_rx = pdata->rx_irq_timeout; |
---|
| 1548 | + lp->coalesce_count_rx = pdata->rx_irq_count; |
---|
1066 | 1549 | } |
---|
1067 | 1550 | } |
---|
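|  | + /* Hypothetical device-tree fragment for the OF path above (node names
---|
|  | + * and addresses are illustrative only):
---|
|  | + *
---|
|  | + *	ethernet@81000000 {
---|
|  | + *		compatible = "xlnx,xps-ll-temac-1.01.b";
---|
|  | + *		reg = <0x81000000 0x40>;
---|
|  | + *		xlnx,txcsum = <0x1>;
---|
|  | + *		xlnx,rxcsum = <0x1>;
---|
|  | + *		llink-connected = <&dma>;
---|
|  | + *		phy-handle = <&phy0>;
---|
|  | + *	};
---|
|  | + */
---|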
1068 | 1551 | |
---|
1069 | | - lp->rx_irq = irq_of_parse_and_map(np, 0); |
---|
1070 | | - lp->tx_irq = irq_of_parse_and_map(np, 1); |
---|
1071 | | - |
---|
1072 | | - of_node_put(np); /* Finished with the DMA node; drop the reference */ |
---|
1073 | | - |
---|
1074 | | - if (!lp->rx_irq || !lp->tx_irq) { |
---|
1075 | | - dev_err(&op->dev, "could not determine irqs\n"); |
---|
1076 | | - rc = -ENOMEM; |
---|
1077 | | - goto err_iounmap_2; |
---|
| 1552 | + /* Sanity-check the DMA RX and TX interrupts obtained above.
---|
|  | + * irq_of_parse_and_map() returns 0 (not a negative errno) on
---|
|  | + * failure, so treat both zero and negative values as errors.
---|
|  | + */
---|
| 1553 | + if (lp->rx_irq <= 0) {
---|
|  | + rc = lp->rx_irq ? lp->rx_irq : -EINVAL;
---|
| 1554 | + if (rc != -EPROBE_DEFER)
---|
| 1555 | + dev_err(&pdev->dev, "could not get DMA RX irq\n");
---|
| 1556 | + return rc;
---|
| 1557 | + }
---|
| 1558 | + if (lp->tx_irq <= 0) {
---|
|  | + rc = lp->tx_irq ? lp->tx_irq : -EINVAL;
---|
| 1559 | + if (rc != -EPROBE_DEFER)
---|
| 1560 | + dev_err(&pdev->dev, "could not get DMA TX irq\n");
---|
| 1561 | + return rc;
---|
1078 | 1562 | } |
---|
1079 | 1563 | |
---|
1080 | | - |
---|
1081 | | - /* Retrieve the MAC address */ |
---|
1082 | | - addr = of_get_mac_address(op->dev.of_node); |
---|
1083 | | - if (!addr) { |
---|
1084 | | - dev_err(&op->dev, "could not find MAC address\n"); |
---|
1085 | | - rc = -ENODEV; |
---|
1086 | | - goto err_iounmap_2; |
---|
| 1564 | + if (temac_np) { |
---|
| 1565 | + /* Retrieve the MAC address */ |
---|
| 1566 | + addr = of_get_mac_address(temac_np); |
---|
| 1567 | + if (IS_ERR(addr)) { |
---|
| 1568 | + dev_err(&pdev->dev, "could not find MAC address\n"); |
---|
| 1569 | + return -ENODEV; |
---|
| 1570 | + } |
---|
| 1571 | + temac_init_mac_address(ndev, addr); |
---|
| 1572 | + } else if (pdata) { |
---|
| 1573 | + temac_init_mac_address(ndev, pdata->mac_addr); |
---|
1087 | 1574 | } |
---|
1088 | | - temac_init_mac_address(ndev, addr); |
---|
1089 | 1575 | |
---|
1090 | | - rc = temac_mdio_setup(lp, op->dev.of_node); |
---|
| 1576 | + rc = temac_mdio_setup(lp, pdev); |
---|
1091 | 1577 | if (rc) |
---|
1092 | | - dev_warn(&op->dev, "error registering MDIO bus\n"); |
---|
| 1578 | + dev_warn(&pdev->dev, "error registering MDIO bus\n"); |
---|
1093 | 1579 | |
---|
1094 | | - lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); |
---|
1095 | | - if (lp->phy_node) |
---|
1096 | | - dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np); |
---|
| 1580 | + if (temac_np) { |
---|
| 1581 | + lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0); |
---|
| 1582 | + if (lp->phy_node) |
---|
| 1583 | + dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
---|
| 1584 | + } else if (pdata) { |
---|
| 1585 | + snprintf(lp->phy_name, sizeof(lp->phy_name), |
---|
| 1586 | + PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr); |
---|
| 1587 | + lp->phy_interface = pdata->phy_interface; |
---|
| 1588 | + } |
---|
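|  | + /* With PHY_ID_FMT ("%s:%02x"), phy_name ends up as "<mdio-bus-id>:07"
---|
|  | + * for pdata->phy_addr == 7 (illustrative); the bus id itself is set
---|
|  | + * when temac_mdio_setup() registers the MDIO bus.
---|
|  | + */
---|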
1097 | 1589 | |
---|
1098 | 1590 | /* Add the device attributes */ |
---|
1099 | 1591 | rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); |
---|
1100 | 1592 | if (rc) { |
---|
1101 | 1593 | dev_err(lp->dev, "Error creating sysfs files\n"); |
---|
1102 | | - goto err_iounmap_2; |
---|
| 1594 | + goto err_sysfs_create; |
---|
1103 | 1595 | } |
---|
1104 | 1596 | |
---|
1105 | 1597 | rc = register_netdev(lp->ndev); |
---|
.. | .. |
---|
1110 | 1602 | |
---|
1111 | 1603 | return 0; |
---|
1112 | 1604 | |
---|
1113 | | - err_register_ndev: |
---|
| 1605 | +err_register_ndev: |
---|
1114 | 1606 | sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); |
---|
1115 | | - err_iounmap_2: |
---|
1116 | | - if (lp->sdma_regs) |
---|
1117 | | - iounmap(lp->sdma_regs); |
---|
1118 | | - err_iounmap: |
---|
1119 | | - iounmap(lp->regs); |
---|
1120 | | - nodev: |
---|
1121 | | - free_netdev(ndev); |
---|
1122 | | - ndev = NULL; |
---|
| 1607 | +err_sysfs_create: |
---|
| 1608 | + of_node_put(lp->phy_node);
---|
| 1610 | + temac_mdio_teardown(lp); |
---|
1123 | 1611 | return rc; |
---|
1124 | 1612 | } |
---|
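|  | + /* Hypothetical non-DT usage sketch: a board file could describe the
---|
|  | + * device through platform data (field names as referenced above from
---|
|  | + * linux/platform_data/xilinx-ll-temac.h; all values illustrative):
---|
|  | + *
---|
|  | + *	static DEFINE_SPINLOCK(temac_indirect_lock);
---|
|  | + *
---|
|  | + *	static struct ll_temac_platform_data temac_pdata = {
---|
|  | + *		.mac_addr	= { 0x00, 0x0a, 0x35, 0x00, 0x00, 0x01 },
---|
|  | + *		.phy_addr	= 7,
---|
|  | + *		.indirect_lock	= &temac_indirect_lock,
---|
|  | + *		.txcsum		= 1,
---|
|  | + *		.rxcsum		= 1,
---|
|  | + *		.rx_irq_timeout	= 0xff,
---|
|  | + *		.rx_irq_count	= 1,
---|
|  | + *	};
---|
|  | + */
---|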
1125 | 1613 | |
---|
1126 | | -static int temac_of_remove(struct platform_device *op) |
---|
| 1614 | +static int temac_remove(struct platform_device *pdev) |
---|
1127 | 1615 | { |
---|
1128 | | - struct net_device *ndev = platform_get_drvdata(op); |
---|
| 1616 | + struct net_device *ndev = platform_get_drvdata(pdev); |
---|
1129 | 1617 | struct temac_local *lp = netdev_priv(ndev); |
---|
1130 | 1618 | |
---|
1131 | | - temac_mdio_teardown(lp); |
---|
1132 | 1619 | unregister_netdev(ndev); |
---|
1133 | 1620 | sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); |
---|
1134 | | - of_node_put(lp->phy_node); |
---|
1135 | | - lp->phy_node = NULL; |
---|
1136 | | - iounmap(lp->regs); |
---|
1137 | | - if (lp->sdma_regs) |
---|
1138 | | - iounmap(lp->sdma_regs); |
---|
1139 | | - free_netdev(ndev); |
---|
| 1621 | + of_node_put(lp->phy_node);
---|
| 1623 | + temac_mdio_teardown(lp); |
---|
1140 | 1624 | return 0; |
---|
1141 | 1625 | } |
---|
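|  | + /* Editorial note: teardown is short because the register mappings,
---|
|  | + * netdev allocation and indirect-access lock were devm-managed in
---|
|  | + * temac_probe(); only the non-devm steps (netdev registration, sysfs
---|
|  | + * group, PHY node reference, MDIO bus) need explicit unwinding here.
---|
|  | + */
---|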
1142 | 1626 | |
---|
.. | .. |
---|
1149 | 1633 | }; |
---|
1150 | 1634 | MODULE_DEVICE_TABLE(of, temac_of_match); |
---|
1151 | 1635 | |
---|
1152 | | -static struct platform_driver temac_of_driver = { |
---|
1153 | | - .probe = temac_of_probe, |
---|
1154 | | - .remove = temac_of_remove, |
---|
| 1636 | +static struct platform_driver temac_driver = { |
---|
| 1637 | + .probe = temac_probe, |
---|
| 1638 | + .remove = temac_remove, |
---|
1155 | 1639 | .driver = { |
---|
1156 | 1640 | .name = "xilinx_temac", |
---|
1157 | 1641 | .of_match_table = temac_of_match, |
---|
1158 | 1642 | }, |
---|
1159 | 1643 | }; |
---|
1160 | 1644 | |
---|
1161 | | -module_platform_driver(temac_of_driver); |
---|
| 1645 | +module_platform_driver(temac_driver); |
---|
1162 | 1646 | |
---|
1163 | 1647 | MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver"); |
---|
1164 | 1648 | MODULE_AUTHOR("Yoshio Kashiwagi"); |
---|