```diff
@@ -74,18 +74,15 @@
 static void moxart_mac_free_memory(struct net_device *ndev)
 {
         struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-        int i;
-
-        for (i = 0; i < RX_DESC_NUM; i++)
-                dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
-                                 priv->rx_buf_size, DMA_FROM_DEVICE);
 
         if (priv->tx_desc_base)
-                dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+                dma_free_coherent(&priv->pdev->dev,
+                                  TX_REG_DESC_SIZE * TX_DESC_NUM,
                                   priv->tx_desc_base, priv->tx_base);
 
         if (priv->rx_desc_base)
-                dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+                dma_free_coherent(&priv->pdev->dev,
+                                  RX_REG_DESC_SIZE * RX_DESC_NUM,
                                   priv->rx_desc_base, priv->rx_base);
 
         kfree(priv->tx_buf_base);
```
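The theme of most hunks in this patch: DMA API calls must name the struct device that actually masters DMA. Passing NULL to dma_free_coherent() is invalid, and &ndev->dev is the netdev's class device, which carries no DMA masks or ops; the platform device does. Note also that the RX unmap loop is not simply deleted here: it reappears in moxart_mac_stop() below, paired with the mapping done in moxart_mac_setup_desc_ring(). These hunks rely on a pdev backpointer in the driver's private struct; the hunk adding that field is not in this excerpt, but presumably it looks something like the sketch below (the real struct moxart_mac_priv_t has more fields).

```c
/* Hypothetical sketch of the implied private-struct change; the field
 * order and surrounding layout are assumptions, not the real
 * moxart_mac_priv_t definition. */
struct moxart_mac_priv_t {
        struct net_device *ndev;
        struct platform_device *pdev;   /* added: DMA calls use &pdev->dev */
        void __iomem *base;
        /* ... descriptor rings, buffer bookkeeping, locks ... */
};
```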
```diff
@@ -145,11 +142,11 @@
                                   desc + RX_REG_OFFSET_DESC1);
 
                 priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
-                priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+                priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
                                                      priv->rx_buf[i],
                                                      priv->rx_buf_size,
                                                      DMA_FROM_DEVICE);
-                if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+                if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
                         netdev_err(ndev, "DMA mapping error\n");
 
                 moxart_desc_write(priv->rx_mapping[i],
```
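The same substitution on the mapping side of moxart_mac_setup_desc_ring(). A mapping must be created, synced, and torn down against one and the same device; mixing &ndev->dev here with &priv->pdev->dev elsewhere would itself be a bug. A minimal sketch of the map-and-check idiom, with placeholder names (example_map_rx_buf is not a driver symbol):

```c
#include <linux/dma-mapping.h>

/* Placeholder helper: map one RX buffer and fail cleanly. The handle
 * left behind on error is poisoned and must never be unmapped. */
static int example_map_rx_buf(struct device *dma_dev, void *buf,
                              size_t len, dma_addr_t *handle)
{
        *handle = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dma_dev, *handle))
                return -ENOMEM;
        return 0;
}
```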
```diff
@@ -191,6 +188,7 @@
 static int moxart_mac_stop(struct net_device *ndev)
 {
         struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+        int i;
 
         napi_disable(&priv->napi);
 
@@ -201,6 +199,11 @@
 
         /* disable all functions */
         writel(0, priv->base + REG_MAC_CTRL);
+
+        /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+        for (i = 0; i < RX_DESC_NUM; i++)
+                dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+                                 priv->rx_buf_size, DMA_FROM_DEVICE);
 
         return 0;
 }
```
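Relocating the RX unmap loop from moxart_mac_free_memory() to moxart_mac_stop() restores symmetry between setup and teardown: the buffers are mapped in moxart_mac_setup_desc_ring(), which runs on the open path, so each open/stop cycle now maps and unmaps exactly once instead of leaving mappings alive until remove time. Condensed lifecycle, assuming the usual moxart open/stop flow:

```c
/* Lifecycle sketch only, not driver code (bodies elided):
 *
 *   moxart_mac_open() -> moxart_mac_setup_desc_ring()
 *                          -> dma_map_single(&priv->pdev->dev, ...)
 *   moxart_mac_stop() -> dma_unmap_single(&priv->pdev->dev, ...)
 *
 * Before the patch, the unmap happened only in
 * moxart_mac_free_memory() on the remove path.
 */
```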
```diff
@@ -238,7 +241,7 @@
                 if (len > RX_BUF_SIZE)
                         len = RX_BUF_SIZE;
 
-                dma_sync_single_for_cpu(&ndev->dev,
+                dma_sync_single_for_cpu(&priv->pdev->dev,
                                         priv->rx_mapping[rx_head],
                                         priv->rx_buf_size, DMA_FROM_DEVICE);
                 skb = netdev_alloc_skb_ip_align(ndev, len);
```
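On RX completion, dma_sync_single_for_cpu() hands buffer ownership back to the CPU before the received bytes are copied into an skb; it too must name the device that owns the mapping. Generic shape, with placeholder names:

```c
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Placeholder RX-completion sketch: sync toward the CPU with the
 * mapping's own device and direction, after which reads are coherent. */
static void example_rx_copy(struct device *dma_dev, dma_addr_t map,
                            const void *rx_buf, void *dst, size_t len)
{
        dma_sync_single_for_cpu(dma_dev, map, len, DMA_FROM_DEVICE);
        memcpy(dst, rx_buf, len);
}
```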
```diff
@@ -292,13 +295,13 @@
         unsigned int tx_tail = priv->tx_tail;
 
         while (tx_tail != tx_head) {
-                dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+                dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
                                  priv->tx_len[tx_tail], DMA_TO_DEVICE);
 
                 ndev->stats.tx_packets++;
                 ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
 
-                dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
+                dev_consume_skb_irq(priv->tx_skb[tx_tail]);
                 priv->tx_skb[tx_tail] = NULL;
 
                 tx_tail = TX_NEXT(tx_tail);
```
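Two independent fixes land in this hunk. The unmap now targets the platform device, and dev_kfree_skb_irq() becomes dev_consume_skb_irq(): both free an skb from hard-IRQ context, but the consume variant records a normal completion, so drop monitors such as dropwatch stop flagging every successful transmit as a packet drop. In a nutshell:

```c
/* Both helpers are real kernel APIs; the branch is illustrative. */
if (tx_completed_ok)
        dev_consume_skb_irq(skb);       /* normal completion: not a drop */
else
        dev_kfree_skb_irq(skb);         /* discard path: counts as a drop */
```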
```diff
@@ -329,14 +332,15 @@
         return IRQ_HANDLED;
 }
 
-static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
+                                         struct net_device *ndev)
 {
         struct moxart_mac_priv_t *priv = netdev_priv(ndev);
         void *desc;
         unsigned int len;
         unsigned int tx_head;
         u32 txdes1;
-        int ret = NETDEV_TX_BUSY;
+        netdev_tx_t ret = NETDEV_TX_BUSY;
 
         spin_lock_irq(&priv->txlock);
 
```
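The new signature matches the ndo_start_xmit prototype, which returns the dedicated netdev_tx_t type rather than a plain int; NETDEV_TX_OK and NETDEV_TX_BUSY pass through an int just as well, but the typedef exists so sparse and the compiler can catch mismatched handlers. For reference, the net_device_ops member this method is wired into:

```c
/* From struct net_device_ops in include/linux/netdevice.h: */
netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
                              struct net_device *dev);
```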
```diff
@@ -355,9 +359,9 @@
 
         len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 
-        priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+        priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
                                                    len, DMA_TO_DEVICE);
-        if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+        if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
                 netdev_err(ndev, "DMA mapping error\n");
                 goto out_unlock;
         }
@@ -376,7 +380,7 @@
                 len = ETH_ZLEN;
         }
 
-        dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+        dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
                                    priv->tx_buf_size, DMA_TO_DEVICE);
 
         txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
```
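The TX path applies the mirror-image ownership rule: map the payload, bail out on a mapping error, then sync toward the device before the NIC may fetch the data. A minimal sketch with placeholder names (example_tx_map is not a driver symbol):

```c
#include <linux/dma-mapping.h>

/* Placeholder TX sketch: the device owns the buffer only after
 * dma_sync_single_for_device() returns. */
static int example_tx_map(struct device *dma_dev, void *data, size_t len,
                          dma_addr_t *map)
{
        *map = dma_map_single(dma_dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, *map))
                return -ENOMEM;
        dma_sync_single_for_device(dma_dev, *map, len, DMA_TO_DEVICE);
        /* ...write *map into the TX descriptor and kick the engine... */
        return 0;
}
```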
```diff
@@ -476,22 +480,22 @@
 
         priv = netdev_priv(ndev);
         priv->ndev = ndev;
+        priv->pdev = pdev;
 
-        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        ndev->base_addr = res->start;
-        priv->base = devm_ioremap_resource(p_dev, res);
+        priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
         if (IS_ERR(priv->base)) {
                 dev_err(p_dev, "devm_ioremap_resource failed\n");
                 ret = PTR_ERR(priv->base);
                 goto init_fail;
         }
+        ndev->base_addr = res->start;
 
         spin_lock_init(&priv->txlock);
 
         priv->tx_buf_size = TX_BUF_SIZE;
         priv->rx_buf_size = RX_BUF_SIZE;
 
-        priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+        priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
                                                 TX_DESC_NUM, &priv->tx_base,
                                                 GFP_DMA | GFP_KERNEL);
         if (!priv->tx_desc_base) {
@@ -499,7 +503,7 @@
                 goto init_fail;
         }
 
-        priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
+        priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
                                                 RX_DESC_NUM, &priv->rx_base,
                                                 GFP_DMA | GFP_KERNEL);
         if (!priv->rx_desc_base) {
```
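Three changes meet in probe: priv->pdev is recorded for the DMA call sites above; the open-coded platform_get_resource() plus devm_ioremap_resource() pair collapses into devm_platform_get_and_ioremap_resource(); and ndev->base_addr is read from res only after the helper has validated the resource, where the old code dereferenced res->start before any NULL check. The coherent descriptor allocations also finally get a real device instead of NULL. The helper is roughly equivalent to the following (simplified sketch; the in-tree version lives in drivers/base/platform.c):

```c
/* Simplified sketch of devm_platform_get_and_ioremap_resource(), an
 * existing kernel helper; error-handling details differ in-tree. */
static void __iomem *
sketch_get_and_ioremap(struct platform_device *pdev, unsigned int index,
                       struct resource **res)
{
        struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, index);

        if (res)
                *res = r;
        return devm_ioremap_resource(&pdev->dev, r); /* rejects r == NULL */
}
```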
---|