@@ -306,7 +306,7 @@
 static int lance_close(struct net_device *dev);
 static struct net_device_stats *lance_get_stats(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
-static void lance_tx_timeout (struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
 
 
 
@@ -913,7 +913,7 @@
 }
 
 
-static void lance_tx_timeout (struct net_device *dev)
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
 {
 	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
 	int ioaddr = dev->base_addr;
@@ -997,7 +997,7 @@
 		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
 		lp->tx_ring[entry].base =
 			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
-		dev_kfree_skb(skb);
+		dev_consume_skb_irq(skb);
 	} else {
 		lp->tx_skbuff[entry] = skb;
 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
@@ -1084,7 +1084,7 @@
 	/* We must free the original skb if it's not a data-only copy
 	   in the bounce buffer. */
 	if (lp->tx_skbuff[entry]) {
-		dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+		dev_consume_skb_irq(lp->tx_skbuff[entry]);
 		lp->tx_skbuff[entry] = NULL;
 	}
 	dirty_tx++;
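The prototype change above tracks the `.ndo_tx_timeout` callback in `struct net_device_ops`, which on recent kernels passes the index of the stalled TX queue as a second argument. A minimal sketch of how a driver hooks up a handler with the new signature is shown below; the `my_tx_timeout` and `my_netdev_ops` names are illustrative only and are not part of this patch.

```c
#include <linux/netdevice.h>

/* Sketch only: a timeout handler matching the two-argument prototype.
 * txqueue identifies which TX queue stalled; a single-queue driver
 * such as lance can simply ignore it. */
static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timed out on queue %u\n", txqueue);
	/* ... reset the hardware TX path here ... */
	netif_wake_queue(dev);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_tx_timeout = my_tx_timeout,
	/* other callbacks omitted in this sketch */
};
```

The dev_consume_skb_irq() calls likewise record the buffers as consumed (successfully transmitted) rather than dropped, so drop-monitoring tools do not count normal TX completions as packet drops, while remaining safe to call from interrupt context.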