@@ -1204,8 +1204,12 @@
 	if (!q_vector) {
 		q_vector = kzalloc(size, GFP_KERNEL);
 	} else if (size > ksize(q_vector)) {
-		kfree_rcu(q_vector, rcu);
-		q_vector = kzalloc(size, GFP_KERNEL);
+		struct igb_q_vector *new_q_vector;
+
+		new_q_vector = kzalloc(size, GFP_KERNEL);
+		if (new_q_vector)
+			kfree_rcu(q_vector, rcu);
+		q_vector = new_q_vector;
 	} else {
 		memset(q_vector, 0, size);
 	}
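Note on the q_vector hunk above: the replacement is allocated first, and the old vector is queued for RCU freeing only if that allocation succeeded, so an allocation failure no longer frees memory that is still reachable elsewhere. Below is a minimal userspace sketch of the same allocate-before-free ordering; struct blob, grow_blob() and the use of calloc()/free() in place of kzalloc()/kfree_rcu() are illustrative, not driver code, and unlike the hunk (which ends up with a NULL q_vector on failure) the sketch simply hands the old buffer back.

/* Allocate-before-free reallocation sketch: the old buffer is released only
 * after the replacement allocation has succeeded, so failure never leaves a
 * dangling pointer.  All names here are hypothetical.
 */
#include <stdlib.h>
#include <string.h>

struct blob {
	size_t size;
	unsigned char data[];
};

/* Returns the (possibly unchanged) blob; on allocation failure the old blob
 * is left intact and still owned by the caller.
 */
static struct blob *grow_blob(struct blob *old, size_t new_size)
{
	struct blob *new_blob;

	if (old && new_size <= old->size) {
		memset(old->data, 0, old->size);	/* reuse in place */
		return old;
	}

	new_blob = calloc(1, sizeof(*new_blob) + new_size);
	if (!new_blob)
		return old;			/* keep the old buffer on failure */

	new_blob->size = new_size;
	free(old);				/* release the old buffer only now */
	return new_blob;
}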
@@ -3817,9 +3821,7 @@
 	igb_release_hw_control(adapter);
 
 #ifdef CONFIG_PCI_IOV
-	rtnl_lock();
 	igb_disable_sriov(pdev);
-	rtnl_unlock();
 #endif
 
 	unregister_netdev(netdev);
@@ -3855,8 +3857,9 @@
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 
-	/* Virtualization features not supported on i210 family. */
-	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+	/* Virtualization features not supported on i210 and 82580 family. */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+	    (hw->mac.type == e1000_82580))
 		return;
 
 	/* Of the below we really only want the effect of getting
@@ -4729,6 +4732,10 @@
 static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
 				  struct igb_ring *rx_ring)
 {
+#if (PAGE_SIZE < 8192)
+	struct e1000_hw *hw = &adapter->hw;
+#endif
+
 	/* set build_skb and buffer size flags */
 	clear_ring_build_skb_enabled(rx_ring);
 	clear_ring_uses_large_buffer(rx_ring);
@@ -4739,10 +4746,9 @@
 	set_ring_build_skb_enabled(rx_ring);
 
 #if (PAGE_SIZE < 8192)
-	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
-		return;
-
-	set_ring_uses_large_buffer(rx_ring);
+	if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
+	    rd32(E1000_RCTL) & E1000_RCTL_SBP)
+		set_ring_uses_large_buffer(rx_ring);
 #endif
 }
 
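Note on the two igb_set_rx_buffer_len() hunks: on builds where PAGE_SIZE < 8192, large receive buffers are now selected not only when the configured maximum frame exceeds IGB_MAX_FRAME_BUILD_SKB but also when the RCTL store-bad-packets bit (E1000_RCTL_SBP) is set, which is why the hw pointer is added for rd32(). A self-contained sketch of that predicate follows; the struct, function name and constant values are illustrative stand-ins, not the driver's definitions.

/* Buffer-size decision sketch, assuming PAGE_SIZE < 8192. */
#include <stdbool.h>
#include <stdint.h>

#define MAX_FRAME_BUILD_SKB	1536		/* illustrative threshold */
#define RCTL_SBP		0x00000004	/* store-bad-packets bit (illustrative) */

struct rx_cfg {
	uint32_t max_frame_size;	/* configured MTU plus overhead */
	uint32_t rctl;			/* cached receive-control register */
};

/* Large (page-split) buffers are wanted when the frame cannot fit the
 * build_skb buffer, or when store-bad-packets is enabled and oversized or
 * erroneous frames may be delivered anyway.
 */
static bool wants_large_rx_buffer(const struct rx_cfg *cfg)
{
	return cfg->max_frame_size > MAX_FRAME_BUILD_SKB ||
	       (cfg->rctl & RCTL_SBP);
}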
@@ -5879,7 +5885,7 @@
 	 */
 	if (tx_ring->launchtime_enable) {
 		ts = ktime_to_timespec64(first->skb->tstamp);
-		first->skb->tstamp = ktime_set(0, 0);
+		skb_txtime_consumed(first->skb);
 		context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
 	} else {
 		context_desc->seqnum_seed = 0;
@@ -9451,6 +9457,11 @@
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
+	if (state == pci_channel_io_normal) {
+		dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	}
+
 	netif_device_detach(netdev);
 
 	if (state == pci_channel_io_perm_failure)
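Note on the igb_io_error_detected() hunk: a reported pci_channel_io_normal state (a non-fatal error on a still-usable channel) now only logs a warning and returns PCI_ERS_RESULT_CAN_RECOVER, instead of falling through to the detach/reset path. The standalone sketch below mirrors that decision flow; the local enums and error_detected() helper are simplified stand-ins for the kernel types, and the detach/disable side effects are reduced to comments.

/* Simplified decision flow of the error handler after this hunk. */
#include <stdio.h>

enum channel_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };
enum ers_result    { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_DISCONNECT };

static enum ers_result error_detected(enum channel_state state)
{
	if (state == IO_NORMAL) {
		/* Non-fatal error: warn and let the PCI error-recovery core
		 * attempt recovery without touching the device.
		 */
		fprintf(stderr, "Non-correctable non-fatal error reported.\n");
		return ERS_CAN_RECOVER;
	}

	/* netif_device_detach(netdev) would happen here. */

	if (state == IO_PERM_FAILURE)
		return ERS_DISCONNECT;		/* device is permanently gone */

	/* pci_disable_device(pdev) would happen here. */
	return ERS_NEED_RESET;			/* request a slot reset */
}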
---|