```diff
@@ -137,6 +137,20 @@
 	}
 }
 
+static int netif_local_xmit_active(struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
+			return 1;
+	}
+
+	return 0;
+}
+
 static void poll_one_napi(struct napi_struct *napi)
 {
 	int work;
@@ -183,7 +197,10 @@
 	if (!ni || down_trylock(&ni->dev_lock))
 		return;
 
-	if (!netif_running(dev)) {
+	/* Some drivers will take the same locks in poll and xmit,
+	 * we can't poll if local CPU is already in xmit.
+	 */
+	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
 		up(&ni->dev_lock);
 		return;
 	}
```
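For context on why this check is sufficient: the core transmit path records which CPU currently holds each queue's xmit lock in `txq->xmit_lock_owner`, which is exactly the field `netif_local_xmit_active()` reads. Below is a minimal sketch of that owner bookkeeping, simplified from `__netif_tx_lock()` / `__netif_tx_unlock()` in `include/linux/netdevice.h`; the `sketch_` helper names are hypothetical stand-ins, not the patch's code:

```c
#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Simplified sketch of the xmit-lock owner bookkeeping done by the
 * core tx path (cf. __netif_tx_lock()/__netif_tx_unlock()).  While a
 * CPU is inside the driver's transmit path for a queue,
 * xmit_lock_owner holds that CPU's id; otherwise it is -1.
 */
static inline void sketch_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	/* Pairs with the READ_ONCE() in netif_local_xmit_active(). */
	WRITE_ONCE(txq->xmit_lock_owner, cpu);
}

static inline void sketch_tx_unlock(struct netdev_queue *txq)
{
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	spin_unlock(&txq->_xmit_lock);
}
```

So when `netif_local_xmit_active()` returns 1, the current CPU is already inside the transmit path for this device; letting netpoll drive the driver's NAPI poll from that context could retake driver locks that are already held and self-deadlock, which is what the new bail-out avoids.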