```diff
@@ -219,14 +219,14 @@
 static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_lock(&sd->input_pkt_queue.lock);
+	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
 static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_unlock(&sd->input_pkt_queue.lock);
+	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
```
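These hunks are from net/core/dev.c as patched by the PREEMPT_RT (-rt) series; the two number columns in the original rendering were the old and new line numbers. This first pair switches the backlog queue lock to the raw variant: on PREEMPT_RT, `spinlock_t` is backed by an rtmutex and may sleep, which is illegal here because `rps_lock()` is taken with hard interrupts disabled. A minimal sketch of the rule, with `example_lock` and `example_irqs_off_section()` as made-up names:

```c
#include <linux/spinlock.h>

/* Hypothetical illustration, not from the patch: raw_spinlock_t keeps
 * the classic busy-waiting behavior even on PREEMPT_RT, so it is still
 * legal inside a hard-IRQs-off section where sleeping is forbidden.
 */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_irqs_off_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* hard IRQs off: no sleeping allowed */
	raw_spin_lock(&example_lock);	/* a sleeping spinlock_t would splat here on RT */
	/* ... manipulate per-CPU queue state ... */
	raw_spin_unlock(&example_lock);
	local_irq_restore(flags);
}
```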
```diff
@@ -2723,6 +2723,7 @@
 	sd->output_queue_tailp = &q->next_sched;
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 void __netif_schedule(struct Qdisc *q)
```
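`preempt_check_resched_rt()` appears after almost every `local_irq_restore()`/`local_irq_enable()` in this patch. On RT, raising a softirq may wake the ksoftirqd thread, but with interrupts disabled the resulting need-resched flag is not acted upon at interrupt exit; the helper performs the deferred preemption check once interrupts are back on, and compiles away on !RT. Paraphrased from the -rt series (the exact definition is an assumption):

```c
/* Paraphrased -rt helper (assumption about the exact form):
 * fold a reschedule that was requested while IRQs were off.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_check_resched_rt()	preempt_check_resched()
#else
# define preempt_check_resched_rt()	barrier()
#endif
```

The same one-line addition recurs below in `enqueue_to_backlog()`, `net_rps_action_and_irq_enable()`, `__napi_schedule()` and the CPU-hotplug path, always for this reason.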
```diff
@@ -2785,6 +2786,7 @@
 	__this_cpu_write(softnet_data.completion_queue, skb);
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
```
```diff
@@ -3468,7 +3470,11 @@
 	 * This permits qdisc->running owner to get the lock more
 	 * often and dequeue packets faster.
 	 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+	contended = true;
+#else
 	contended = qdisc_is_running(q);
+#endif
 	if (unlikely(contended))
 		spin_lock(&q->busylock);
 
```
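On !RT kernels, `qdisc_is_running(q)` is a cheap heuristic: when the qdisc owner is mid-dequeue, contending senders first take `q->busylock`, so the owner can reacquire the qdisc lock more often. On RT the "running" owner may itself be preempted, so the heuristic says nothing useful and the patch pessimistically treats the qdisc as always contended, serializing all senders on `busylock`. A simplified sketch of the surrounding pattern in `__dev_xmit_skb()` (not the complete logic):

```c
	/* Simplified sketch; on RT the hunk above hardwires contended = true. */
	bool contended = true;

	if (unlikely(contended))
		spin_lock(&q->busylock);	/* wait behind other senders */
	spin_lock(root_lock);			/* the qdisc's own lock */
	/* ... enqueue the skb, or transmit directly ... */
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
```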
```diff
@@ -3829,10 +3835,14 @@
 	if (dev->flags & IFF_UP) {
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		if (READ_ONCE(txq->xmit_lock_owner) != current) {
+#else
 		/* Other cpus might concurrently change txq->xmit_lock_owner
 		 * to -1 or to their cpu id, but not to our id.
 		 */
 		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
+#endif
 			if (dev_xmit_recursion())
 				goto recursion_alert;
 
```
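The `xmit_lock_owner` check prevents a CPU from recursing into a TX queue it already holds. Recording a CPU id only works while the lock holder cannot migrate; on RT a `_xmit_lock` holder is preemptible and may move between CPUs, so ownership is tracked as a task (`current`) instead. The hunk near the end of this patch replaces the open-coded `queue->xmit_lock_owner = -1;` with `netdev_queue_clear_owner()` accordingly. A plausible shape for those helpers (an assumption; only `netdev_queue_clear_owner()` itself appears in this patch):

```c
/* Plausible -rt helpers (assumption): the owner field becomes a
 * struct task_struct * on RT and stays an int CPU id otherwise.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
{
	WRITE_ONCE(txq->xmit_lock_owner, current);
}

static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
{
	WRITE_ONCE(txq->xmit_lock_owner, NULL);
}
#else
static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
{
	WRITE_ONCE(txq->xmit_lock_owner, cpu);
}

static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
{
	WRITE_ONCE(txq->xmit_lock_owner, -1);
}
#endif
```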
```diff
@@ -4267,6 +4277,7 @@
 	rps_unlock(sd);
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 
 	atomic_long_inc(&skb->dev->rx_dropped);
 	kfree_skb(skb);
```
```diff
@@ -4481,7 +4492,7 @@
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
-		preempt_disable();
+		migrate_disable();
 		rcu_read_lock();
 
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -4491,14 +4502,14 @@
 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
 		rcu_read_unlock();
-		preempt_enable();
+		migrate_enable();
 	} else
 #endif
 	{
 		unsigned int qtail;
 
-		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
-		put_cpu();
+		ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
+		put_cpu_light();
 	}
 	return ret;
 }
```
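Both substitutions in `netif_rx_internal()` share one motive: `preempt_disable()` and `get_cpu()` open an atomic section, inside which the sleeping spinlocks taken by `enqueue_to_backlog()` would be illegal on RT. `migrate_disable()` (and the `get_cpu_light()`/`put_cpu_light()` wrappers) only pin the task to its current CPU, which is all the per-CPU backlog logic needs, while leaving the task preemptible. Paraphrased -rt definitions (the exact form is an assumption):

```c
/* Paraphrased from -rt (assumption): pin the task to its CPU
 * without disabling preemption.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()
# define put_cpu_light()	put_cpu()
#endif
```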
```diff
@@ -4532,11 +4543,9 @@
 
 	trace_netif_rx_ni_entry(skb);
 
-	preempt_disable();
+	local_bh_disable();
 	err = netif_rx_internal(skb);
-	if (local_softirq_pending())
-		do_softirq();
-	preempt_enable();
+	local_bh_enable();
 
 	return err;
 }
```
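In `netif_rx_ni()`, the hand-rolled `preempt_disable()` / `do_softirq()` / `preempt_enable()` sequence becomes a plain BH-disabled section: `local_bh_enable()` already runs any softirqs that became pending inside the section, and it does so correctly on both RT kernels (where softirqs are handled in thread context) and !RT kernels.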
```diff
@@ -5818,12 +5827,14 @@
 		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
+		preempt_check_resched_rt();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
 		net_rps_send_ipi(remsd);
 	} else
 #endif
 		local_irq_enable();
+	preempt_check_resched_rt();
 }
 
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
```
```diff
@@ -5853,16 +5864,18 @@
 	while (again) {
 		struct sk_buff *skb;
 
+		local_irq_disable();
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			local_irq_enable();
 			rcu_read_lock();
 			__netif_receive_skb(skb);
 			rcu_read_unlock();
 			input_queue_head_incr(sd);
 			if (++work >= quota)
 				goto state_changed;
+			local_irq_disable();
 		}
 
-		local_irq_disable();
 		rps_lock(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
```
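In `process_backlog()`, the per-CPU queues are now protected by the raw `rps_lock()` scheme (see the init hunk at the end of the patch), so each `__skb_dequeue()` of `sd->process_queue` is moved under a hard-IRQs-off window to stay consistent with the enqueue side. Interrupts are re-enabled around the expensive `__netif_receive_skb()` call, so only the brief queue manipulation runs with IRQs off while the actual packet processing runs with them enabled.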
```diff
@@ -5904,6 +5917,7 @@
 	local_irq_save(flags);
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__napi_schedule);
 
```
```diff
@@ -5940,6 +5954,7 @@
 }
 EXPORT_SYMBOL(napi_schedule_prep);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * __napi_schedule_irqoff - schedule for receive
  * @n: entry to schedule
@@ -5958,6 +5973,7 @@
 	__napi_schedule(n);
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
+#endif
 
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
```
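`__napi_schedule_irqoff()` is a fast-path variant that relies on the caller really having interrupts disabled. On RT, interrupt handlers run as threads with interrupts enabled, so that precondition does not hold; the definition is compiled out here and, presumably via a companion header change not shown in this patch, mapped onto the IRQ-safe `__napi_schedule()`:

```c
/* Presumed header stub on RT (assumption; mainline later adopted the
 * same approach for PREEMPT_RT):
 */
#ifdef CONFIG_PREEMPT_RT_FULL
static inline void __napi_schedule_irqoff(struct napi_struct *n)
{
	__napi_schedule(n);	/* take the irq-save slow path */
}
#endif
```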
```diff
@@ -6352,12 +6368,20 @@
 	unsigned long time_limit = jiffies +
 		usecs_to_jiffies(netdev_budget_usecs);
 	int budget = netdev_budget;
+	struct sk_buff_head tofree_q;
+	struct sk_buff *skb;
 	LIST_HEAD(list);
 	LIST_HEAD(repoll);
 
+	__skb_queue_head_init(&tofree_q);
+
 	local_irq_disable();
+	skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
 	list_splice_init(&sd->poll_list, &list);
 	local_irq_enable();
+
+	while ((skb = __skb_dequeue(&tofree_q)))
+		kfree_skb(skb);
 
 	for (;;) {
 		struct napi_struct *n;
```
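`sd->tofree_queue` is a new `softnet_data` member introduced elsewhere in the -rt series (the struct change is not part of this file); it collects skbs whose freeing had to be deferred. `net_rx_action()` splices that queue out during the existing IRQs-off window and frees the backlog afterwards with interrupts enabled, before starting the normal poll loop.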
```diff
@@ -6388,7 +6412,7 @@
 	list_splice_tail(&repoll, &list);
 	list_splice(&list, &sd->poll_list);
 	if (!list_empty(&sd->poll_list))
-		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+		__raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
 
 	net_rps_action_and_irq_enable(sd);
 out:
```
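`__raise_softirq_irqoff()` only marks NET_RX_SOFTIRQ pending and relies on interrupt exit to process it. The `_ksoft` variant is an -rt addition; judging by its name and the -rt softirq rework, it hands the leftover poll work to the ksoftirqd thread instead, so a budget-exhausted `net_rx_action()` does not immediately re-run in the current context and starve other tasks.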
```diff
@@ -8595,7 +8619,7 @@
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-	queue->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(queue);
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
 #ifdef CONFIG_BQL
```
```diff
@@ -9542,6 +9566,7 @@
 
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
+	preempt_check_resched_rt();
 
 #ifdef CONFIG_RPS
 	remsd = oldsd->rps_ipi_list;
@@ -9555,9 +9580,12 @@
 		netif_rx_ni(skb);
 		input_queue_head_incr(oldsd);
 	}
-	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx_ni(skb);
 		input_queue_head_incr(oldsd);
+	}
+	while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+		kfree_skb(skb);
 	}
 
 	return 0;
```
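`dev_cpu_dead()` runs on a surviving CPU, after the dead CPU's `softnet_data` can no longer be touched concurrently, so the lock-free `__skb_dequeue()` suffices. It also avoids `skb_dequeue()`'s `spinlock_t`, which would no longer be the right lock once the queue is initialized with `skb_queue_head_init_raw()` below. The extra loop drains the dead CPU's `tofree_queue` so deferred skbs are not leaked across the hotplug event.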
```diff
@@ -9869,8 +9897,9 @@
 
 		INIT_WORK(flush, flush_backlog);
 
-		skb_queue_head_init(&sd->input_pkt_queue);
-		skb_queue_head_init(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->input_pkt_queue);
+		skb_queue_head_init_raw(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->tofree_queue);
 #ifdef CONFIG_XFRM_OFFLOAD
 		skb_queue_head_init(&sd->xfrm_backlog);
 #endif
```
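`skb_queue_head_init_raw()` comes from the -rt change that equips `struct sk_buff_head` with a `raw_lock` member; it is the counterpart of the `raw_spin_lock(&sd->input_pkt_queue.raw_lock)` hunk at the top of this patch. Roughly (paraphrased, assumption about the exact form):

```c
/* Paraphrased from the -rt "skbufhead-raw-lock" change (assumption):
 * initialize the raw lock instead of the sleeping spinlock_t.
 */
static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
{
	raw_spin_lock_init(&list->raw_lock);
	__skb_queue_head_init(list);
}
```

Note that `sd->xfrm_backlog` keeps the ordinary `skb_queue_head_init()`: the patch only converts the three queues manipulated under the raw `rps_lock()`/IRQs-off scheme.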