@@ -576,7 +576,11 @@
 	.ops		= &noop_qdisc_ops,
 	.q.lock		= __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	= &noop_netdev_queue,
+#ifdef CONFIG_PREEMPT_RT_BASE
+	.running	= __SEQLOCK_UNLOCKED(noop_qdisc.running),
+#else
 	.running	= SEQCNT_ZERO(noop_qdisc.running),
+#endif
 	.busylock	= __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 	.gso_skb = {
 		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
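The hunk above adjusts the static noop_qdisc initializer: with CONFIG_PREEMPT_RT_BASE the per-qdisc `running` field is a full seqlock_t (its write side is backed by a spinlock) rather than a bare seqcount_t, so it must be initialised with `__SEQLOCK_UNLOCKED()` instead of `SEQCNT_ZERO()`. A minimal sketch of the two patterns, assuming a made-up `demo_qdisc` structure; only the seqlock/seqcount initialisers and write-side helpers are real kernel API:

```c
#include <linux/seqlock.h>

/* Hedged sketch, not part of the patch: demo_qdisc and demo_update() are
 * hypothetical, the seqlock/seqcount calls are real. */
struct demo_qdisc {
#ifdef CONFIG_PREEMPT_RT_BASE
	seqlock_t running;	/* write side backed by a spinlock on RT */
#else
	seqcount_t running;	/* bare sequence counter otherwise */
#endif
	unsigned long packets;
};

static struct demo_qdisc demo = {
#ifdef CONFIG_PREEMPT_RT_BASE
	.running = __SEQLOCK_UNLOCKED(demo.running),
#else
	.running = SEQCNT_ZERO(demo.running),
#endif
};

static void demo_update(void)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	write_seqlock(&demo.running);		/* serialises writers itself */
	demo.packets++;
	write_sequnlock(&demo.running);
#else
	write_seqcount_begin(&demo.running);	/* caller provides writer exclusion */
	demo.packets++;
	write_seqcount_end(&demo.running);
#endif
}
```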
@@ -877,9 +881,17 @@
 	lockdep_set_class(&sch->busylock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+	seqlock_init(&sch->running);
+	lockdep_set_class(&sch->running.seqcount,
+			  dev->qdisc_running_key ?: &qdisc_running_key);
+	lockdep_set_class(&sch->running.lock,
+			  dev->qdisc_running_key ?: &qdisc_running_key);
+#else
 	seqcount_init(&sch->running);
 	lockdep_set_class(&sch->running,
 			  dev->qdisc_running_key ?: &qdisc_running_key);
+#endif
 
 	sch->ops = ops;
 	sch->flags = ops->static_flags;
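This is the matching change on the dynamic allocation path: on RT the field is initialised with `seqlock_init()`, and because a seqlock_t embeds both a sequence counter and a spinlock, each half is given the per-device lockdep class separately. A minimal sketch that mirrors the hunk, with hypothetical `demo_sch` and `demo_running_key` names:

```c
#include <linux/lockdep.h>
#include <linux/seqlock.h>

/* Hedged sketch: demo_sch, demo_running_key and demo_init_running() are
 * made-up names; the seqlock_init/seqcount_init/lockdep_set_class calls
 * are the real API used by the hunk above. */
struct demo_sch {
#ifdef CONFIG_PREEMPT_RT_BASE
	seqlock_t running;
#else
	seqcount_t running;
#endif
};

static struct lock_class_key demo_running_key;

static void demo_init_running(struct demo_sch *sch)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	seqlock_init(&sch->running);
	/* A seqlock_t embeds a seqcount plus a spinlock, so each part gets
	 * its own lockdep class, exactly as the hunk above does. */
	lockdep_set_class(&sch->running.seqcount, &demo_running_key);
	lockdep_set_class(&sch->running.lock, &demo_running_key);
#else
	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running, &demo_running_key);
#endif
}
```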
@@ -1253,7 +1265,7 @@
 	/* Wait for outstanding qdisc_run calls. */
 	list_for_each_entry(dev, head, close_list) {
 		while (some_qdisc_is_busy(dev))
-			yield();
+			msleep(1);
 		/* The new qdisc is assigned at this point so we can safely
 		 * unwind stale skb lists and qdisc statistics
 		 */
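The final hunk replaces the busy-wait in the deactivation path: `yield()` only offers the CPU to runnable tasks of equal or higher priority, so a SCHED_FIFO waiter on PREEMPT_RT can spin here indefinitely while the lower-priority thread it is waiting for never runs; `msleep(1)` puts the waiter to sleep so the other side can make progress. A hedged sketch of the pattern, with made-up `wait_for_quiescence()`/`still_busy()` names standing in for the real loop:

```c
#include <linux/delay.h>

/* Hedged sketch only: still_busy() is a stub standing in for
 * some_qdisc_is_busy(dev) in the hunk above. */
static bool still_busy(void)
{
	return false;	/* stub predicate for illustration */
}

static void wait_for_quiescence(void)
{
	while (still_busy()) {
		/*
		 * yield() would keep a SCHED_FIFO task on the CPU whenever no
		 * equal-or-higher-priority task is runnable, starving the
		 * lower-priority thread this loop is waiting for.
		 */
		msleep(1);	/* sleep so the busy qdisc can finish its run */
	}
}
```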