@@ -28,8 +28,8 @@
 static LIST_HEAD(mirred_list);
 static DEFINE_SPINLOCK(mirred_list_lock);
 
-#define MIRRED_RECURSION_LIMIT 4
-static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
+#define MIRRED_NEST_LIMIT 4
+static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
 
 static bool tcf_mirred_is_act_redirect(int action)
 {
@@ -206,6 +206,25 @@
 	return err;
 }
 
+static bool is_mirred_nested(void)
+{
+	return unlikely(__this_cpu_read(mirred_nest_level) > 1);
+}
+
+static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+{
+	int err;
+
+	if (!want_ingress)
+		err = dev_queue_xmit(skb);
+	else if (is_mirred_nested())
+		err = netif_rx(skb);
+	else
+		err = netif_receive_skb(skb);
+
+	return err;
+}
+
 static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
 			  struct tcf_result *res)
 {
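The new tcf_mirred_forward() helper keeps netif_receive_skb() for the first ingress redirect handled on a CPU, but switches to netif_rx() (the backlog queue) once mirred is already on the call stack, so chained mirred-to-ingress actions stop growing the stack. Below is a minimal userspace sketch of that dispatch idea, not kernel code: every name in it (pkt, backlog, deliver, forward_ingress) is made up for illustration, and a plain counter stands in for the per-CPU mirred_nest_level.

/*
 * Userspace sketch of the "defer when nested" dispatch in tcf_mirred_forward():
 * the outermost delivery is processed inline, but a nested one is queued and
 * drained later, keeping the call stack bounded.  Hypothetical names only.
 */
#include <stdbool.h>
#include <stdio.h>

struct pkt { int id; int hops_left; };

#define BACKLOG_MAX 64
static struct pkt backlog[BACKLOG_MAX];  /* stands in for the netif_rx() backlog */
static int backlog_len;
static unsigned int nest_level;          /* stands in for per-CPU mirred_nest_level */

static void deliver(struct pkt p);

/* Analogue of tcf_mirred_forward() for the ingress case. */
static void forward_ingress(struct pkt p)
{
	if (nest_level > 1 && backlog_len < BACKLOG_MAX)
		backlog[backlog_len++] = p;  /* nested: queue, like netif_rx() */
	else
		deliver(p);                  /* outermost: inline, like netif_receive_skb() */
}

/* Analogue of the mirred action: each delivery may redirect again. */
static void deliver(struct pkt p)
{
	nest_level++;
	printf("pkt %d delivered at nest level %u\n", p.id, nest_level);
	if (p.hops_left > 0) {
		p.hops_left--;
		forward_ingress(p);          /* would recurse unboundedly without the check above */
	}
	nest_level--;
}

int main(void)
{
	deliver((struct pkt){ .id = 1, .hops_left = 5 });

	/* Drain deferred packets at the top level, outside any nesting. */
	while (backlog_len > 0)
		deliver(backlog[--backlog_len]);
	return 0;
}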
@@ -213,7 +232,7 @@
 	struct sk_buff *skb2 = skb;
 	bool m_mac_header_xmit;
 	struct net_device *dev;
-	unsigned int rec_level;
+	unsigned int nest_level;
 	int retval, err = 0;
 	bool use_reinsert;
 	bool want_ingress;
@@ -224,11 +243,11 @@
 	int mac_len;
 	bool at_nh;
 
-	rec_level = __this_cpu_inc_return(mirred_rec_level);
-	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
+	nest_level = __this_cpu_inc_return(mirred_nest_level);
+	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
 		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
 				     netdev_name(skb->dev));
-		__this_cpu_dec(mirred_rec_level);
+		__this_cpu_dec(mirred_nest_level);
 		return TC_ACT_SHOT;
 	}
 
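The hunk above keeps the existing guard, only renamed: the per-CPU counter is bumped on entry with __this_cpu_inc_return(), packets are dropped once it exceeds MIRRED_NEST_LIMIT, and every exit path (including the early TC_ACT_SHOT return) undoes the increment. A minimal single-threaded sketch of that counter discipline follows; act() and NEST_LIMIT are hypothetical names, and an ordinary counter replaces the per-CPU variable the kernel uses so that each CPU tracks its own nesting without atomics.

/* Sketch of the nest-guard pattern: increment, check against a limit,
 * and decrement on every exit path.  Hypothetical userspace analogue. */
#include <stdio.h>

#define NEST_LIMIT 4
static unsigned int nest_level;

static int act(int depth)
{
	int ret = 0;

	if (++nest_level > NEST_LIMIT) {   /* analogue of __this_cpu_inc_return() + check */
		fprintf(stderr, "nest limit exceeded, dropping\n");
		nest_level--;              /* early exit must still undo the increment */
		return -1;                 /* analogue of TC_ACT_SHOT */
	}

	if (depth > 0)
		ret = act(depth - 1);      /* re-entry, as when mirred redirects into mirred */

	nest_level--;
	return ret;
}

int main(void)
{
	printf("act(3) -> %d\n", act(3));   /* stays within the limit */
	printf("act(10) -> %d\n", act(10)); /* trips the limit and drops */
	return 0;
}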
@@ -244,7 +263,7 @@
 		goto out;
 	}
 
-	if (unlikely(!(dev->flags & IFF_UP))) {
+	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
 		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
 				       dev->name);
 		goto out;
@@ -295,25 +314,22 @@
 		/* let's the caller reinsert the packet, if possible */
 		if (use_reinsert) {
 			res->ingress = want_ingress;
-			if (skb_tc_reinsert(skb, res))
+			err = tcf_mirred_forward(res->ingress, skb);
+			if (err)
 				tcf_action_inc_overlimit_qstats(&m->common);
-			__this_cpu_dec(mirred_rec_level);
+			__this_cpu_dec(mirred_nest_level);
 			return TC_ACT_CONSUMED;
 		}
 	}
 
-	if (!want_ingress)
-		err = dev_queue_xmit(skb2);
-	else
-		err = netif_receive_skb(skb2);
-
+	err = tcf_mirred_forward(want_ingress, skb2);
 	if (err) {
 out:
 		tcf_action_inc_overlimit_qstats(&m->common);
 		if (tcf_mirred_is_act_redirect(m_eaction))
 			retval = TC_ACT_SHOT;
 	}
-	__this_cpu_dec(mirred_rec_level);
+	__this_cpu_dec(mirred_nest_level);
 
 	return retval;
 }