hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/core/rtnetlink.c
@@ -929,24 +929,27 @@
 			 nla_total_size(sizeof(struct ifla_vf_rate)) +
 			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
 			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
-			 nla_total_size(0) + /* nest IFLA_VF_STATS */
-			 /* IFLA_VF_STATS_RX_PACKETS */
-			 nla_total_size_64bit(sizeof(__u64)) +
-			 /* IFLA_VF_STATS_TX_PACKETS */
-			 nla_total_size_64bit(sizeof(__u64)) +
-			 /* IFLA_VF_STATS_RX_BYTES */
-			 nla_total_size_64bit(sizeof(__u64)) +
-			 /* IFLA_VF_STATS_TX_BYTES */
-			 nla_total_size_64bit(sizeof(__u64)) +
-			 /* IFLA_VF_STATS_BROADCAST */
-			 nla_total_size_64bit(sizeof(__u64)) +
-			 /* IFLA_VF_STATS_MULTICAST */
-			 nla_total_size_64bit(sizeof(__u64)) +
-			 /* IFLA_VF_STATS_RX_DROPPED */
-			 nla_total_size_64bit(sizeof(__u64)) +
-			 /* IFLA_VF_STATS_TX_DROPPED */
-			 nla_total_size_64bit(sizeof(__u64)) +
 			 nla_total_size(sizeof(struct ifla_vf_trust)));
+		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
+			size += num_vfs *
+				(nla_total_size(0) + /* nest IFLA_VF_STATS */
+				 /* IFLA_VF_STATS_RX_PACKETS */
+				 nla_total_size_64bit(sizeof(__u64)) +
+				 /* IFLA_VF_STATS_TX_PACKETS */
+				 nla_total_size_64bit(sizeof(__u64)) +
+				 /* IFLA_VF_STATS_RX_BYTES */
+				 nla_total_size_64bit(sizeof(__u64)) +
+				 /* IFLA_VF_STATS_TX_BYTES */
+				 nla_total_size_64bit(sizeof(__u64)) +
+				 /* IFLA_VF_STATS_BROADCAST */
+				 nla_total_size_64bit(sizeof(__u64)) +
+				 /* IFLA_VF_STATS_MULTICAST */
+				 nla_total_size_64bit(sizeof(__u64)) +
+				 /* IFLA_VF_STATS_RX_DROPPED */
+				 nla_total_size_64bit(sizeof(__u64)) +
+				 /* IFLA_VF_STATS_TX_DROPPED */
+				 nla_total_size_64bit(sizeof(__u64)));
+		}
 		return size;
 	} else
 		return 0;
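The hunk above only accounts for the per-VF stats attributes when the dump request did not set RTEXT_FILTER_SKIP_STATS. A minimal userspace sketch of such a request follows (illustrative only, not part of the patch; error handling and reply parsing are omitted):

/*
 * Illustrative userspace sketch: an RTM_GETLINK dump request that asks for
 * per-VF info (RTEXT_FILTER_VF) but sets RTEXT_FILTER_SKIP_STATS in
 * IFLA_EXT_MASK so the kernel omits the IFLA_VF_STATS nests.
 */
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
		struct rtattr    ext_req;		/* IFLA_EXT_MASK */
		__u32            ext_filter_mask;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len    = sizeof(req);
	req.nlh.nlmsg_type   = RTM_GETLINK;
	req.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family   = AF_UNSPEC;
	req.ext_req.rta_type = IFLA_EXT_MASK;
	req.ext_req.rta_len  = RTA_LENGTH(sizeof(__u32));
	req.ext_filter_mask  = RTEXT_FILTER_VF | RTEXT_FILTER_SKIP_STATS;

	send(fd, &req, sizeof(req), 0);
	/* ... recv() the dump replies and walk IFLA_VFINFO_LIST here ... */
	close(fd);
	return 0;
}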
@@ -1221,7 +1224,8 @@
 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 					       struct net_device *dev,
 					       int vfs_num,
-					       struct nlattr *vfinfo)
+					       struct nlattr *vfinfo,
+					       u32 ext_filter_mask)
 {
 	struct ifla_vf_rss_query_en vf_rss_query_en;
 	struct nlattr *vf, *vfstats, *vfvlanlist;
@@ -1327,33 +1331,35 @@
 		goto nla_put_vf_failure;
 	}
 	nla_nest_end(skb, vfvlanlist);
-	memset(&vf_stats, 0, sizeof(vf_stats));
-	if (dev->netdev_ops->ndo_get_vf_stats)
-		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
-						  &vf_stats);
-	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
-	if (!vfstats)
-		goto nla_put_vf_failure;
-	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
-			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
-			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
-			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
-			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
-			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
-			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
-			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
-			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
-		nla_nest_cancel(skb, vfstats);
-		goto nla_put_vf_failure;
+	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
+		memset(&vf_stats, 0, sizeof(vf_stats));
+		if (dev->netdev_ops->ndo_get_vf_stats)
+			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
+							  &vf_stats);
+		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
+		if (!vfstats)
+			goto nla_put_vf_failure;
+		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
+				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
+		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
+				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
+		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
+				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
+		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
+				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
+		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
+				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
+		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
+				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
+			nla_nest_cancel(skb, vfstats);
+			goto nla_put_vf_failure;
+		}
+		nla_nest_end(skb, vfstats);
 	}
-	nla_nest_end(skb, vfstats);
 	nla_nest_end(skb, vf);
 	return 0;
 
@@ -1386,7 +1392,7 @@
 		return -EMSGSIZE;
 
 	for (i = 0; i < num_vfs; i++) {
-		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
+		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
 			return -EMSGSIZE;
 	}
 
@@ -2155,13 +2161,27 @@
 	return err;
 }
 
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
-			struct netlink_ext_ack *exterr)
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+			     struct netlink_ext_ack *exterr)
 {
-	return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
+	const struct ifinfomsg *ifmp;
+	const struct nlattr *attrs;
+	size_t len;
+
+	ifmp = nla_data(nla_peer);
+	attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
+	len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
+
+	if (ifmp->ifi_index < 0) {
+		NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
+				    "ifindex can't be negative");
+		return -EINVAL;
+	}
+
+	return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
 				    exterr);
 }
-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
+EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
 
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 {
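For orientation, a kernel-side sketch of how a ->newlink handler might hand its nested peer attribute to the renamed helper (modeled loosely on veth's VETH_INFO_PEER handling; the function name and surrounding code are hypothetical, not part of this patch):

/*
 * Hypothetical caller sketch: rtnl_nla_parse_ifinfomsg() now skips the
 * leading struct ifinfomsg itself and rejects a negative ifi_index before
 * parsing the IFLA_* attributes that follow it.
 */
#include <net/rtnetlink.h>
#include <linux/veth.h>

static int example_parse_peer(struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct nlattr *peer_tb[IFLA_MAX + 1], *nla_peer;
	int err;

	if (!data || !data[VETH_INFO_PEER])
		return 0;

	nla_peer = data[VETH_INFO_PEER];
	err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
	if (err < 0)
		return err;

	/* ... use peer_tb[IFLA_IFNAME], peer_tb[IFLA_ADDRESS], ... */
	return 0;
}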
@@ -3252,6 +3272,7 @@
 	struct ifinfomsg *ifm;
 	char ifname[IFNAMSIZ];
 	struct nlattr **data;
+	bool link_specified;
 	int err;
 
 #ifdef CONFIG_MODULES
@@ -3272,12 +3293,19 @@
 		ifname[0] = '\0';
 
 	ifm = nlmsg_data(nlh);
-	if (ifm->ifi_index > 0)
+	if (ifm->ifi_index > 0) {
+		link_specified = true;
 		dev = __dev_get_by_index(net, ifm->ifi_index);
-	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+	} else if (ifm->ifi_index < 0) {
+		NL_SET_ERR_MSG(extack, "ifindex can't be negative");
+		return -EINVAL;
+	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
+		link_specified = true;
 		dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
-	else
+	} else {
+		link_specified = false;
 		dev = NULL;
+	}
 
 	master_dev = NULL;
 	m_ops = NULL;
@@ -3380,7 +3408,12 @@
 	}
 
 	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
-		if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+		/* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
+		 * or it's for a group
+		 */
+		if (link_specified)
+			return -ENODEV;
+		if (tb[IFLA_GROUP])
 			return rtnl_group_changelink(skb, net,
 						     nla_get_u32(tb[IFLA_GROUP]),
 						     ifm, extack, tb);
@@ -3883,7 +3916,7 @@
 	ndm->ndm_ifindex = dev->ifindex;
 	ndm->ndm_state = ndm_state;
 
-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+	if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
 		goto nla_put_failure;
 	if (vid)
 		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
@@ -3897,10 +3930,10 @@
 	return -EMSGSIZE;
 }
 
-static inline size_t rtnl_fdb_nlmsg_size(void)
+static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
 {
 	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
-	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
+	       nla_total_size(dev->addr_len) +	/* NDA_LLADDR */
 	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
 	       0;
 }
@@ -3912,7 +3945,7 @@
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
+	skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
 	if (!skb)
 		goto errout;
 
@@ -4891,13 +4924,17 @@
 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
 	if (br_spec) {
 		nla_for_each_nested(attr, br_spec, rem) {
-			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+			if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
 				if (nla_len(attr) < sizeof(flags))
 					return -EINVAL;
 
 				have_flags = true;
 				flags = nla_get_u16(attr);
-				break;
+			}
+
+			if (nla_type(attr) == IFLA_BRIDGE_MODE) {
+				if (nla_len(attr) < sizeof(u16))
+					return -EINVAL;
 			}
 		}
 	}
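Assuming this last hunk sits in the bridge RTM_SETLINK path, a userspace sketch of the IFLA_AF_SPEC nest it walks, carrying IFLA_BRIDGE_FLAGS and IFLA_BRIDGE_MODE as u16 attributes (illustrative only, not part of the patch; the ifindex is a placeholder and error handling is omitted):

/*
 * Illustrative userspace sketch: an RTM_SETLINK request whose IFLA_AF_SPEC
 * nest carries the two attribute types the loop above now validates.
 */
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
		struct rtattr    af_spec;	/* IFLA_AF_SPEC (nest) */
		struct rtattr    flags_a;	/* IFLA_BRIDGE_FLAGS */
		__u16            flags, pad0;
		struct rtattr    mode_a;	/* IFLA_BRIDGE_MODE */
		__u16            mode, pad1;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len    = sizeof(req);
	req.nlh.nlmsg_type   = RTM_SETLINK;
	req.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family   = PF_BRIDGE;
	req.ifm.ifi_index    = 3;		/* placeholder ifindex */
	req.af_spec.rta_type = IFLA_AF_SPEC;
	req.af_spec.rta_len  = RTA_LENGTH(0) + RTA_SPACE(sizeof(__u16)) * 2;
	req.flags_a.rta_type = IFLA_BRIDGE_FLAGS;
	req.flags_a.rta_len  = RTA_LENGTH(sizeof(__u16));
	req.flags            = BRIDGE_FLAGS_SELF;
	req.mode_a.rta_type  = IFLA_BRIDGE_MODE;
	req.mode_a.rta_len   = RTA_LENGTH(sizeof(__u16));
	req.mode             = BRIDGE_MODE_VEB;

	send(fd, &req, sizeof(req), 0);
	close(fd);
	return 0;
}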