@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
@@ -133,8 +122,10 @@
 	vfree(nvdev->send_buf);
 	kfree(nvdev->send_section_map);
 
-	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
 		vfree(nvdev->chan_table[i].mrc.slots);
+	}
 
 	kfree(nvdev);
 }
@@ -397,10 +388,20 @@
 	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
 	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
 
-	/* Setup receive completion ring */
-	net_device->recv_completion_cnt
-		= round_up(net_device->recv_section_cnt + 1,
-			   PAGE_SIZE / sizeof(u64));
+	/* Ensure buffer will not overflow */
+	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
+	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
+		netdev_err(ndev, "invalid recv_section_size %u\n",
+			   net_device->recv_section_size);
+		ret = -EINVAL;
+		goto cleanup;
+	}
+
+	/* Setup receive completion ring.
+	 * Add 1 to the recv_section_cnt because at least one entry in a
+	 * ring buffer has to be empty.
+	 */
+	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
 	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
 	if (ret)
 		goto cleanup;
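The check added here widens both factors to u64 before multiplying, so a host-supplied section size and count cannot wrap a 32-bit product and slip past the bound. A standalone sketch of the same pattern; the harness and names outside the diff are hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

/* Reject host-supplied geometry that would overrun the receive buffer.
 * Widening to u64 keeps size * cnt from wrapping in 32-bit arithmetic. */
static int validate_sections(uint32_t size, uint32_t cnt,
			     uint32_t buf_size, uint32_t min_size)
{
	if (size < min_size ||
	    (uint64_t)size * (uint64_t)cnt > (uint64_t)buf_size)
		return -1;
	return 0;
}

int main(void)
{
	/* 0x10000 * 0x10000 wraps to 0 as u32, so an unwidened product
	 * would wrongly pass this pair against any buffer size. */
	printf("%d\n", validate_sections(0x10000, 0x10000, 16 << 20, 68));
	/* Sane geometry: 1728-byte sections, 64 of them, 16 MiB buffer. */
	printf("%d\n", validate_sections(1728, 64, 16 << 20, 68));
	return 0;
}
```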
@@ -468,6 +469,12 @@
 	/* Parse the response */
 	net_device->send_section_size = init_packet->msg.
 				v1_msg.send_send_buf_complete.section_size;
+	if (net_device->send_section_size < NETVSC_MTU_MIN) {
+		netdev_err(ndev, "invalid send_section_size %u\n",
+			   net_device->send_section_size);
+		ret = -EINVAL;
+		goto cleanup;
+	}
 
 	/* Section count is simply the size divided by the section size. */
 	net_device->send_section_cnt = buf_size / net_device->send_section_size;
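Bounding send_section_size from below matters two lines later: the section count comes from dividing by it, so an unchecked zero or tiny host-supplied value would mean a divide-by-zero or an absurd count. A minimal sketch of the guard-then-divide ordering, with hypothetical names and MTU_MIN standing in for NETVSC_MTU_MIN:

```c
#include <stdint.h>
#include <stdio.h>

#define MTU_MIN 68	/* stand-in for NETVSC_MTU_MIN */

static int section_count(uint32_t buf_size, uint32_t section_size)
{
	/* Guard first: section_size is host-controlled, and dividing by
	 * an unchecked zero would crash on the next line. */
	if (section_size < MTU_MIN)
		return -1;
	return buf_size / section_size;
}

int main(void)
{
	printf("%d\n", section_count(1 << 20, 0));	/* rejected: -1 */
	printf("%d\n", section_count(1 << 20, 6144));	/* 170 sections */
	return 0;
}
```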
@@ -542,6 +549,9 @@
 		/* Teaming bit is needed to receive link speed updates */
 		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
 	}
+
+	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
+		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
 
 	trace_nvsp_send(ndev, init_packet);
 
@@ -641,9 +651,12 @@
 
 	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 
-	/* And disassociate NAPI context from device */
-	for (i = 0; i < net_device->num_chn; i++)
+	/* Disable NAPI and disassociate its context from the device. */
+	for (i = 0; i < net_device->num_chn; i++) {
+		/* See also vmbus_reset_channel_cb(). */
+		napi_disable(&net_device->chan_table[i].napi);
 		netif_napi_del(&net_device->chan_table[i].napi);
+	}
 
 	/*
 	 * At this point, no one should be accessing net_device
@@ -733,12 +746,49 @@
 				   int budget)
 {
 	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
+	u32 msglen = hv_pkt_datalen(desc);
+
+	/* Ensure packet is big enough to read header fields */
+	if (msglen < sizeof(struct nvsp_message_header)) {
+		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
+		return;
+	}
 
 	switch (nvsp_packet->hdr.msg_type) {
 	case NVSP_MSG_TYPE_INIT_COMPLETE:
+		if (msglen < sizeof(struct nvsp_message_header) +
+				sizeof(struct nvsp_message_init_complete)) {
+			netdev_err(ndev, "nvsp_msg length too small: %u\n",
+				   msglen);
+			return;
+		}
+		fallthrough;
+
 	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
+		if (msglen < sizeof(struct nvsp_message_header) +
+				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+				   msglen);
+			return;
+		}
+		fallthrough;
+
 	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
+		if (msglen < sizeof(struct nvsp_message_header) +
+				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
+			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+				   msglen);
+			return;
+		}
+		fallthrough;
+
 	case NVSP_MSG5_TYPE_SUBCHANNEL:
+		if (msglen < sizeof(struct nvsp_message_header) +
+				sizeof(struct nvsp_5_subchannel_complete)) {
+			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
+				   msglen);
+			return;
+		}
 		/* Copy the response back */
 		memcpy(&net_device->channel_init_pkt, nvsp_packet,
 		       sizeof(struct nvsp_message));
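Each case now proves the packet can hold the specific completion struct before the shared memcpy at the bottom reads it; since the cases fall through, a message is also checked against every struct between its own case and the common handling. The underlying validate-before-read rule, sketched standalone with hypothetical struct names:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct msg_hdr { uint32_t msg_type; };
struct init_complete { uint32_t negotiated_ver; uint32_t status; };

/* Return the payload only if the buffer really holds the header plus the
 * struct we intend to read; NULL means "too short, do not touch it". */
static const void *payload_or_null(const void *pkt, size_t pktlen,
				   size_t payload_size)
{
	if (pktlen < sizeof(struct msg_hdr) ||
	    pktlen - sizeof(struct msg_hdr) < payload_size)
		return NULL;
	return (const char *)pkt + sizeof(struct msg_hdr);
}

int main(void)
{
	uint8_t buf[sizeof(struct msg_hdr) + sizeof(struct init_complete)];

	printf("full packet:      %s\n",
	       payload_or_null(buf, sizeof(buf),
			       sizeof(struct init_complete)) ? "ok" : "short");
	printf("truncated packet: %s\n",
	       payload_or_null(buf, 5,
			       sizeof(struct init_complete)) ? "ok" : "short");
	return 0;
}
```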
@@ -796,7 +846,7 @@
 	}
 
 	for (i = 0; i < page_count; i++) {
-		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
+		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
 		u32 offset = pb[i].offset;
 		u32 len = pb[i].len;
 
@@ -908,7 +958,8 @@
 			struct hv_netvsc_packet *packet,
 			struct rndis_message *rndis_msg,
 			struct hv_page_buffer *pb,
-			struct sk_buff *skb)
+			struct sk_buff *skb,
+			bool xdp_tx)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
@@ -931,10 +982,11 @@
 	packet->send_buf_index = NETVSC_INVALID_INDEX;
 	packet->cp_partial = false;
 
-	/* Send control message directly without accessing msd (Multi-Send
-	 * Data) field which may be changed during data packet processing.
+	/* Send a control message or XDP packet directly without accessing
+	 * msd (Multi-Send Data) field which may be changed during data packet
+	 * processing.
 	 */
-	if (!skb)
+	if (!skb || xdp_tx)
 		return netvsc_send_pkt(device, packet, net_device, pb, skb);
 
 	/* batch packets in send buffer if possible */
@@ -966,7 +1018,7 @@
 	/* Keep aggregating only if stack says more data is coming
 	 * and not doing mixed modes send and not flow blocked
 	 */
-	xmit_more = skb->xmit_more &&
+	xmit_more = netdev_xmit_more() &&
 		!packet->cp_partial &&
 		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
 
@@ -1116,24 +1168,42 @@
 
 static int netvsc_receive(struct net_device *ndev,
 			  struct netvsc_device *net_device,
-			  struct vmbus_channel *channel,
-			  const struct vmpacket_descriptor *desc,
-			  const struct nvsp_message *nvsp)
+			  struct netvsc_channel *nvchan,
+			  const struct vmpacket_descriptor *desc)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct vmbus_channel *channel = nvchan->channel;
 	const struct vmtransfer_page_packet_header *vmxferpage_packet
 		= container_of(desc, const struct vmtransfer_page_packet_header, d);
+	const struct nvsp_message *nvsp = hv_pkt_data(desc);
+	u32 msglen = hv_pkt_datalen(desc);
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
 	char *recv_buf = net_device->recv_buf;
 	u32 status = NVSP_STAT_SUCCESS;
 	int i;
 	int count = 0;
 
+	/* Ensure packet is big enough to read header fields */
+	if (msglen < sizeof(struct nvsp_message_header)) {
+		netif_err(net_device_ctx, rx_err, ndev,
+			  "invalid nvsp header, length too small: %u\n",
+			  msglen);
+		return 0;
+	}
+
 	/* Make sure this is a valid nvsp packet */
 	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
 		netif_err(net_device_ctx, rx_err, ndev,
 			  "Unknown nvsp packet type received %u\n",
 			  nvsp->hdr.msg_type);
+		return 0;
+	}
+
+	/* Validate xfer page pkt header */
+	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
+		netif_err(net_device_ctx, rx_err, ndev,
+			  "Invalid xfer page pkt, offset too small: %u\n",
+			  desc->offset8 << 3);
 		return 0;
 	}
 
@@ -1147,6 +1217,14 @@
 
 	count = vmxferpage_packet->range_cnt;
 
+	/* Check count for a valid value */
+	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
+		netif_err(net_device_ctx, rx_err, ndev,
+			  "Range count is not valid: %d\n",
+			  count);
+		return 0;
+	}
+
 	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
 	for (i = 0; i < count; i++) {
 		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
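Both new checks guard a variable-length header: desc->offset8 counts 8-byte units, and the header area it describes must hold the fixed transfer-page header plus range_cnt entries of the trailing flexible array before any range is dereferenced. A compile-and-run sketch of that sizing logic, with made-up types and layout that only approximate the real structs:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct xfer_range { uint32_t byte_count; uint32_t byte_offset; };

struct xferpage_hdr {
	uint64_t desc[2];	/* stand-in for the descriptor fields */
	uint32_t pageset_id;
	uint32_t sender_owns_set;
	uint32_t reserved;
	uint32_t range_cnt;
	struct xfer_range ranges[];	/* flexible array member */
};

/* Bytes the header occupies when it carries "count" ranges. */
static size_t xfer_header_size(uint32_t count)
{
	return offsetof(struct xferpage_hdr, ranges) +
	       (size_t)count * sizeof(struct xfer_range);
}

int main(void)
{
	/* A 40-byte header area holds the fixed part (32 bytes here) plus
	 * exactly one range; a packet claiming two must be rejected. */
	size_t hdr_area = 40;

	printf("1 range fits:  %d\n", xfer_header_size(1) <= hdr_area);
	printf("2 ranges fit:  %d\n", xfer_header_size(2) <= hdr_area);
	return 0;
}
```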
@@ -1154,7 +1232,9 @@
 		void *data;
 		int ret;
 
-		if (unlikely(offset + buflen > net_device->recv_buf_size)) {
+		if (unlikely(offset > net_device->recv_buf_size ||
+			     buflen > net_device->recv_buf_size - offset)) {
+			nvchan->rsc.cnt = 0;
 			status = NVSP_STAT_FAIL;
 			netif_err(net_device_ctx, rx_err, ndev,
 				  "Packet offset:%u + len:%u too big\n",
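The rewritten bounds test avoids the classic wrap in offset + buflen: with both values host-controlled, their u32 sum can overflow and compare small. Testing buflen against the space remaining after offset needs no addition at all. A standalone demonstration; range_ok() is a hypothetical helper:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool range_ok(uint32_t offset, uint32_t buflen, uint32_t buf_size)
{
	/* No addition, so nothing can wrap: offset must land inside the
	 * buffer and buflen must fit in what is left after it. */
	return offset <= buf_size && buflen <= buf_size - offset;
}

int main(void)
{
	uint32_t off = 0xfffffff0u, len = 0x20u, size = 0x4000u;

	/* The naive form wraps: off + len == 0x10, which is "within" size. */
	printf("naive check passes: %d\n", off + len <= size);
	printf("safe check passes:  %d\n", range_ok(off, len, size));
	return 0;
}
```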
@@ -1165,14 +1245,19 @@
 
 		data = recv_buf + offset;
 
+		nvchan->rsc.is_last = (i == count - 1);
+
 		trace_rndis_recv(ndev, q_idx, data);
 
 		/* Pass it to the upper layer */
 		ret = rndis_filter_receive(ndev, net_device,
-					   channel, data, buflen);
+					   nvchan, data, buflen);
 
-		if (unlikely(ret != NVSP_STAT_SUCCESS))
+		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+			/* Drop incomplete packet */
+			nvchan->rsc.cnt = 0;
 			status = NVSP_STAT_FAIL;
+		}
 	}
 
 	enq_receive_complete(ndev, net_device, q_idx,
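With receive segment coalescing, nvchan->rsc accumulates the pieces of one large packet across calls; zeroing the count on any failure ensures a half-assembled packet is never delivered, and is_last tells the RNDIS layer when it may flush. A toy accumulator showing the reset-on-error discipline; the types are illustrative, not the driver's:

```c
#include <stdio.h>

#define MAX_FRAGS 8

struct rsc_state {
	int cnt;		/* fragments gathered so far */
	int len[MAX_FRAGS];
};

/* Add one fragment; on any error throw away the whole aggregation so a
 * partial packet can never be flushed upward. */
static int rsc_add(struct rsc_state *rsc, int len, int is_last)
{
	if (rsc->cnt >= MAX_FRAGS || len <= 0) {
		rsc->cnt = 0;		/* drop incomplete packet */
		return -1;
	}
	rsc->len[rsc->cnt++] = len;

	if (is_last) {
		printf("flush %d fragment(s)\n", rsc->cnt);
		rsc->cnt = 0;		/* ready for the next packet */
	}
	return 0;
}

int main(void)
{
	struct rsc_state rsc = { 0 };

	rsc_add(&rsc, 1400, 0);
	rsc_add(&rsc, -1, 0);		/* bad fragment: state resets */
	rsc_add(&rsc, 1400, 1);		/* fresh packet, flushes alone */
	return 0;
}
```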
@@ -1189,6 +1274,13 @@
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	u32 count, offset, *tab;
 	int i;
+
+	/* Ensure packet is big enough to read send_table fields */
+	if (msglen < sizeof(struct nvsp_message_header) +
+		     sizeof(struct nvsp_5_send_indirect_table)) {
+		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
+		return;
+	}
 
 	count = nvmsg->msg.v5_msg.send_table.count;
 	offset = nvmsg->msg.v5_msg.send_table.offset;
@@ -1221,12 +1313,24 @@
 }
 
 static void netvsc_send_vf(struct net_device *ndev,
-			   const struct nvsp_message *nvmsg)
+			   const struct nvsp_message *nvmsg,
+			   u32 msglen)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 
+	/* Ensure packet is big enough to read its fields */
+	if (msglen < sizeof(struct nvsp_message_header) +
+		     sizeof(struct nvsp_4_send_vf_association)) {
+		netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
+		return;
+	}
+
 	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
 	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+	if (net_device_ctx->vf_alloc)
+		complete(&net_device_ctx->vf_add);
+
 	netdev_info(ndev, "VF slot %u %s\n",
 		    net_device_ctx->vf_serial,
 		    net_device_ctx->vf_alloc ? "added" : "removed");
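Signaling vf_add only on the "added" path lets the code that plumbs the VF netdev block until this association message has actually arrived (removal needs no such rendezvous). A kernel completion maps naturally onto a condition variable; a pthread sketch of the same handshake, with every name hypothetical:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t vf_add = PTHREAD_COND_INITIALIZER;
static int vf_alloc;		/* "association arrived" flag */

/* Message handler: record the association, then wake any waiter. */
static void *handle_vf_assoc(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	vf_alloc = 1;
	pthread_cond_signal(&vf_add);	/* complete(&vf_add) analogue */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, handle_vf_assoc, NULL);

	/* VF-plumbing path: wait_for_completion(&vf_add) analogue. */
	pthread_mutex_lock(&lock);
	while (!vf_alloc)
		pthread_cond_wait(&vf_add, &lock);
	pthread_mutex_unlock(&lock);

	puts("VF association received; safe to switch the datapath");
	pthread_join(t, NULL);
	return 0;
}
```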
@@ -1234,45 +1338,51 @@
 
 static void netvsc_receive_inband(struct net_device *ndev,
 				  struct netvsc_device *nvscdev,
-				  const struct nvsp_message *nvmsg,
-				  u32 msglen)
+				  const struct vmpacket_descriptor *desc)
 {
+	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+	u32 msglen = hv_pkt_datalen(desc);
+
+	/* Ensure packet is big enough to read header fields */
+	if (msglen < sizeof(struct nvsp_message_header)) {
+		netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
+		return;
+	}
+
 	switch (nvmsg->hdr.msg_type) {
 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
 		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
 		break;
 
 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
-		netvsc_send_vf(ndev, nvmsg);
+		netvsc_send_vf(ndev, nvmsg, msglen);
 		break;
 	}
 }
 
 static int netvsc_process_raw_pkt(struct hv_device *device,
-				  struct vmbus_channel *channel,
+				  struct netvsc_channel *nvchan,
 				  struct netvsc_device *net_device,
 				  struct net_device *ndev,
 				  const struct vmpacket_descriptor *desc,
 				  int budget)
 {
+	struct vmbus_channel *channel = nvchan->channel;
 	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
-	u32 msglen = hv_pkt_datalen(desc);
 
 	trace_nvsp_recv(ndev, channel, nvmsg);
 
 	switch (desc->type) {
 	case VM_PKT_COMP:
-		netvsc_send_completion(ndev, net_device, channel,
-				       desc, budget);
+		netvsc_send_completion(ndev, net_device, channel, desc, budget);
 		break;
 
 	case VM_PKT_DATA_USING_XFER_PAGES:
-		return netvsc_receive(ndev, net_device, channel,
-				      desc, nvmsg);
+		return netvsc_receive(ndev, net_device, nvchan, desc);
 		break;
 
 	case VM_PKT_DATA_INBAND:
-		netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
+		netvsc_receive_inband(ndev, net_device, desc);
 		break;
 
 	default:
@@ -1311,7 +1421,7 @@
 	nvchan->desc = hv_pkt_iter_first(channel);
 
 	while (nvchan->desc && work_done < budget) {
-		work_done += netvsc_process_raw_pkt(device, channel, net_device,
+		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
 						    ndev, nvchan->desc, budget);
 		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
 	}
@@ -1350,7 +1460,7 @@
 	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
 
 	if (napi_schedule_prep(&nvchan->napi)) {
-		/* disable interupts from host */
+		/* disable interrupts from host */
 		hv_begin_read(rbi);
 
 		__napi_schedule_irqoff(&nvchan->napi);
@@ -1395,6 +1505,21 @@
 		nvchan->net_device = net_device;
 		u64_stats_init(&nvchan->tx_stats.syncp);
 		u64_stats_init(&nvchan->rx_stats.syncp);
+
+		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
+
+		if (ret) {
+			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
+			goto cleanup2;
+		}
+
+		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
+						 MEM_TYPE_PAGE_SHARED, NULL);
+
+		if (ret) {
+			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
+			goto cleanup2;
+		}
 	}
 
 	/* Enable NAPI handler before init callbacks */
@@ -1440,6 +1565,8 @@
 
 cleanup:
 	netif_napi_del(&net_device->chan_table[0].napi);
+
+cleanup2:
 	free_netvsc_device(&net_device->rcu);
 
 	return ERR_PTR(ret);
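The new cleanup2 label follows the kernel's staged-unwind idiom: jump to the label matching how much setup has completed, and fall through the later labels so each finished step is undone exactly once, in reverse order. The shape in miniature, with hypothetical step names:

```c
#include <stdio.h>

static int setup_queues(void) { return 0; }
static int open_channel(void) { return -1; }	/* pretend this fails */
static void teardown_queues(void) { puts("teardown queues"); }
static void free_device(void) { puts("free device"); }

static int open_device(void)
{
	int ret;

	ret = setup_queues();
	if (ret)
		goto cleanup2;	/* queues never came up: only free */

	ret = open_channel();
	if (ret)
		goto cleanup;	/* queues are up and must come down */

	return 0;

cleanup:
	teardown_queues();
	/* fall through: everything set up before this stage is undone too */
cleanup2:
	free_device();
	return ret;
}

int main(void)
{
	return open_device() ? 1 : 0;
}
```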