old mode 100644
new mode 100755
.. | .. |
---|
369 | 369 | rx_skb->dev = rwnx_vif->ndev; |
---|
370 | 370 | skb_reset_mac_header(rx_skb); |
---|
371 | 371 | |
---|
| 372 | + /* Update statistics */ |
---|
| 373 | + rwnx_vif->net_stats.rx_packets++; |
---|
| 374 | + rwnx_vif->net_stats.rx_bytes += rx_skb->len; |
---|
| 375 | + |
---|
372 | 376 | //printk("forward\n"); |
---|
373 | 377 | |
---|
374 | 378 | rx_skb->protocol = eth_type_trans(rx_skb, rwnx_vif->ndev); |
---|
375 | 379 | memset(rx_skb->cb, 0, sizeof(rx_skb->cb)); |
---|
376 | | - REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); |
---|
377 | 380 | #if 0 //modify by aic |
---|
378 | 381 | netif_receive_skb(rx_skb); |
---|
379 | 382 | #else |
---|
.. | .. |
---|
395 | 398 | #endif |
---|
396 | 399 | } |
---|
397 | 400 | #endif |
---|
398 | | - REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); |
---|
399 | 401 | |
---|
400 | | - /* Update statistics */ |
---|
401 | | - rwnx_vif->net_stats.rx_packets++; |
---|
402 | | - rwnx_vif->net_stats.rx_bytes += rx_skb->len; |
---|
403 | 402 | rwnx_hw->stats.last_rx = jiffies; |
---|
404 | 403 | } |
---|
405 | 404 | |
---|
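This hunk moves the rx_packets/rx_bytes accounting ahead of the hand-off to the network stack: once the skb reaches netif_receive_skb()/netif_rx() the driver no longer owns it, so reading rx_skb->len after forwarding is unsafe. A minimal sketch of the pattern, assuming a generic net_device_stats counter (the helper name is illustrative, not the driver's API):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    /* Count the frame while the driver still owns the skb, then hand it off.
     * After netif_receive_skb() the skb must not be touched again. */
    static void my_forward_skb(struct net_device *ndev,
                               struct net_device_stats *stats,
                               struct sk_buff *skb)
    {
        stats->rx_packets++;
        stats->rx_bytes += skb->len;
        skb->protocol = eth_type_trans(skb, ndev);
        memset(skb->cb, 0, sizeof(skb->cb));
        netif_receive_skb(skb);
    }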
.. | .. |
---|
505 | 504 | |
---|
506 | 505 | /* forward pkt to upper layer */ |
---|
507 | 506 | if (forward) { |
---|
| 507 | + /* Update statistics */ |
---|
| 508 | + rwnx_vif->net_stats.rx_packets++; |
---|
| 509 | + rwnx_vif->net_stats.rx_bytes += rx_skb->len; |
---|
| 510 | + |
---|
508 | 511 | rx_skb->protocol = eth_type_trans(rx_skb, rwnx_vif->ndev); |
---|
509 | 512 | #ifdef AICWF_ARP_OFFLOAD |
---|
510 | 513 | if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) |
---|
511 | 514 | arpoffload_proc(rx_skb, rwnx_vif); |
---|
512 | 515 | #endif |
---|
513 | 516 | memset(rx_skb->cb, 0, sizeof(rx_skb->cb)); |
---|
514 | | - REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); |
---|
515 | 517 | #if 0 //modify by aic |
---|
516 | 518 | netif_receive_skb(rx_skb); |
---|
517 | 519 | #else |
---|
.. | .. |
---|
533 | 535 | #endif |
---|
534 | 536 | } |
---|
535 | 537 | #endif |
---|
536 | | - REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX); |
---|
537 | 538 | |
---|
538 | | - /* Update statistics */ |
---|
539 | | - rwnx_vif->net_stats.rx_packets++; |
---|
540 | | - rwnx_vif->net_stats.rx_bytes += rx_skb->len; |
---|
541 | 539 | rwnx_hw->stats.last_rx = jiffies; |
---|
542 | 540 | } |
---|
543 | 541 | } |
---|
.. | .. |
---|
562 | 560 | struct rx_vector_1 *rxvect = &hw_rxhdr->hwvect.rx_vect1; |
---|
563 | 561 | |
---|
564 | 562 | //printk("rwnx_rx_mgmt\n"); |
---|
| 563 | + if (ieee80211_is_mgmt(mgmt->frame_control) && |
---|
| 564 | + (skb->len <= 24 || skb->len > 768)) { |
---|
| 565 | + printk("mgmt err\n"); |
---|
| 566 | + return; |
---|
| 567 | + } |
---|
565 | 568 | if (ieee80211_is_beacon(mgmt->frame_control)) { |
---|
566 | 569 | if ((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT) && |
---|
567 | 570 | hw_rxhdr->flags_new_peer) { |
---|
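The guard added at the top of rwnx_rx_mgmt() drops management frames with an implausible total length (nothing beyond the 24-byte management header, or more than 768 bytes) before they reach the beacon and action handlers. A hedged sketch of the same bounds check as a standalone predicate; the 768-byte cap mirrors the driver's own limit and is not an 802.11 constant:

    #include <linux/ieee80211.h>
    #include <linux/skbuff.h>

    /* Illustrative helper: true if a management frame length looks sane. */
    static bool mgmt_len_ok(const struct sk_buff *skb)
    {
        const struct ieee80211_mgmt *mgmt = (const struct ieee80211_mgmt *)skb->data;

        if (!ieee80211_is_mgmt(mgmt->frame_control))
            return true;
        return skb->len > 24 && skb->len <= 768;
    }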
.. | .. |
---|
623 | 626 | { |
---|
624 | 627 | struct rwnx_vif *rwnx_vif; |
---|
625 | 628 | int vif_idx = hw_rxhdr->flags_vif_idx; |
---|
626 | | - |
---|
| 629 | +#ifdef CREATE_TRACE_POINTS |
---|
627 | 630 | trace_mgmt_rx(hw_rxhdr->phy_info.phy_prim20_freq, vif_idx, |
---|
628 | 631 | hw_rxhdr->flags_sta_idx, (struct ieee80211_mgmt *)skb->data); |
---|
629 | | - |
---|
| 632 | +#endif |
---|
630 | 633 | if (vif_idx == RWNX_INVALID_VIF) { |
---|
631 | 634 | list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) { |
---|
632 | 635 | if (!rwnx_vif->up) |
---|
.. | .. |
---|
1077 | 1080 | return 0; |
---|
1078 | 1081 | } |
---|
1079 | 1082 | |
---|
1080 | | -/** |
---|
1081 | | - * rwnx_unsup_rx_vec_ind() - IRQ handler callback for %IPC_IRQ_E2A_UNSUP_RX_VEC |
---|
1082 | | - * |
---|
1083 | | - * LMAC has triggered an IT saying that a rx vector of an unsupported frame has been |
---|
1084 | | - * captured and sent to upper layer. Then we need to fill the rx status, create a vendor |
---|
1085 | | - * specific header and fill it with the HT packet length. Finally, we need to specify at |
---|
1086 | | - * least 2 bytes of data and send the sk_buff to mac80211. |
---|
1087 | | - * |
---|
1088 | | - * @pthis: Pointer to main driver data |
---|
1089 | | - * @hostid: Pointer to IPC elem from e2aradars_pool |
---|
1090 | | - */ |
---|
1091 | | -u8 rwnx_unsup_rx_vec_ind(void *pthis, void *hostid) |
---|
1092 | | -{ |
---|
1093 | | - struct rwnx_hw *rwnx_hw = pthis; |
---|
1094 | | - struct rwnx_ipc_skb_elem *elem = hostid; |
---|
1095 | | - struct rx_vector_desc *rx_desc; |
---|
1096 | | - struct sk_buff *skb; |
---|
1097 | | - struct rx_vector_1 *rx_vect1; |
---|
1098 | | - struct phy_channel_info_desc *phy_info; |
---|
1099 | | - struct vendor_radiotap_hdr *rtap; |
---|
1100 | | - u16 ht_length; |
---|
1101 | | - struct rwnx_vif *rwnx_vif; |
---|
1102 | | - struct rx_vector_desc rx_vect_desc; |
---|
1103 | | - u8 rtap_len, vend_rtap_len = sizeof(*rtap); |
---|
1104 | | - |
---|
1105 | | - dma_sync_single_for_cpu(rwnx_hw->dev, elem->dma_addr, |
---|
1106 | | - sizeof(struct rx_vector_desc), DMA_FROM_DEVICE); |
---|
1107 | | - |
---|
1108 | | - skb = elem->skb; |
---|
1109 | | - if (((struct rx_vector_desc *) (skb->data))->pattern == 0) { |
---|
1110 | | - /*sync is needed even if the driver did not modify the memory*/ |
---|
1111 | | - dma_sync_single_for_device(rwnx_hw->dev, elem->dma_addr, |
---|
1112 | | - sizeof(struct rx_vector_desc), DMA_FROM_DEVICE); |
---|
1113 | | - return -1; |
---|
1114 | | - } |
---|
1115 | | - |
---|
1116 | | - if (rwnx_hw->monitor_vif == RWNX_INVALID_VIF) { |
---|
1117 | | - /* Unmap will synchronize buffer for CPU */ |
---|
1118 | | - dma_unmap_single(rwnx_hw->dev, elem->dma_addr, rwnx_hw->ipc_env->unsuprxvec_bufsz, |
---|
1119 | | - DMA_FROM_DEVICE); |
---|
1120 | | - elem->skb = NULL; |
---|
1121 | | - |
---|
1122 | | - /* Free skb */ |
---|
1123 | | - dev_kfree_skb(skb); |
---|
1124 | | - |
---|
1125 | | - /* Allocate and push a new buffer to fw to replace this one */ |
---|
1126 | | - if (rwnx_ipc_unsup_rx_vec_elem_allocs(rwnx_hw, elem)) |
---|
1127 | | - dev_err(rwnx_hw->dev, "Failed to alloc new unsupported rx vector buf\n"); |
---|
1128 | | - return -1; |
---|
1129 | | - } |
---|
1130 | | - |
---|
1131 | | - rwnx_vif = rwnx_hw->vif_table[rwnx_hw->monitor_vif]; |
---|
1132 | | - skb->dev = rwnx_vif->ndev; |
---|
1133 | | - memcpy(&rx_vect_desc, skb->data, sizeof(rx_vect_desc)); |
---|
1134 | | - rx_desc = &rx_vect_desc; |
---|
1135 | | - |
---|
1136 | | - rx_vect1 = (struct rx_vector_1 *) (rx_desc->rx_vect1); |
---|
1137 | | - rwnx_rx_vector_convert(rwnx_hw, rx_vect1, NULL); |
---|
1138 | | - phy_info = (struct phy_channel_info_desc *) (&rx_desc->phy_info); |
---|
1139 | | - if (rx_vect1->format_mod >= FORMATMOD_VHT) |
---|
1140 | | - ht_length = 0; |
---|
1141 | | - else |
---|
1142 | | - ht_length = (u16) le32_to_cpu(rx_vect1->ht.length); |
---|
1143 | | - |
---|
1144 | | - // Reserve space for radiotap |
---|
1145 | | - skb_reserve(skb, RADIOTAP_HDR_MAX_LEN); |
---|
1146 | | - |
---|
1147 | | - /* Fill vendor specific header with fake values */ |
---|
1148 | | - rtap = (struct vendor_radiotap_hdr *) skb->data; |
---|
1149 | | - rtap->oui[0] = 0x00; |
---|
1150 | | - rtap->oui[1] = 0x25; |
---|
1151 | | - rtap->oui[2] = 0x3A; |
---|
1152 | | - rtap->subns = 0; |
---|
1153 | | - rtap->len = sizeof(ht_length); |
---|
1154 | | - put_unaligned_le16(ht_length, rtap->data); |
---|
1155 | | - vend_rtap_len += rtap->len; |
---|
1156 | | - skb_put(skb, vend_rtap_len); |
---|
1157 | | - |
---|
1158 | | - /* Copy fake data */ |
---|
1159 | | - put_unaligned_le16(0, skb->data + vend_rtap_len); |
---|
1160 | | - skb_put(skb, UNSUP_RX_VEC_DATA_LEN); |
---|
1161 | | - |
---|
1162 | | - /* Get RadioTap Header length */ |
---|
1163 | | - rtap_len = rwnx_rx_rtap_hdrlen(rx_vect1, true); |
---|
1164 | | - |
---|
1165 | | - /* Check headroom space */ |
---|
1166 | | - if (skb_headroom(skb) < rtap_len) { |
---|
1167 | | - netdev_err(rwnx_vif->ndev, "not enough headroom %d need %d\n", skb_headroom(skb), rtap_len); |
---|
1168 | | - return -1; |
---|
1169 | | - } |
---|
1170 | | - |
---|
1171 | | - /* Add RadioTap Header */ |
---|
1172 | | - rwnx_rx_add_rtap_hdr(rwnx_hw, skb, rx_vect1, phy_info, NULL, |
---|
1173 | | - rtap_len, vend_rtap_len, BIT(0)); |
---|
1174 | | - |
---|
1175 | | - skb_reset_mac_header(skb); |
---|
1176 | | - skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
1177 | | - skb->pkt_type = PACKET_OTHERHOST; |
---|
1178 | | - skb->protocol = htons(ETH_P_802_2); |
---|
1179 | | - |
---|
1180 | | - /* Unmap will synchronize buffer for CPU */ |
---|
1181 | | - dma_unmap_single(rwnx_hw->dev, elem->dma_addr, rwnx_hw->ipc_env->unsuprxvec_bufsz, |
---|
1182 | | - DMA_FROM_DEVICE); |
---|
1183 | | - elem->skb = NULL; |
---|
1184 | | - |
---|
1185 | | - netif_receive_skb(skb); |
---|
1186 | | - |
---|
1187 | | - /* Allocate and push a new buffer to fw to replace this one */ |
---|
1188 | | - if (rwnx_ipc_unsup_rx_vec_elem_allocs(rwnx_hw, elem)) |
---|
1189 | | - netdev_err(rwnx_vif->ndev, "Failed to alloc new unsupported rx vector buf\n"); |
---|
1190 | | - return 0; |
---|
1191 | | -} |
---|
1192 | | - |
---|
1193 | | -/** |
---|
1194 | | - * rwnx_rxdataind - Process rx buffer |
---|
1195 | | - * |
---|
1196 | | - * @pthis: Pointer to the object attached to the IPC structure |
---|
1197 | | - * (points to struct rwnx_hw is this case) |
---|
1198 | | - * @hostid: Address of the RX descriptor |
---|
1199 | | - * |
---|
1200 | | - * This function is called for each buffer received by the fw |
---|
1201 | | - * |
---|
1202 | | - */ |
---|
1203 | | -u8 rwnx_rxdataind(void *pthis, void *hostid) |
---|
1204 | | -{ |
---|
1205 | | - struct rwnx_hw *rwnx_hw = pthis; |
---|
1206 | | - struct rwnx_ipc_elem *elem = hostid; |
---|
1207 | | - struct hw_rxhdr *hw_rxhdr; |
---|
1208 | | - struct rxdesc_tag *rxdesc; |
---|
1209 | | - struct rwnx_vif *rwnx_vif; |
---|
1210 | | - struct sk_buff *skb = NULL; |
---|
1211 | | - int rx_buff_idx; |
---|
1212 | | - int msdu_offset = sizeof(struct hw_rxhdr) + 2; |
---|
1213 | | - int peek_len = msdu_offset + sizeof(struct ethhdr); |
---|
1214 | | - u16_l status; |
---|
1215 | | - |
---|
1216 | | - REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND); |
---|
1217 | | - |
---|
1218 | | - /* Get the ownership of the descriptor */ |
---|
1219 | | - dma_sync_single_for_cpu(rwnx_hw->dev, elem->dma_addr, |
---|
1220 | | - sizeof(struct rxdesc_tag), DMA_FROM_DEVICE); |
---|
1221 | | - |
---|
1222 | | - rxdesc = elem->addr; |
---|
1223 | | - status = rxdesc->status; |
---|
1224 | | - |
---|
1225 | | - /* check that frame is completely uploaded */ |
---|
1226 | | - if (!status) { |
---|
1227 | | - /* Get the ownership of the descriptor */ |
---|
1228 | | - dma_sync_single_for_device(rwnx_hw->dev, elem->dma_addr, |
---|
1229 | | - sizeof(struct rxdesc_tag), DMA_FROM_DEVICE); |
---|
1230 | | - return -1; |
---|
1231 | | - } |
---|
1232 | | - |
---|
1233 | | - /* Get the buffer linked with the received descriptor */ |
---|
1234 | | - rx_buff_idx = RWNX_RXBUFF_HOSTID_TO_IDX(rxdesc->host_id); |
---|
1235 | | - if (RWNX_RXBUFF_VALID_IDX(rx_buff_idx)) |
---|
1236 | | - skb = rwnx_hw->rxbuf_elems.skb[rx_buff_idx]; |
---|
1237 | | - |
---|
1238 | | - if (!skb) { |
---|
1239 | | - dev_err(rwnx_hw->dev, "RX Buff invalid idx [%d]\n", rx_buff_idx); |
---|
1240 | | - return -1; |
---|
1241 | | - } |
---|
1242 | | - |
---|
1243 | | - /* Check the pattern */ |
---|
1244 | | - if (RWNX_RXBUFF_PATTERN_GET(skb) != rwnx_rxbuff_pattern) { |
---|
1245 | | - dev_err(rwnx_hw->dev, "RX Buff Pattern not correct\n"); |
---|
1246 | | - BUG(); |
---|
1247 | | - } |
---|
1248 | | - |
---|
1249 | | - /* Check if we need to delete the buffer */ |
---|
1250 | | - if (status & RX_STAT_DELETE) { |
---|
1251 | | - /* Remove the SK buffer from the rxbuf_elems table */ |
---|
1252 | | - rwnx_ipc_rxbuf_elem_pull(rwnx_hw, skb); |
---|
1253 | | - /* Free the buffer */ |
---|
1254 | | - dev_kfree_skb(skb); |
---|
1255 | | - goto end; |
---|
1256 | | - } |
---|
1257 | | - |
---|
1258 | | - /* Check if we need to forward the buffer coming from a monitor interface */ |
---|
1259 | | - if (status & RX_STAT_MONITOR) { |
---|
1260 | | - struct sk_buff *skb_monitor; |
---|
1261 | | - struct hw_rxhdr hw_rxhdr_copy; |
---|
1262 | | - u8 rtap_len; |
---|
1263 | | - u16 frm_len; |
---|
1264 | | - |
---|
1265 | | - //Check if monitor interface exists and is open |
---|
1266 | | - rwnx_vif = rwnx_rx_get_vif(rwnx_hw, rwnx_hw->monitor_vif); |
---|
1267 | | - if (!rwnx_vif) { |
---|
1268 | | - dev_err(rwnx_hw->dev, "Received monitor frame but there is no monitor interface open\n"); |
---|
1269 | | - goto check_len_update; |
---|
1270 | | - } |
---|
1271 | | - |
---|
1272 | | - hw_rxhdr = (struct hw_rxhdr *)skb->data; |
---|
1273 | | - rwnx_rx_vector_convert(rwnx_hw, |
---|
1274 | | - &hw_rxhdr->hwvect.rx_vect1, |
---|
1275 | | - &hw_rxhdr->hwvect.rx_vect2); |
---|
1276 | | - rtap_len = rwnx_rx_rtap_hdrlen(&hw_rxhdr->hwvect.rx_vect1, false); |
---|
1277 | | - |
---|
1278 | | - // Move skb->data pointer to MAC Header or Ethernet header |
---|
1279 | | - skb->data += msdu_offset; |
---|
1280 | | - |
---|
1281 | | - //Save frame length |
---|
1282 | | - frm_len = le32_to_cpu(hw_rxhdr->hwvect.len); |
---|
1283 | | - |
---|
1284 | | - // Reserve space for frame |
---|
1285 | | - skb->len = frm_len; |
---|
1286 | | - |
---|
1287 | | - if (status == RX_STAT_MONITOR) { |
---|
1288 | | - /* Remove the SK buffer from the rxbuf_elems table. It will also |
---|
1289 | | - unmap the buffer and then sync the buffer for the cpu */ |
---|
1290 | | - rwnx_ipc_rxbuf_elem_pull(rwnx_hw, skb); |
---|
1291 | | - |
---|
1292 | | - //Check if there is enough space to add the radiotap header |
---|
1293 | | - if (skb_headroom(skb) > rtap_len) { |
---|
1294 | | - |
---|
1295 | | - skb_monitor = skb; |
---|
1296 | | - |
---|
1297 | | - //Duplicate the HW Rx Header to override with the radiotap header |
---|
1298 | | - memcpy(&hw_rxhdr_copy, hw_rxhdr, sizeof(hw_rxhdr_copy)); |
---|
1299 | | - |
---|
1300 | | - hw_rxhdr = &hw_rxhdr_copy; |
---|
1301 | | - } else { |
---|
1302 | | - //Duplicate the skb and extend the headroom |
---|
1303 | | - skb_monitor = skb_copy_expand(skb, rtap_len, 0, GFP_ATOMIC); |
---|
1304 | | - |
---|
1305 | | - //Reset original skb->data pointer |
---|
1306 | | - skb->data = (void *)hw_rxhdr; |
---|
1307 | | - } |
---|
1308 | | - } else { |
---|
1309 | | - //#ifdef CONFIG_RWNX_MON_DATA |
---|
1310 | | - #if 0 |
---|
1311 | | - // Check if MSDU |
---|
1312 | | - if (!hw_rxhdr->flags_is_80211_mpdu) { |
---|
1313 | | - // MSDU |
---|
1314 | | - //Extract MAC header |
---|
1315 | | - u16 machdr_len = hw_rxhdr->mac_hdr_backup.buf_len; |
---|
1316 | | - u8 *machdr_ptr = hw_rxhdr->mac_hdr_backup.buffer; |
---|
1317 | | - |
---|
1318 | | - //Pull Ethernet header from skb |
---|
1319 | | - skb_pull(skb, sizeof(struct ethhdr)); |
---|
1320 | | - |
---|
1321 | | - // Copy skb and extend for adding the radiotap header and the MAC header |
---|
1322 | | - skb_monitor = skb_copy_expand(skb, |
---|
1323 | | - rtap_len + machdr_len, |
---|
1324 | | - 0, GFP_ATOMIC); |
---|
1325 | | - |
---|
1326 | | - //Reserve space for the MAC Header |
---|
1327 | | - skb_push(skb_monitor, machdr_len); |
---|
1328 | | - |
---|
1329 | | - //Copy MAC Header |
---|
1330 | | - memcpy(skb_monitor->data, machdr_ptr, machdr_len); |
---|
1331 | | - |
---|
1332 | | - //Update frame length |
---|
1333 | | - frm_len += machdr_len - sizeof(struct ethhdr); |
---|
1334 | | - } else { |
---|
1335 | | - // MPDU |
---|
1336 | | - skb_monitor = skb_copy_expand(skb, rtap_len, 0, GFP_ATOMIC); |
---|
1337 | | - } |
---|
1338 | | - |
---|
1339 | | - //Reset original skb->data pointer |
---|
1340 | | - skb->data = (void *)hw_rxhdr; |
---|
1341 | | - #else |
---|
1342 | | - //Reset original skb->data pointer |
---|
1343 | | - skb->data = (void *)hw_rxhdr; |
---|
1344 | | - |
---|
1345 | | - wiphy_err(rwnx_hw->wiphy, "RX status %d is invalid when MON_DATA is disabled\n", status); |
---|
1346 | | - goto check_len_update; |
---|
1347 | | - #endif |
---|
1348 | | - } |
---|
1349 | | - |
---|
1350 | | - skb_reset_tail_pointer(skb); |
---|
1351 | | - skb->len = 0; |
---|
1352 | | - skb_reset_tail_pointer(skb_monitor); |
---|
1353 | | - skb_monitor->len = 0; |
---|
1354 | | - |
---|
1355 | | - skb_put(skb_monitor, frm_len); |
---|
1356 | | - if (rwnx_rx_monitor(rwnx_hw, rwnx_vif, skb_monitor, hw_rxhdr, rtap_len)) |
---|
1357 | | - dev_kfree_skb(skb_monitor); |
---|
1358 | | - |
---|
1359 | | - if (status == RX_STAT_MONITOR) { |
---|
1360 | | - status |= RX_STAT_ALLOC; |
---|
1361 | | - if (skb_monitor != skb) { |
---|
1362 | | - dev_kfree_skb(skb); |
---|
1363 | | - } |
---|
1364 | | - } |
---|
1365 | | - } |
---|
1366 | | - |
---|
1367 | | -check_len_update: |
---|
1368 | | - /* Check if we need to update the length */ |
---|
1369 | | - if (status & RX_STAT_LEN_UPDATE) { |
---|
1370 | | - dma_addr_t dma_addr = RWNX_RXBUFF_DMA_ADDR_GET(skb); |
---|
1371 | | - dma_sync_single_for_cpu(rwnx_hw->dev, dma_addr, |
---|
1372 | | - peek_len, DMA_FROM_DEVICE); |
---|
1373 | | - |
---|
1374 | | - hw_rxhdr = (struct hw_rxhdr *)skb->data; |
---|
1375 | | - |
---|
1376 | | - hw_rxhdr->hwvect.len = rxdesc->frame_len; |
---|
1377 | | - |
---|
1378 | | - if (status & RX_STAT_ETH_LEN_UPDATE) { |
---|
1379 | | - /* Update Length Field inside the Ethernet Header */ |
---|
1380 | | - struct ethhdr *hdr = (struct ethhdr *)((u8 *)hw_rxhdr + msdu_offset); |
---|
1381 | | - |
---|
1382 | | - hdr->h_proto = htons(rxdesc->frame_len - sizeof(struct ethhdr)); |
---|
1383 | | - } |
---|
1384 | | - |
---|
1385 | | - dma_sync_single_for_device(rwnx_hw->dev, dma_addr, |
---|
1386 | | - peek_len, DMA_BIDIRECTIONAL); |
---|
1387 | | - goto end; |
---|
1388 | | - } |
---|
1389 | | - |
---|
1390 | | - /* Check if it must be discarded after informing upper layer */ |
---|
1391 | | - if (status & RX_STAT_SPURIOUS) { |
---|
1392 | | - struct ieee80211_hdr *hdr; |
---|
1393 | | - |
---|
1394 | | - /* Read mac header to obtain Transmitter Address */ |
---|
1395 | | - rwnx_ipc_rxbuf_elem_sync(rwnx_hw, skb, msdu_offset + sizeof(*hdr)); |
---|
1396 | | - |
---|
1397 | | - hw_rxhdr = (struct hw_rxhdr *)skb->data; |
---|
1398 | | - hdr = (struct ieee80211_hdr *)(skb->data + msdu_offset); |
---|
1399 | | - rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx); |
---|
1400 | | - if (rwnx_vif) { |
---|
1401 | | - rwnx_cfg80211_rx_spurious_frame(rwnx_vif->ndev, hdr->addr2, GFP_ATOMIC); |
---|
1402 | | - } |
---|
1403 | | - rwnx_ipc_rxbuf_elem_repush(rwnx_hw, skb); |
---|
1404 | | - goto end; |
---|
1405 | | - } |
---|
1406 | | - |
---|
1407 | | - /* Check if we need to forward the buffer */ |
---|
1408 | | - if (status & RX_STAT_FORWARD) { |
---|
1409 | | - |
---|
1410 | | - /* Remove the SK buffer from the rxbuf_elems table. It will also |
---|
1411 | | - unmap the buffer and then sync the buffer for the cpu */ |
---|
1412 | | - rwnx_ipc_rxbuf_elem_pull(rwnx_hw, skb); |
---|
1413 | | - hw_rxhdr = (struct hw_rxhdr *)skb->data; |
---|
1414 | | - rwnx_rx_vector_convert(rwnx_hw, |
---|
1415 | | - &hw_rxhdr->hwvect.rx_vect1, |
---|
1416 | | - &hw_rxhdr->hwvect.rx_vect2); |
---|
1417 | | - skb_reserve(skb, msdu_offset); |
---|
1418 | | - skb_put(skb, le32_to_cpu(hw_rxhdr->hwvect.len)); |
---|
1419 | | - |
---|
1420 | | - if (hw_rxhdr->flags_is_80211_mpdu) { |
---|
1421 | | - rwnx_rx_mgmt_any(rwnx_hw, skb, hw_rxhdr); |
---|
1422 | | - } else { |
---|
1423 | | - rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx); |
---|
1424 | | - |
---|
1425 | | - if (!rwnx_vif) { |
---|
1426 | | - dev_err(rwnx_hw->dev, "Frame received but no active vif (%d)", |
---|
1427 | | - hw_rxhdr->flags_vif_idx); |
---|
1428 | | - dev_kfree_skb(skb); |
---|
1429 | | - goto check_alloc; |
---|
1430 | | - } |
---|
1431 | | - |
---|
1432 | | - if (hw_rxhdr->flags_sta_idx != RWNX_INVALID_STA) { |
---|
1433 | | - struct rwnx_sta *sta; |
---|
1434 | | - |
---|
1435 | | - sta = &rwnx_hw->sta_table[hw_rxhdr->flags_sta_idx]; |
---|
1436 | | - rwnx_rx_statistic(rwnx_hw, hw_rxhdr, sta); |
---|
1437 | | - |
---|
1438 | | - if (sta->vlan_idx != rwnx_vif->vif_index) { |
---|
1439 | | - rwnx_vif = rwnx_hw->vif_table[sta->vlan_idx]; |
---|
1440 | | - if (!rwnx_vif) { |
---|
1441 | | - dev_kfree_skb(skb); |
---|
1442 | | - goto check_alloc; |
---|
1443 | | - } |
---|
1444 | | - } |
---|
1445 | | - |
---|
1446 | | - if (hw_rxhdr->flags_is_4addr && !rwnx_vif->use_4addr) { |
---|
1447 | | - rwnx_cfg80211_rx_unexpected_4addr_frame(rwnx_vif->ndev, |
---|
1448 | | - sta->mac_addr, GFP_ATOMIC); |
---|
1449 | | - } |
---|
1450 | | - } |
---|
1451 | | - |
---|
1452 | | - skb->priority = 256 + hw_rxhdr->flags_user_prio; |
---|
1453 | | - if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr)) |
---|
1454 | | - dev_kfree_skb(skb); |
---|
1455 | | - } |
---|
1456 | | - } |
---|
1457 | | - |
---|
1458 | | -check_alloc: |
---|
1459 | | - /* Check if we need to allocate a new buffer */ |
---|
1460 | | - if ((status & RX_STAT_ALLOC) && |
---|
1461 | | - rwnx_ipc_rxbuf_elem_allocs(rwnx_hw)) { |
---|
1462 | | - dev_err(rwnx_hw->dev, "Failed to alloc new RX buf\n"); |
---|
1463 | | - } |
---|
1464 | | - |
---|
1465 | | -end: |
---|
1466 | | - REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND); |
---|
1467 | | - |
---|
1468 | | - /* Reset and repush descriptor to FW */ |
---|
1469 | | - rwnx_ipc_rxdesc_elem_repush(rwnx_hw, elem); |
---|
1470 | | - |
---|
1471 | | - return 0; |
---|
1472 | | -} |
---|
1473 | | - |
---|
1474 | 1083 | #ifdef AICWF_ARP_OFFLOAD |
---|
1475 | 1084 | void arpoffload_proc(struct sk_buff *skb, struct rwnx_vif *rwnx_vif) |
---|
1476 | 1085 | { |
---|
.. | .. |
---|
1493 | 1102 | if (option[offset] == DHCP_OPTION_MESSAGE_TYPE) { |
---|
1494 | 1103 | if (option[offset+2] == DHCP_ACK) { |
---|
1495 | 1104 | dhcped = 1; |
---|
1496 | | - printk("group=%x, should=%x\n", rwnx_vif->sta.group_cipher_type, WLAN_CIPHER_SUITE_CCMP); |
---|
1497 | | - if (rwnx_vif->sta.group_cipher_type == WLAN_CIPHER_SUITE_CCMP || rwnx_vif->sta.group_cipher_type == WLAN_CIPHER_SUITE_AES_CMAC) |
---|
| 1105 | + printk("paired=%x, should=%x\n", rwnx_vif->sta.paired_cipher_type, WLAN_CIPHER_SUITE_CCMP); |
---|
| 1106 | + if (rwnx_vif->sta.paired_cipher_type == WLAN_CIPHER_SUITE_CCMP || \ |
---|
| 1107 | + rwnx_vif->sta.paired_cipher_type == WLAN_CIPHER_SUITE_AES_CMAC || \ |
---|
| 1108 | + ((rwnx_vif->sta.group_cipher_type == 0xff) && \ |
---|
| 1109 | + (rwnx_vif->sta.paired_cipher_type == 0xff))) |
---|
1498 | 1110 | rwnx_send_arpoffload_en_req(rwnx_vif->rwnx_hw, rwnx_vif, dhcph->yiaddr, 1); |
---|
1499 | 1111 | else |
---|
1500 | 1112 | rwnx_send_arpoffload_en_req(rwnx_vif->rwnx_hw, rwnx_vif, dhcph->yiaddr, 0); |
---|
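The DHCP-ACK path now keys the firmware ARP-offload decision on the pairwise cipher instead of the group cipher, and also covers open connections where the driver leaves both cipher fields at 0xff. A sketch of the predicate under that reading (treating 0xff as "not set" is this driver's convention, not a cfg80211 one):

    #include <linux/ieee80211.h>   /* WLAN_CIPHER_SUITE_* */
    #include <linux/types.h>

    /* Illustrative: enable ARP offload for CCMP/BIP pairwise ciphers, or for
     * an open association where neither cipher was negotiated. */
    static bool arp_offload_allowed(u32 paired_cipher, u32 group_cipher)
    {
        if (paired_cipher == WLAN_CIPHER_SUITE_CCMP ||
            paired_cipher == WLAN_CIPHER_SUITE_AES_CMAC)
            return true;
        return paired_cipher == 0xff && group_cipher == 0xff;
    }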
.. | .. |
---|
1648 | 1260 | return; |
---|
1649 | 1261 | } |
---|
1650 | 1262 | |
---|
| 1263 | + printk("reord_deinit_sta\n"); |
---|
1651 | 1264 | for (i = 0; i < 8; i++) { |
---|
1652 | 1265 | struct recv_msdu *req, *next; |
---|
1653 | 1266 | preorder_ctrl = &reord_info->preorder_ctrl[i]; |
---|
.. | .. |
---|
1659 | 1272 | req->pkt = NULL; |
---|
1660 | 1273 | reord_rxframe_free(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue, &req->rxframe_list); |
---|
1661 | 1274 | } |
---|
1662 | | - printk("reord dinit"); |
---|
1663 | 1275 | spin_unlock_irqrestore(&preorder_ctrl->reord_list_lock, flags); |
---|
1664 | 1276 | if (timer_pending(&preorder_ctrl->reord_timer)) { |
---|
1665 | 1277 | ret = del_timer_sync(&preorder_ctrl->reord_timer); |
---|
1666 | 1278 | } |
---|
1667 | 1279 | cancel_work_sync(&preorder_ctrl->reord_timer_work); |
---|
1668 | 1280 | } |
---|
| 1281 | + |
---|
1669 | 1282 | list_del(&reord_info->list); |
---|
1670 | 1283 | kfree(reord_info); |
---|
1671 | 1284 | } |
---|
.. | .. |
---|
1683 | 1296 | return -1; |
---|
1684 | 1297 | } |
---|
1685 | 1298 | |
---|
| 1299 | + if (!prframe->forward) { |
---|
| 1300 | + dev_kfree_skb(skb); |
---|
| 1301 | + prframe->pkt = NULL; |
---|
| 1302 | + reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue, &prframe->rxframe_list); |
---|
| 1303 | + return 0; |
---|
| 1304 | + } |
---|
| 1305 | + |
---|
1686 | 1306 | skb->data = prframe->rx_data; |
---|
1687 | 1307 | skb_set_tail_pointer(skb, prframe->len); |
---|
1688 | 1308 | skb->len = prframe->len; |
---|
| 1309 | + |
---|
| 1310 | + rwnx_vif->net_stats.rx_packets++; |
---|
| 1311 | + rwnx_vif->net_stats.rx_bytes += skb->len; |
---|
1689 | 1312 | //printk("netif sn=%d, len=%d\n", precv_frame->attrib.seq_num, skb->len); |
---|
1690 | 1313 | |
---|
1691 | 1314 | skb->dev = rwnx_vif->ndev; |
---|
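reord_single_frame_ind() now honours a per-entry forward flag: frames that were queued only so the block-ack reorder window keeps advancing (for example AP-mode traffic that is resent over the air but not destined for the local stack) are freed at delivery time, and rx_packets/rx_bytes are bumped only for frames that actually go up. In outline, with the names used in the driver:

    /* Every queued entry releases its reorder slot, but only entries flagged
     * 'forward' are handed to the network stack. */
    if (!prframe->forward) {
        dev_kfree_skb(skb);                 /* payload handled elsewhere (resend path) */
        prframe->pkt = NULL;
        reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue,
                           &prframe->rxframe_list);
        return 0;                           /* slot consumed, nothing delivered */
    }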
.. | .. |
---|
1700 | 1323 | if (in_interrupt()) { |
---|
1701 | 1324 | netif_rx(skb); |
---|
1702 | 1325 | } else { |
---|
1703 | | - /* |
---|
1704 | | - * If the receive is not processed inside an ISR, the softirqd must be woken explicitly to service the NET_RX_SOFTIRQ. |
---|
1705 | | - * * In 2.6 kernels, this is handledby netif_rx_ni(), but in earlier kernels, we need to do it manually. |
---|
1706 | | - */ |
---|
| 1326 | + /* |
---|
| 1327 | + * If the receive is not processed inside an ISR, the softirqd must be woken explicitly to service the NET_RX_SOFTIRQ. |
---|
| 1328 | + * * In 2.6 kernels, this is handledby netif_rx_ni(), but in earlier kernels, we need to do it manually. |
---|
| 1329 | + */ |
---|
1707 | 1330 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) |
---|
1708 | | - netif_rx_ni(skb); |
---|
| 1331 | + netif_rx_ni(skb); |
---|
1709 | 1332 | #else |
---|
1710 | | - ulong flags; |
---|
1711 | | - netif_rx(skb); |
---|
1712 | | - local_irq_save(flags); |
---|
1713 | | - RAISE_RX_SOFTIRQ(); |
---|
1714 | | - local_irq_restore(flags); |
---|
| 1333 | + ulong flags; |
---|
| 1334 | + netif_rx(skb); |
---|
| 1335 | + local_irq_save(flags); |
---|
| 1336 | + RAISE_RX_SOFTIRQ(); |
---|
| 1337 | + local_irq_restore(flags); |
---|
1715 | 1338 | #endif |
---|
1716 | 1339 | } |
---|
1717 | 1340 | |
---|
1718 | | - rwnx_vif->net_stats.rx_packets++; |
---|
1719 | | - rwnx_vif->net_stats.rx_bytes += skb->len; |
---|
1720 | 1341 | prframe->pkt = NULL; |
---|
1721 | 1342 | reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue, &prframe->rxframe_list); |
---|
1722 | 1343 | |
---|
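The delivery path uses netif_rx() from interrupt context and netif_rx_ni() otherwise, since netif_rx_ni() wakes ksoftirqd so the frame is processed without waiting for the next interrupt. On kernels from around v5.18 netif_rx() handles process context itself and netif_rx_ni() no longer exists, so out-of-tree code typically wraps the choice; a hedged sketch (the version cutoff is an assumption to check against the kernels actually targeted):

    #include <linux/netdevice.h>
    #include <linux/preempt.h>
    #include <linux/version.h>

    static void my_deliver_skb(struct sk_buff *skb)
    {
        if (in_interrupt()) {
            netif_rx(skb);
        } else {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
            netif_rx(skb);          /* process context is handled internally */
    #else
            netif_rx_ni(skb);       /* wake ksoftirqd explicitly */
    #endif
        }
    }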
.. | .. |
---|
1817 | 1438 | return ; |
---|
1818 | 1439 | } |
---|
1819 | 1440 | |
---|
1820 | | -int reord_process_unit(struct aicwf_rx_priv *rx_priv, struct sk_buff *skb, u16 seq_num, u8 tid) |
---|
| 1441 | +int reord_process_unit(struct aicwf_rx_priv *rx_priv, struct sk_buff *skb, u16 seq_num, u8 tid, u8 forward) |
---|
1821 | 1442 | { |
---|
1822 | 1443 | int ret = 0; |
---|
1823 | 1444 | u8 *mac; |
---|
.. | .. |
---|
1846 | 1467 | pframe->rx_data = skb->data; |
---|
1847 | 1468 | pframe->len = skb->len; |
---|
1848 | 1469 | pframe->pkt = skb; |
---|
| 1470 | + pframe->forward = forward; |
---|
1849 | 1471 | preorder_ctrl = pframe->preorder_ctrl; |
---|
1850 | 1472 | |
---|
1851 | 1473 | if ((ntohs(eh->h_proto) == ETH_P_PAE) || is_mcast) |
---|
.. | .. |
---|
2000 | 1622 | } |
---|
2001 | 1623 | } |
---|
2002 | 1624 | |
---|
| 1625 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) |
---|
| 1626 | +void defrag_timeout_cb(ulong data) |
---|
| 1627 | +#else |
---|
| 1628 | +void defrag_timeout_cb(struct timer_list *t) |
---|
| 1629 | +#endif |
---|
| 1630 | +{ |
---|
| 1631 | + struct defrag_ctrl_info *defrag_ctrl = NULL; |
---|
| 1632 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) |
---|
| 1633 | + defrag_ctrl = (struct defrag_ctrl_info *)data; |
---|
| 1634 | +#else |
---|
| 1635 | + defrag_ctrl = from_timer(defrag_ctrl, t, defrag_timer); |
---|
| 1636 | +#endif |
---|
| 1637 | + |
---|
| 1638 | + printk("%s:%p\r\n", __func__, defrag_ctrl); |
---|
| 1639 | + list_del_init(&defrag_ctrl->list); |
---|
| 1640 | + dev_kfree_skb(defrag_ctrl->skb); |
---|
| 1641 | + kfree(defrag_ctrl); |
---|
| 1642 | +} |
---|
| 1643 | + |
---|
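defrag_timeout_cb() is written twice because the kernel timer API changed: before v4.15 a callback receives the unsigned long data cookie stored in the timer, from v4.15 on it receives the struct timer_list pointer and recovers its container with from_timer(). A self-contained sketch of the portable pattern, assuming a hypothetical struct foo that embeds its timer:

    #include <linux/kernel.h>
    #include <linux/timer.h>
    #include <linux/version.h>

    struct foo {
        struct timer_list timer;
        int id;
    };

    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
    static void foo_timeout(unsigned long data)
    {
        struct foo *f = (struct foo *)data;
        pr_info("foo %d timed out\n", f->id);
    }
    #else
    static void foo_timeout(struct timer_list *t)
    {
        struct foo *f = from_timer(f, t, timer);
        pr_info("foo %d timed out\n", f->id);
    }
    #endif

Setup mirrors the diff: init_timer() plus the .data/.function assignments on old kernels, timer_setup(&f->timer, foo_timeout, 0) on new ones, then mod_timer(&f->timer, jiffies + msecs_to_jiffies(timeout)) to arm it.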
2003 | 1644 | u8 rwnx_rxdataind_aicwf(struct rwnx_hw *rwnx_hw, void *hostid, void *rx_priv) |
---|
2004 | 1645 | { |
---|
2005 | 1646 | struct hw_rxhdr *hw_rxhdr; |
---|
.. | .. |
---|
2015 | 1656 | u8 ether_type[2] = {0}; |
---|
2016 | 1657 | u8 pull_len = 0; |
---|
2017 | 1658 | u16 seq_num = 0; |
---|
| 1659 | + u8_l frag_num = 0; |
---|
2018 | 1660 | u8 tid = 0; |
---|
2019 | 1661 | u8 is_qos = 0; |
---|
| 1662 | + u8 is_frag = 0; |
---|
| 1663 | + struct defrag_ctrl_info *defrag_info = NULL; |
---|
| 1664 | + struct defrag_ctrl_info *defrag_info_tmp = NULL; |
---|
| 1665 | + int ret; |
---|
| 1666 | + u8 sta_idx = 0; |
---|
| 1667 | + u16_l frame_ctrl; |
---|
| 1668 | + u8 is_amsdu = 0; |
---|
| 1669 | + u16 len_alligned = 0; |
---|
| 1670 | + u16 sublen = 0; |
---|
| 1671 | + struct sk_buff *sub_skb = NULL; |
---|
2020 | 1672 | bool resend = false, forward = true; |
---|
2021 | 1673 | const struct ethhdr *eth; |
---|
2022 | 1674 | |
---|
2023 | | - REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND); |
---|
2024 | 1675 | hw_rxhdr = (struct hw_rxhdr *)skb->data; |
---|
2025 | 1676 | |
---|
2026 | 1677 | if (hw_rxhdr->is_monitor_vif) { |
---|
.. | .. |
---|
2186 | 1837 | &hw_rxhdr->hwvect.rx_vect2); |
---|
2187 | 1838 | skb_pull(skb, msdu_offset + 2); //+2 since sdio allign 58->60 |
---|
2188 | 1839 | |
---|
| 1840 | +#define MAC_FCTRL_MOREFRAG 0x0400 |
---|
| 1841 | + frame_ctrl = (skb->data[1] << 8) | skb->data[0]; |
---|
| 1842 | + seq_num = ((skb->data[22] & 0xf0) >> 4) | (skb->data[23] << 4); |
---|
| 1843 | + frag_num = (skb->data[22] & 0x0f); |
---|
| 1844 | + is_amsdu = 0; |
---|
| 1845 | + |
---|
2189 | 1846 | if ((skb->data[0] & 0x0f) == 0x08) { |
---|
2190 | 1847 | if ((skb->data[0] & 0x80) == 0x80) {//qos data |
---|
2191 | 1848 | hdr_len = 26; |
---|
2192 | 1849 | tid = skb->data[24] & 0x0F; |
---|
2193 | 1850 | is_qos = 1; |
---|
| 1851 | + if (skb->data[24] & 0x80) |
---|
| 1852 | + is_amsdu = 1; |
---|
2194 | 1853 | } |
---|
2195 | 1854 | |
---|
| 1855 | + if (skb->data[1] & 0x80)//htc |
---|
| 1856 | + hdr_len += 4; |
---|
2196 | 1857 | if ((skb->data[1] & 0x3) == 0x1) {// to ds |
---|
2197 | 1858 | memcpy(ra, &skb->data[16], MAC_ADDR_LEN); |
---|
2198 | 1859 | memcpy(ta, &skb->data[10], MAC_ADDR_LEN); |
---|
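The fragment and A-MSDU handling added above reads the raw 802.11 header directly: the frame control field is assembled little-endian so the More Fragments bit tests as 0x0400, the Sequence Control field at bytes 22..23 carries the 4-bit fragment number in its low nibble and the 12-bit sequence number above it, and a set Order bit (skb->data[1] & 0x80) on a QoS data frame signals a 4-byte HT Control field, which is why hdr_len grows by 4. A small user-space sketch of the sequence-control decode (offsets assume a basic 3-address header):

    #include <stdint.h>
    #include <stdio.h>

    /* Sequence Control, bytes 22..23 of the 802.11 header (little-endian):
     * bits 0-3 = fragment number, bits 4-15 = sequence number. */
    static void parse_seq_ctrl(const uint8_t *hdr, uint16_t *seq, uint8_t *frag)
    {
        uint16_t sc = (uint16_t)(hdr[22] | (hdr[23] << 8));

        *frag = sc & 0x0f;
        *seq = sc >> 4;
    }

    int main(void)
    {
        uint8_t hdr[24] = {0};
        uint16_t seq;
        uint8_t frag;

        hdr[22] = 0x52;                         /* frag 2, low sequence nibble 5 */
        hdr[23] = 0x7b;
        parse_seq_ctrl(hdr, &seq, &frag);
        printf("seq=%u frag=%u\n", seq, frag);  /* prints seq=1973 frag=2 */
        return 0;
    }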
.. | .. |
---|
2202 | 1863 | } |
---|
2203 | 1864 | |
---|
2204 | 1865 | pull_len += (hdr_len + 8); |
---|
2205 | | -#ifdef AICWF_RX_REORDER |
---|
2206 | | - seq_num = ((skb->data[22]&0xf0)>>4) | (skb->data[23]<<4); |
---|
2207 | | -#endif |
---|
| 1866 | + |
---|
2208 | 1867 | switch (hw_rxhdr->hwvect.decr_status) { |
---|
2209 | 1868 | case RWNX_RX_HD_DECR_CCMP128: |
---|
2210 | 1869 | pull_len += 8;//ccmp_header |
---|
.. | .. |
---|
2225 | 1884 | break; |
---|
2226 | 1885 | } |
---|
2227 | 1886 | |
---|
2228 | | - skb_pull(skb, pull_len); |
---|
2229 | | - skb_push(skb, 14); |
---|
2230 | | - memcpy(skb->data, ra, MAC_ADDR_LEN); |
---|
2231 | | - memcpy(&skb->data[6], ta, MAC_ADDR_LEN); |
---|
2232 | | - memcpy(&skb->data[12], ether_type, 2); |
---|
| 1887 | + if (is_amsdu) { |
---|
| 1888 | + skb_pull(skb, pull_len-8); |
---|
| 1889 | + /* |amsdu sub1 | amsdu sub2 | ... */ |
---|
| 1890 | + len_alligned = 0; |
---|
| 1891 | + sublen = 0; |
---|
| 1892 | + sub_skb = NULL; |
---|
| 1893 | + //printk("is_len:%d, pull:%d\n", skb->len, pull_len); |
---|
| 1894 | + while (skb->len > 16) { |
---|
| 1895 | + sublen = (skb->data[12]<<8)|(skb->data[13]); |
---|
| 1896 | + if (skb->len > (sublen+14)) |
---|
| 1897 | + len_alligned = roundup(sublen + 14, 4); |
---|
| 1898 | + else if (skb->len == (sublen+14)) |
---|
| 1899 | + len_alligned = sublen+14; |
---|
| 1900 | + else { |
---|
| 1901 | + printk("accroding to amsdu: this will not happen\n"); |
---|
| 1902 | + break; |
---|
| 1903 | + } |
---|
| 1904 | + //printk("sublen = %d, %x, %x, %x, %x\r\n", sublen,skb->data[0], skb->data[1], skb->data[12], skb->data[13]); |
---|
| 1905 | +#if 1 |
---|
| 1906 | + sub_skb = __dev_alloc_skb(sublen - 6 + 12, GFP_KERNEL); |
---|
| 1907 | + skb_put(sub_skb, sublen - 6 + 12); |
---|
| 1908 | + memcpy(sub_skb->data, skb->data, MAC_ADDR_LEN); |
---|
| 1909 | + memcpy(&sub_skb->data[6], &skb->data[6], MAC_ADDR_LEN); |
---|
| 1910 | + memcpy(&sub_skb->data[12], &skb->data[14 + 6], sublen - 6); |
---|
| 1911 | + |
---|
| 1912 | + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx); |
---|
| 1913 | + if (!rwnx_vif) { |
---|
| 1914 | + printk("Frame received but no active vif (%d)", hw_rxhdr->flags_vif_idx); |
---|
| 1915 | + dev_kfree_skb(sub_skb); |
---|
| 1916 | + break; |
---|
| 1917 | + } |
---|
| 1918 | + |
---|
| 1919 | + if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, sub_skb, hw_rxhdr)) |
---|
| 1920 | + dev_kfree_skb(sub_skb); |
---|
| 1921 | +#endif |
---|
| 1922 | + skb_pull(skb, len_alligned); |
---|
| 1923 | + } |
---|
| 1924 | + dev_kfree_skb(skb); |
---|
| 1925 | + return 0; |
---|
| 1926 | + } |
---|
| 1927 | + |
---|
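When the QoS control field flags an A-MSDU (bit 7 of skb->data[24]), the block above walks the aggregate instead of converting the frame as a single MSDU: each subframe starts with DA(6), SA(6) and a big-endian length, every subframe except the last is padded to a 4-byte boundary, and each one is rebuilt as an Ethernet frame (the 6-byte LLC/SNAP prefix is dropped so the EtherType follows the addresses) and pushed through rwnx_rx_data_skb() on its own. A user-space sketch of just the subframe walk and padding rule, which is the part that is easy to get wrong:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A-MSDU subframe: DA(6) SA(6) length(2, big-endian) MSDU(length),
     * padded to 4 bytes except for the last subframe. */
    static void walk_amsdu(const uint8_t *buf, size_t len)
    {
        while (len > 16) {
            uint16_t sublen = (uint16_t)((buf[12] << 8) | buf[13]);
            size_t total = 14 + sublen;

            if (total > len) {
                printf("malformed subframe\n");
                break;
            }
            printf("subframe with %u byte MSDU\n", sublen);

            if (total < len)                    /* not the last subframe: pad */
                total = (total + 3) & ~(size_t)3;
            buf += total;
            len -= total;
        }
    }

    int main(void)
    {
        uint8_t amsdu[64] = {0};

        amsdu[13] = 20;                         /* subframe 1: 20-byte MSDU    */
        amsdu[36 + 13] = 5;                     /* subframe 2 after 36B total  */
        walk_amsdu(amsdu, 36 + 14 + 5);
        return 0;
    }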
| 1928 | + if (hw_rxhdr->flags_dst_idx != RWNX_INVALID_STA) |
---|
| 1929 | + sta_idx = hw_rxhdr->flags_dst_idx; |
---|
| 1930 | + |
---|
| 1931 | + if (!hw_rxhdr->flags_need_reord && ((frame_ctrl & MAC_FCTRL_MOREFRAG) || frag_num)) { |
---|
| 1932 | + printk("rxfrag:%d,%d\r\n", (frame_ctrl & MAC_FCTRL_MOREFRAG), frag_num); |
---|
| 1933 | + if (frame_ctrl & MAC_FCTRL_MOREFRAG) { |
---|
| 1934 | + spin_lock_bh(&rwnx_hw->defrag_lock); |
---|
| 1935 | + if (!list_empty(&rwnx_hw->defrag_list)) { |
---|
| 1936 | + list_for_each_entry(defrag_info_tmp, &rwnx_hw->defrag_list, list) { |
---|
| 1937 | + if ((defrag_info_tmp->sn == seq_num) && (defrag_info_tmp->tid == tid) && \ |
---|
| 1938 | + defrag_info_tmp->sta_idx == sta_idx) { |
---|
| 1939 | + defrag_info = defrag_info_tmp; |
---|
| 1940 | + break; |
---|
| 1941 | + } |
---|
| 1942 | + } |
---|
| 1943 | + } |
---|
| 1944 | + spin_unlock_bh(&rwnx_hw->defrag_lock); |
---|
| 1945 | + //printk("rx frag: sn=%d, fn=%d\r\n", seq_num, frag_num); |
---|
| 1946 | + if (defrag_info) { |
---|
| 1947 | + is_frag = 1; |
---|
| 1948 | + if (defrag_info->next_fn != frag_num) { |
---|
| 1949 | + //printk("discard:%d:%d\n", defrag_info->next_fn, frag_num); |
---|
| 1950 | + dev_kfree_skb(skb); |
---|
| 1951 | + return 0; |
---|
| 1952 | + } |
---|
| 1953 | + |
---|
| 1954 | + skb_put(defrag_info->skb, skb->len-(pull_len-8)); |
---|
| 1955 | + memcpy(&defrag_info->skb->data[defrag_info->frm_len], \ |
---|
| 1956 | + &skb->data[pull_len-8], skb->len - (pull_len-8)); |
---|
| 1957 | + //printk("middle:%d,%d\n", skb->len-(pull_len-8), skb->len); |
---|
| 1958 | + defrag_info->frm_len += (skb->len - (pull_len - 8)); |
---|
| 1959 | + defrag_info->next_fn++; |
---|
| 1960 | + dev_kfree_skb(skb); |
---|
| 1961 | + return 0; |
---|
| 1962 | + } else { |
---|
| 1963 | + defrag_info = kzalloc(sizeof(struct defrag_ctrl_info), GFP_ATOMIC); |
---|
| 1964 | + if (defrag_info == NULL) { |
---|
| 1965 | + printk("no defrag_ctrl_info\r\n"); |
---|
| 1966 | + dev_kfree_skb(skb); |
---|
| 1967 | + return 0; |
---|
| 1968 | + } |
---|
| 1969 | + defrag_info->skb = __dev_alloc_skb(2000, GFP_ATOMIC); |
---|
| 1970 | + if (defrag_info->skb == NULL) { |
---|
| 1971 | + printk("no fragment skb\r\n"); |
---|
| 1972 | + dev_kfree_skb(skb); |
---|
| 1973 | + kfree(defrag_info); |
---|
| 1974 | + return 0; |
---|
| 1975 | + } |
---|
| 1976 | + is_frag = 1; |
---|
| 1977 | + skb_pull(skb, pull_len); |
---|
| 1978 | + skb_push(skb, 14); |
---|
| 1979 | + memcpy(skb->data, ra, MAC_ADDR_LEN); |
---|
| 1980 | + memcpy(&skb->data[6], ta, MAC_ADDR_LEN); |
---|
| 1981 | + memcpy(&skb->data[12], ether_type, 2); |
---|
| 1982 | + |
---|
| 1983 | + defrag_info->sn = seq_num; |
---|
| 1984 | + defrag_info->next_fn = 1; |
---|
| 1985 | + defrag_info->tid = tid; |
---|
| 1986 | + defrag_info->sta_idx = sta_idx; |
---|
| 1987 | + |
---|
| 1988 | + skb_put(defrag_info->skb, skb->len); |
---|
| 1989 | + memcpy(defrag_info->skb->data, skb->data, skb->len); |
---|
| 1990 | + defrag_info->frm_len = skb->len; |
---|
| 1991 | + //printk("first:%p,%d\r\n", defrag_info, defrag_info->frm_len); |
---|
| 1992 | + spin_lock_bh(&rwnx_hw->defrag_lock); |
---|
| 1993 | + list_add_tail(&defrag_info->list, &rwnx_hw->defrag_list); |
---|
| 1994 | + spin_unlock_bh(&rwnx_hw->defrag_lock); |
---|
| 1995 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) |
---|
| 1996 | + init_timer(&defrag_info->defrag_timer); |
---|
| 1997 | + defrag_info->defrag_timer.data = (unsigned long)defrag_info; |
---|
| 1998 | + defrag_info->defrag_timer.function = defrag_timeout_cb; |
---|
| 1999 | +#else |
---|
| 2000 | + timer_setup(&defrag_info->defrag_timer, defrag_timeout_cb, 0); |
---|
| 2001 | +#endif |
---|
| 2002 | + ret = mod_timer(&defrag_info->defrag_timer, jiffies + msecs_to_jiffies(DEFRAG_MAX_WAIT)); |
---|
| 2003 | + dev_kfree_skb(skb); |
---|
| 2004 | + return 0; |
---|
| 2005 | + } |
---|
| 2006 | + } else { |
---|
| 2007 | + //check whether the last fragment |
---|
| 2008 | + if (!list_empty(&rwnx_hw->defrag_list)) { |
---|
| 2009 | + spin_lock_bh(&rwnx_hw->defrag_lock); |
---|
| 2010 | + list_for_each_entry(defrag_info_tmp, &rwnx_hw->defrag_list, list) { |
---|
| 2011 | + if (((defrag_info_tmp->sn == seq_num) && (defrag_info_tmp->tid == tid) && \ |
---|
| 2012 | + defrag_info_tmp->sta_idx == sta_idx)) { |
---|
| 2013 | + defrag_info = defrag_info_tmp; |
---|
| 2014 | + break; |
---|
| 2015 | + } |
---|
| 2016 | + } |
---|
| 2017 | + spin_unlock_bh(&rwnx_hw->defrag_lock); |
---|
| 2018 | + |
---|
| 2019 | + if (defrag_info) { |
---|
| 2020 | + if (defrag_info->next_fn != frag_num) { |
---|
| 2021 | + printk("discard:%d:%d\n", defrag_info->next_fn, frag_num); |
---|
| 2022 | + dev_kfree_skb(skb); |
---|
| 2023 | + return 0; |
---|
| 2024 | + } |
---|
| 2025 | + |
---|
| 2026 | + skb_put(defrag_info->skb, skb->len - (pull_len-8)); |
---|
| 2027 | + memcpy(&defrag_info->skb->data[defrag_info->frm_len], \ |
---|
| 2028 | + &skb->data[pull_len-8], skb->len - (pull_len-8)); |
---|
| 2029 | + defrag_info->frm_len += (skb->len - (pull_len-8)); |
---|
| 2030 | + is_frag = 1; |
---|
| 2031 | + //printk("last: sn=%d, fn=%d, %d, %d\r\n", seq_num, frag_num, defrag_info->frm_len, skb->len); |
---|
| 2032 | + dev_kfree_skb(skb); |
---|
| 2033 | + |
---|
| 2034 | + rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx); |
---|
| 2035 | + if (!rwnx_vif) { |
---|
| 2036 | + printk("Frame received but no active vif (%d)", hw_rxhdr->flags_vif_idx); |
---|
| 2037 | + dev_kfree_skb(skb); |
---|
| 2038 | + return 0; |
---|
| 2039 | + } |
---|
| 2040 | + |
---|
| 2041 | + if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, defrag_info->skb, hw_rxhdr)) |
---|
| 2042 | + dev_kfree_skb(defrag_info->skb); |
---|
| 2043 | + |
---|
| 2044 | + spin_lock_bh(&rwnx_hw->defrag_lock); |
---|
| 2045 | + list_del_init(&defrag_info->list); |
---|
| 2046 | + spin_unlock_bh(&rwnx_hw->defrag_lock); |
---|
| 2047 | + if (timer_pending(&defrag_info->defrag_timer)) { |
---|
| 2048 | + ret = del_timer(&defrag_info->defrag_timer); |
---|
| 2049 | + } |
---|
| 2050 | + kfree(defrag_info); |
---|
| 2051 | + |
---|
| 2052 | + return 0; |
---|
| 2053 | + } |
---|
| 2054 | + } |
---|
| 2055 | + } |
---|
| 2056 | + } |
---|
| 2057 | + |
---|
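Non-aggregated fragments (More Fragments set, or a non-zero fragment number) are reassembled per {station, TID, sequence number}: the first fragment allocates a 2000-byte skb, stores the Ethernet-converted header plus payload and arms a DEFRAG_MAX_WAIT timer, later fragments append only their payload (skb->len minus the 802.11 and crypto headers, i.e. pull_len - 8), next_fn rejects out-of-order pieces, and the final fragment hands the rebuilt frame to rwnx_rx_data_skb() and frees the entry. The bookkeeping structure implied by this hunk looks roughly like the sketch below; the real definition lives elsewhere in the driver and may differ:

    #include <linux/list.h>
    #include <linux/skbuff.h>
    #include <linux/timer.h>
    #include <linux/types.h>

    /* Assumed shape of the per-fragment-stream entry used above. */
    struct defrag_ctrl_info {
        struct list_head list;           /* linked into rwnx_hw->defrag_list      */
        struct sk_buff *skb;             /* reassembly buffer (~2000 bytes)        */
        u16 sn;                          /* sequence number of the fragmented MSDU */
        u16 frm_len;                     /* bytes accumulated so far               */
        u8 tid;                          /* traffic identifier                     */
        u8 sta_idx;                      /* transmitting station                   */
        u8 next_fn;                      /* next expected fragment number          */
        struct timer_list defrag_timer;  /* drops stale, incomplete entries        */
    };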
| 2058 | + if (!is_frag) { |
---|
| 2059 | + skb_pull(skb, pull_len); |
---|
| 2060 | + skb_push(skb, 14); |
---|
| 2061 | + memcpy(skb->data, ra, MAC_ADDR_LEN); |
---|
| 2062 | + memcpy(&skb->data[6], ta, MAC_ADDR_LEN); |
---|
| 2063 | + memcpy(&skb->data[12], ether_type, 2); |
---|
| 2064 | + } |
---|
2233 | 2065 | } |
---|
2234 | 2066 | |
---|
2235 | 2067 | if (hw_rxhdr->flags_is_80211_mpdu) { |
---|
.. | .. |
---|
2273 | 2105 | |
---|
2274 | 2106 | if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) { |
---|
2275 | 2107 | if (is_qos && hw_rxhdr->flags_need_reord) |
---|
2276 | | - reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid); |
---|
| 2108 | + reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid, 1); |
---|
2277 | 2109 | else if (is_qos && !hw_rxhdr->flags_need_reord) { |
---|
2278 | 2110 | reord_flush_tid((struct aicwf_rx_priv *)rx_priv, skb, tid); |
---|
2279 | 2111 | if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr)) |
---|
.. | .. |
---|
2309 | 2141 | |
---|
2310 | 2142 | if (forward) { |
---|
2311 | 2143 | if (is_qos && hw_rxhdr->flags_need_reord) |
---|
2312 | | - reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid); |
---|
| 2144 | + reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid, 1); |
---|
2313 | 2145 | else if (is_qos && !hw_rxhdr->flags_need_reord) { |
---|
2314 | 2146 | reord_flush_tid((struct aicwf_rx_priv *)rx_priv, skb, tid); |
---|
2315 | 2147 | rwnx_rx_data_skb_forward(rwnx_hw, rwnx_vif, skb, hw_rxhdr); |
---|
2316 | 2148 | } else |
---|
2317 | 2149 | rwnx_rx_data_skb_forward(rwnx_hw, rwnx_vif, skb, hw_rxhdr); |
---|
| 2150 | + } else if (resend) { |
---|
| 2151 | + if (is_qos && hw_rxhdr->flags_need_reord) |
---|
| 2152 | + reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid, 0); |
---|
| 2153 | + else if (is_qos && !hw_rxhdr->flags_need_reord) { |
---|
| 2154 | + reord_flush_tid((struct aicwf_rx_priv *)rx_priv, skb, tid); |
---|
| 2155 | + dev_kfree_skb(skb); |
---|
| 2156 | + } |
---|
2318 | 2157 | } else |
---|
2319 | 2158 | dev_kfree_skb(skb); |
---|
2320 | 2159 | #else |
---|
.. | .. |
---|
2330 | 2169 | } |
---|
2331 | 2170 | |
---|
2332 | 2171 | end: |
---|
2333 | | - REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND); |
---|
2334 | 2172 | return 0; |
---|
2335 | 2173 | } |
---|
2336 | 2174 | |
---|