2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -23,7 +23,7 @@
  * in the file called COPYING.
  *
  * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Linux Wireless <linuxwifi@intel.com>
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  *
  * BSD LICENSE
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -66,11 +66,37 @@
 #include "mvm.h"
 #include "fw-api.h"
 
+static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+	u8 *data = skb->data;
+
+	/* Alignment concerns */
+	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
+	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);
+	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4);
+	BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4);
+
+	if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+		data += sizeof(struct ieee80211_radiotap_he);
+	if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+		data += sizeof(struct ieee80211_radiotap_he_mu);
+	if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG)
+		data += sizeof(struct ieee80211_radiotap_lsig);
+	if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
+		struct ieee80211_vendor_radiotap *radiotap = (void *)data;
+
+		data += sizeof(*radiotap) + radiotap->len + radiotap->pad;
+	}
+
+	return data;
+}
+
 static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
 				   int queue, struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
 	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
 	struct iwl_mvm_key_pn *ptk_pn;
 	int res;
@@ -222,6 +248,31 @@
 	skb_put_data(skb, hdr, hdrlen);
 	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
 
+	/*
+	 * If we did CHECKSUM_COMPLETE, the hardware only does it right for
+	 * certain cases and starts the checksum after the SNAP. Check if
+	 * this is the case - it's easier to just bail out to CHECKSUM_NONE
+	 * in the cases the hardware didn't handle, since it's rare to see
+	 * such packets, even though the hardware did calculate the checksum
+	 * in this case, just starting after the MAC header instead.
+	 */
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		struct {
+			u8 hdr[6];
+			__be16 type;
+		} __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
+
+		if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
+			     !ether_addr_equal(shdr->hdr, rfc1042_header) ||
+			     (shdr->type != htons(ETH_P_IP) &&
+			      shdr->type != htons(ETH_P_ARP) &&
+			      shdr->type != htons(ETH_P_IPV6) &&
+			      shdr->type != htons(ETH_P_8021Q) &&
+			      shdr->type != htons(ETH_P_PAE) &&
+			      shdr->type != htons(ETH_P_TDLS))))
+			skb->ip_summed = CHECKSUM_NONE;
+	}
+
 	fraglen = len - headlen;
 
 	if (fraglen) {
@@ -235,26 +286,50 @@
235286 return 0;
236287 }
237288
289
+static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
290
+ struct sk_buff *skb)
291
+{
292
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
293
+ struct ieee80211_vendor_radiotap *radiotap;
294
+ const int size = sizeof(*radiotap) + sizeof(__le16);
295
+
296
+ if (!mvm->cur_aid)
297
+ return;
298
+
299
+ /* ensure alignment */
300
+ BUILD_BUG_ON((size + 2) % 4);
301
+
302
+ radiotap = skb_put(skb, size + 2);
303
+ radiotap->align = 1;
304
+ /* Intel OUI */
305
+ radiotap->oui[0] = 0xf6;
306
+ radiotap->oui[1] = 0x54;
307
+ radiotap->oui[2] = 0x25;
308
+ /* radiotap sniffer config sub-namespace */
309
+ radiotap->subns = 1;
310
+ radiotap->present = 0x1;
311
+ radiotap->len = size - sizeof(*radiotap);
312
+ radiotap->pad = 2;
313
+
314
+ /* fill the data now */
315
+ memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
316
+ /* and clear the padding */
317
+ memset(radiotap->data + sizeof(__le16), 0, radiotap->pad);
318
+
319
+ rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
320
+}
321
+
238322 /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
239323 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
240324 struct napi_struct *napi,
241325 struct sk_buff *skb, int queue,
242
- struct ieee80211_sta *sta)
326
+ struct ieee80211_sta *sta,
327
+ bool csi)
243328 {
244
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
245
-
246
- if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
329
+ if (iwl_mvm_check_pn(mvm, skb, queue, sta))
247330 kfree_skb(skb);
248
- } else {
249
- unsigned int radiotap_len = 0;
250
-
251
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
252
- radiotap_len += sizeof(struct ieee80211_radiotap_he);
253
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
254
- radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
255
- __skb_push(skb, radiotap_len);
331
+ else
256332 ieee80211_rx_napi(mvm->hw, sta, skb, napi);
257
- }
258333 }
259334
260335 static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -285,7 +360,7 @@
 			     struct iwl_rx_mpdu_desc *desc,
 			     u32 pkt_flags, int queue, u8 *crypt_len)
 {
-	u16 status = le16_to_cpu(desc->status);
+	u32 status = le32_to_cpu(desc->status);
 
 	/*
 	 * Drop UNKNOWN frames in aggregation, unless in monitor mode
@@ -326,8 +401,12 @@
 		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
 			return 0;
 
+		if (mvm->trans->trans_cfg->gen2 &&
+		    !(status & RX_MPDU_RES_STATUS_MIC_OK))
+			stats->flag |= RX_FLAG_MMIC_ERROR;
+
 		*crypt_len = IEEE80211_TKIP_IV_LEN;
-		/* fall through if TTAK OK */
+		/* fall through */
 	case IWL_RX_MPDU_STATUS_SEC_WEP:
 		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
 			return -1;
@@ -337,8 +416,11 @@
 		    IWL_RX_MPDU_STATUS_SEC_WEP)
 			*crypt_len = IEEE80211_WEP_IV_LEN;
 
-		if (pkt_flags & FH_RSCSR_RADA_EN)
+		if (pkt_flags & FH_RSCSR_RADA_EN) {
 			stats->flag |= RX_FLAG_ICV_STRIPPED;
+			if (mvm->trans->trans_cfg->gen2)
+				stats->flag |= RX_FLAG_MMIC_STRIPPED;
+		}
 
 		return 0;
 	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
@@ -347,30 +429,52 @@
347429 stats->flag |= RX_FLAG_DECRYPTED;
348430 return 0;
349431 default:
350
- /* Expected in monitor (not having the keys) */
351
- if (!mvm->monitor_on)
432
+ /*
433
+ * Sometimes we can get frames that were not decrypted
434
+ * because the firmware didn't have the keys yet. This can
435
+ * happen after connection where we can get multicast frames
436
+ * before the GTK is installed.
437
+ * Silently drop those frames.
438
+ * Also drop un-decrypted frames in monitor mode.
439
+ */
440
+ if (!is_multicast_ether_addr(hdr->addr1) &&
441
+ !mvm->monitor_on && net_ratelimit())
352442 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
353443 }
354444
355445 return 0;
356446 }
357447
358
-static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
448
+static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
449
+ struct ieee80211_sta *sta,
359450 struct sk_buff *skb,
360
- struct iwl_rx_mpdu_desc *desc)
451
+ struct iwl_rx_packet *pkt)
361452 {
362
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
363
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
364
- u16 flags = le16_to_cpu(desc->l3l4_flags);
365
- u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
366
- IWL_RX_L3_PROTO_POS);
453
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
367454
368
- if (mvmvif->features & NETIF_F_RXCSUM &&
369
- flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
370
- (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
371
- l3_prot == IWL_RX_L3_TYPE_IPV6 ||
372
- l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
373
- skb->ip_summed = CHECKSUM_UNNECESSARY;
455
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
456
+ if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
457
+ u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);
458
+
459
+ skb->ip_summed = CHECKSUM_COMPLETE;
460
+ skb->csum = csum_unfold(~(__force __sum16)hwsum);
461
+ }
462
+ } else {
463
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
464
+ struct iwl_mvm_vif *mvmvif;
465
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
466
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
467
+ IWL_RX_L3_PROTO_POS);
468
+
469
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
470
+
471
+ if (mvmvif->features & NETIF_F_RXCSUM &&
472
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
473
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
474
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
475
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
476
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
477
+ }
374478 }
375479
376480 /*
@@ -433,19 +537,21 @@
433537 }
434538
435539 int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
436
- const u8 *data, u32 count)
540
+ const u8 *data, u32 count, bool async)
437541 {
438
- struct iwl_rxq_sync_cmd *cmd;
542
+ u8 buf[sizeof(struct iwl_rxq_sync_cmd) +
543
+ sizeof(struct iwl_mvm_rss_sync_notif)];
544
+ struct iwl_rxq_sync_cmd *cmd = (void *)buf;
439545 u32 data_size = sizeof(*cmd) + count;
440546 int ret;
441547
442
- /* should be DWORD aligned */
443
- if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
548
+ /*
549
+ * size must be a multiple of DWORD
550
+ * Ensure we don't overflow buf
551
+ */
552
+ if (WARN_ON(count & 3 ||
553
+ count > sizeof(struct iwl_mvm_rss_sync_notif)))
444554 return -EINVAL;
445
-
446
- cmd = kzalloc(data_size, GFP_KERNEL);
447
- if (!cmd)
448
- return -ENOMEM;
449555
450556 cmd->rxq_mask = cpu_to_le32(rxq_mask);
451557 cmd->count = cpu_to_le32(count);
@@ -455,9 +561,8 @@
455561 ret = iwl_mvm_send_cmd_pdu(mvm,
456562 WIDE_ID(DATA_PATH_GROUP,
457563 TRIGGER_RX_QUEUES_NOTIF_CMD),
458
- 0, data_size, cmd);
564
+ async ? CMD_ASYNC : 0, data_size, cmd);
459565
460
- kfree(cmd);
461566 return ret;
462567 }
463568
@@ -473,14 +578,34 @@
473578 !ieee80211_sn_less(sn1, sn2 - buffer_size);
474579 }
475580
581
+static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
582
+{
583
+ if (IWL_MVM_USE_NSSN_SYNC) {
584
+ struct iwl_mvm_rss_sync_notif notif = {
585
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
586
+ .metadata.sync = 0,
587
+ .nssn_sync.baid = baid,
588
+ .nssn_sync.nssn = nssn,
589
+ };
590
+
591
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
592
+ sizeof(notif));
593
+ }
594
+}
595
+
476596 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
597
+
598
+enum iwl_mvm_release_flags {
599
+ IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
600
+ IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
601
+};
477602
478603 static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
479604 struct ieee80211_sta *sta,
480605 struct napi_struct *napi,
481606 struct iwl_mvm_baid_data *baid_data,
482607 struct iwl_mvm_reorder_buffer *reorder_buf,
483
- u16 nssn)
608
+ u16 nssn, u32 flags)
484609 {
485610 struct iwl_mvm_reorder_buf_entry *entries =
486611 &baid_data->entries[reorder_buf->queue *
@@ -488,6 +613,18 @@
488613 u16 ssn = reorder_buf->head_sn;
489614
490615 lockdep_assert_held(&reorder_buf->lock);
616
+
617
+ /*
618
+ * We keep the NSSN not too far behind, if we are sync'ing it and it
619
+ * is more than 2048 ahead of us, it must be behind us. Discard it.
620
+ * This can happen if the queue that hit the 0 / 2048 seqno was lagging
621
+ * behind and this queue already processed packets. The next if
622
+ * would have caught cases where this queue would have processed less
623
+ * than 64 packets, but it may have processed more than 64 packets.
624
+ */
625
+ if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
626
+ ieee80211_sn_less(nssn, ssn))
627
+ goto set_timer;
491628
492629 /* ignore nssn smaller than head sn - this can happen due to timeout */
493630 if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
@@ -499,6 +636,9 @@
499636 struct sk_buff *skb;
500637
501638 ssn = ieee80211_sn_inc(ssn);
639
+ if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
640
+ (ssn == 2048 || ssn == 0))
641
+ iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
502642
503643 /*
504644 * Empty the list. Will have more than one frame for A-MSDU.
@@ -508,7 +648,7 @@
508648 while ((skb = __skb_dequeue(skb_list))) {
509649 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
510650 reorder_buf->queue,
511
- sta);
651
+ sta, false);
512652 reorder_buf->num_stored--;
513653 }
514654 }
@@ -585,7 +725,8 @@
585725 sta_id, sn);
586726 iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
587727 sta, baid_data->tid);
588
- iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
728
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
729
+ buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
589730 rcu_read_unlock();
590731 } else {
591732 /*
@@ -627,7 +768,8 @@
627768 spin_lock_bh(&reorder_buf->lock);
628769 iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
629770 ieee80211_sn_add(reorder_buf->head_sn,
630
- reorder_buf->buf_size));
771
+ reorder_buf->buf_size),
772
+ 0);
631773 spin_unlock_bh(&reorder_buf->lock);
632774 del_timer_sync(&reorder_buf->reorder_timer);
633775
@@ -635,8 +777,54 @@
635777 rcu_read_unlock();
636778 }
637779
638
-void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
639
- int queue)
780
+static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
781
+ struct napi_struct *napi,
782
+ u8 baid, u16 nssn, int queue,
783
+ u32 flags)
784
+{
785
+ struct ieee80211_sta *sta;
786
+ struct iwl_mvm_reorder_buffer *reorder_buf;
787
+ struct iwl_mvm_baid_data *ba_data;
788
+
789
+ IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
790
+ baid, nssn);
791
+
792
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
793
+ baid >= ARRAY_SIZE(mvm->baid_map)))
794
+ return;
795
+
796
+ rcu_read_lock();
797
+
798
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
799
+ if (WARN_ON_ONCE(!ba_data))
800
+ goto out;
801
+
802
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
803
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
804
+ goto out;
805
+
806
+ reorder_buf = &ba_data->reorder_buf[queue];
807
+
808
+ spin_lock_bh(&reorder_buf->lock);
809
+ iwl_mvm_release_frames(mvm, sta, napi, ba_data,
810
+ reorder_buf, nssn, flags);
811
+ spin_unlock_bh(&reorder_buf->lock);
812
+
813
+out:
814
+ rcu_read_unlock();
815
+}
816
+
817
+static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
818
+ struct napi_struct *napi, int queue,
819
+ const struct iwl_mvm_nssn_sync_data *data)
820
+{
821
+ iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
822
+ data->nssn, queue,
823
+ IWL_MVM_RELEASE_FROM_RSS_SYNC);
824
+}
825
+
826
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
827
+ struct iwl_rx_cmd_buffer *rxb, int queue)
640828 {
641829 struct iwl_rx_packet *pkt = rxb_addr(rxb);
642830 struct iwl_rxq_sync_notification *notif;
@@ -657,6 +845,10 @@
657845 case IWL_MVM_RXQ_NOTIF_DEL_BA:
658846 iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
659847 break;
848
+ case IWL_MVM_RXQ_NSSN_SYNC:
849
+ iwl_mvm_nssn_sync(mvm, napi, queue,
850
+ (void *)internal_notif->data);
851
+ break;
660852 default:
661853 WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
662854 }
@@ -664,6 +856,55 @@
664856 if (internal_notif->sync &&
665857 !atomic_dec_return(&mvm->queue_sync_counter))
666858 wake_up(&mvm->rx_sync_waitq);
859
+}
860
+
861
+static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm,
862
+ struct ieee80211_sta *sta, int tid,
863
+ struct iwl_mvm_reorder_buffer *buffer,
864
+ u32 reorder, u32 gp2, int queue)
865
+{
866
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
867
+
868
+ if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
869
+ /* we have a new (A-)MPDU ... */
870
+
871
+ /*
872
+ * reset counter to 0 if we didn't have any oldsn in
873
+ * the last A-MPDU (as detected by GP2 being identical)
874
+ */
875
+ if (!buffer->consec_oldsn_prev_drop)
876
+ buffer->consec_oldsn_drops = 0;
877
+
878
+ /* either way, update our tracking state */
879
+ buffer->consec_oldsn_ampdu_gp2 = gp2;
880
+ } else if (buffer->consec_oldsn_prev_drop) {
881
+ /*
882
+ * tracking state didn't change, and we had an old SN
883
+ * indication before - do nothing in this case, we
884
+ * already noted this one down and are waiting for the
885
+ * next A-MPDU (by GP2)
886
+ */
887
+ return;
888
+ }
889
+
890
+ /* return unless this MPDU has old SN */
891
+ if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN))
892
+ return;
893
+
894
+ /* update state */
895
+ buffer->consec_oldsn_prev_drop = 1;
896
+ buffer->consec_oldsn_drops++;
897
+
898
+ /* if limit is reached, send del BA and reset state */
899
+ if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) {
900
+ IWL_WARN(mvm,
901
+ "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n",
902
+ IWL_MVM_AMPDU_CONSEC_DROPS_DELBA,
903
+ sta->addr, queue, tid);
904
+ ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr);
905
+ buffer->consec_oldsn_prev_drop = 0;
906
+ buffer->consec_oldsn_drops = 0;
907
+ }
667908 }
668909
669910 /*
....@@ -677,7 +918,8 @@
677918 struct sk_buff *skb,
678919 struct iwl_rx_mpdu_desc *desc)
679920 {
680
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
921
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
922
+ struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
681923 struct iwl_mvm_sta *mvm_sta;
682924 struct iwl_mvm_baid_data *baid_data;
683925 struct iwl_mvm_reorder_buffer *buffer;
@@ -701,6 +943,8 @@
701943 * This also covers the case of receiving a Block Ack Request
702944 * outside a BA session; we'll pass it to mac80211 and that
703945 * then sends a delBA action frame.
946
+ * This also covers pure monitor mode, in which case we won't
947
+ * have any BA sessions.
704948 */
705949 if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
706950 return false;
@@ -753,7 +997,8 @@
753997 }
754998
755999 if (ieee80211_is_back_req(hdr->frame_control)) {
756
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
1000
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data,
1001
+ buffer, nssn, 0);
7571002 goto drop;
7581003 }
7591004
@@ -762,7 +1007,10 @@
7621007 * If the SN is smaller than the NSSN it might need to first go into
7631008 * the reorder buffer, in which case we just release up to it and the
7641009 * rest of the function will take care of storing it and releasing up to
765
- * the nssn
1010
+ * the nssn.
1011
+ * This should not happen. This queue has been lagging and it should
1012
+ * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
1013
+ * and update the other queues.
7661014 */
7671015 if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
7681016 buffer->buf_size) ||
@@ -770,8 +1018,11 @@
7701018 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
7711019
7721020 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
773
- min_sn);
1021
+ min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
7741022 }
1023
+
1024
+ iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder,
1025
+ rx_status->device_timestamp, queue);
7751026
7761027 /* drop any oudated packets */
7771028 if (ieee80211_sn_less(sn, buffer->head_sn))
@@ -781,8 +1032,23 @@
7811032 if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
7821033 if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
7831034 buffer->buf_size) &&
784
- (!amsdu || last_subframe))
1035
+ (!amsdu || last_subframe)) {
1036
+ /*
1037
+ * If we crossed the 2048 or 0 SN, notify all the
1038
+ * queues. This is done in order to avoid having a
1039
+ * head_sn that lags behind for too long. When that
1040
+ * happens, we can get to a situation where the head_sn
1041
+ * is within the interval [nssn - buf_size : nssn]
1042
+ * which will make us think that the nssn is a packet
1043
+ * that we already freed because of the reordering
1044
+ * buffer and we will ignore it. So maintain the
1045
+ * head_sn somewhat updated across all the queues:
1046
+ * when it crosses 0 and 2048.
1047
+ */
1048
+ if (sn == 2048 || sn == 0)
1049
+ iwl_mvm_sync_nssn(mvm, baid, sn);
7851050 buffer->head_sn = nssn;
1051
+ }
7861052 /* No need to update AMSDU last SN - we are moving the head */
7871053 spin_unlock_bh(&buffer->lock);
7881054 return false;
@@ -797,8 +1063,11 @@
7971063 * while technically there is no hole and we can move forward.
7981064 */
7991065 if (!buffer->num_stored && sn == buffer->head_sn) {
800
- if (!amsdu || last_subframe)
1066
+ if (!amsdu || last_subframe) {
1067
+ if (sn == 2048 || sn == 0)
1068
+ iwl_mvm_sync_nssn(mvm, baid, sn);
8011069 buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
1070
+ }
8021071 /* No need to update AMSDU last SN - we are moving the head */
8031072 spin_unlock_bh(&buffer->lock);
8041073 return false;
@@ -843,7 +1112,9 @@
8431112 * release notification with up to date NSSN.
8441113 */
8451114 if (!amsdu || last_subframe)
846
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
1115
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data,
1116
+ buffer, nssn,
1117
+ IWL_MVM_RELEASE_SEND_RSS_SYNC);
8471118
8481119 spin_unlock_bh(&buffer->lock);
8491120 return true;
@@ -899,6 +1170,460 @@
8991170 ether_addr_copy(addr, mac_addr);
9001171 }
9011172
1173
+struct iwl_mvm_rx_phy_data {
1174
+ enum iwl_rx_phy_info_type info_type;
1175
+ __le32 d0, d1, d2, d3;
1176
+ __le16 d4;
1177
+};
1178
+
1179
+static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
1180
+ struct iwl_mvm_rx_phy_data *phy_data,
1181
+ u32 rate_n_flags,
1182
+ struct ieee80211_radiotap_he_mu *he_mu)
1183
+{
1184
+ u32 phy_data2 = le32_to_cpu(phy_data->d2);
1185
+ u32 phy_data3 = le32_to_cpu(phy_data->d3);
1186
+ u16 phy_data4 = le16_to_cpu(phy_data->d4);
1187
+
1188
+ if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
1189
+ he_mu->flags1 |=
1190
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
1191
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
1192
+
1193
+ he_mu->flags1 |=
1194
+ le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU,
1195
+ phy_data4),
1196
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
1197
+
1198
+ he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0,
1199
+ phy_data2);
1200
+ he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1,
1201
+ phy_data3);
1202
+ he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2,
1203
+ phy_data2);
1204
+ he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3,
1205
+ phy_data3);
1206
+ }
1207
+
1208
+ if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) &&
1209
+ (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
1210
+ he_mu->flags1 |=
1211
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
1212
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
1213
+
1214
+ he_mu->flags2 |=
1215
+ le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU,
1216
+ phy_data4),
1217
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
1218
+
1219
+ he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0,
1220
+ phy_data2);
1221
+ he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1,
1222
+ phy_data3);
1223
+ he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2,
1224
+ phy_data2);
1225
+ he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3,
1226
+ phy_data3);
1227
+ }
1228
+}
1229
+
1230
+static void
1231
+iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
1232
+ u32 rate_n_flags,
1233
+ struct ieee80211_radiotap_he *he,
1234
+ struct ieee80211_radiotap_he_mu *he_mu,
1235
+ struct ieee80211_rx_status *rx_status)
1236
+{
1237
+ /*
1238
+ * Unfortunately, we have to leave the mac80211 data
1239
+ * incorrect for the case that we receive an HE-MU
1240
+ * transmission and *don't* have the HE phy data (due
1241
+ * to the bits being used for TSF). This shouldn't
1242
+ * happen though as management frames where we need
1243
+ * the TSF/timers are not be transmitted in HE-MU.
1244
+ */
1245
+ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
1246
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
1247
+ u8 offs = 0;
1248
+
1249
+ rx_status->bw = RATE_INFO_BW_HE_RU;
1250
+
1251
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
1252
+
1253
+ switch (ru) {
1254
+ case 0 ... 36:
1255
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
1256
+ offs = ru;
1257
+ break;
1258
+ case 37 ... 52:
1259
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
1260
+ offs = ru - 37;
1261
+ break;
1262
+ case 53 ... 60:
1263
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1264
+ offs = ru - 53;
1265
+ break;
1266
+ case 61 ... 64:
1267
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
1268
+ offs = ru - 61;
1269
+ break;
1270
+ case 65 ... 66:
1271
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
1272
+ offs = ru - 65;
1273
+ break;
1274
+ case 67:
1275
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
1276
+ break;
1277
+ case 68:
1278
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
1279
+ break;
1280
+ }
1281
+ he->data2 |= le16_encode_bits(offs,
1282
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
1283
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
1284
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
1285
+ if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
1286
+ he->data2 |=
1287
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
1288
+
1289
+#define CHECK_BW(bw) \
1290
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
1291
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
1292
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
1293
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
1294
+ CHECK_BW(20);
1295
+ CHECK_BW(40);
1296
+ CHECK_BW(80);
1297
+ CHECK_BW(160);
1298
+
1299
+ if (he_mu)
1300
+ he_mu->flags2 |=
1301
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
1302
+ rate_n_flags),
1303
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
1304
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
1305
+ he->data6 |=
1306
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
1307
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
1308
+ rate_n_flags),
1309
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
1310
+}
1311
+
1312
+static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
1313
+ struct iwl_mvm_rx_phy_data *phy_data,
1314
+ struct ieee80211_radiotap_he *he,
1315
+ struct ieee80211_radiotap_he_mu *he_mu,
1316
+ struct ieee80211_rx_status *rx_status,
1317
+ u32 rate_n_flags, int queue)
1318
+{
1319
+ switch (phy_data->info_type) {
1320
+ case IWL_RX_PHY_INFO_TYPE_NONE:
1321
+ case IWL_RX_PHY_INFO_TYPE_CCK:
1322
+ case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
1323
+ case IWL_RX_PHY_INFO_TYPE_HT:
1324
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
1325
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
1326
+ return;
1327
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
1328
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
1329
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
1330
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
1331
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
1332
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
1333
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
1334
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
1335
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
1336
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
1337
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
1338
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
1339
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
1340
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
1341
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
1342
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
1343
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
1344
+ /* fall through */
1345
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
1346
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
1347
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
1348
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
1349
+ /* HE common */
1350
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
1351
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
1352
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
1353
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
1354
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
1355
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
1356
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1357
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1358
+ IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
1359
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
1360
+ if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
1361
+ phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
1362
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
1363
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1364
+ IWL_RX_PHY_DATA0_HE_UPLINK),
1365
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
1366
+ }
1367
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1368
+ IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
1369
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
1370
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1371
+ IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
1372
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
1373
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1374
+ IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
1375
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
1376
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1,
1377
+ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
1378
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
1379
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1380
+ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
1381
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP);
1382
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1383
+ IWL_RX_PHY_DATA0_HE_DOPPLER),
1384
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
1385
+ break;
1386
+ }
1387
+
1388
+ switch (phy_data->info_type) {
1389
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
1390
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
1391
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
1392
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
1393
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1394
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
1395
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
1396
+ break;
1397
+ default:
1398
+ /* nothing here */
1399
+ break;
1400
+ }
1401
+
1402
+ switch (phy_data->info_type) {
1403
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
1404
+ he_mu->flags1 |=
1405
+ le16_encode_bits(le16_get_bits(phy_data->d4,
1406
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
1407
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
1408
+ he_mu->flags1 |=
1409
+ le16_encode_bits(le16_get_bits(phy_data->d4,
1410
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
1411
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
1412
+ he_mu->flags2 |=
1413
+ le16_encode_bits(le16_get_bits(phy_data->d4,
1414
+ IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
1415
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
1416
+ iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu);
1417
+ /* fall through */
1418
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
1419
+ he_mu->flags2 |=
1420
+ le16_encode_bits(le32_get_bits(phy_data->d1,
1421
+ IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
1422
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
1423
+ he_mu->flags2 |=
1424
+ le16_encode_bits(le32_get_bits(phy_data->d1,
1425
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
1426
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
1427
+ /* fall through */
1428
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
1429
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
1430
+ iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags,
1431
+ he, he_mu, rx_status);
1432
+ break;
1433
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
1434
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
1435
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
1436
+ IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
1437
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
1438
+ break;
1439
+ default:
1440
+ /* nothing */
1441
+ break;
1442
+ }
1443
+}
1444
+
1445
+static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
1446
+ struct iwl_mvm_rx_phy_data *phy_data,
1447
+ u32 rate_n_flags, u16 phy_info, int queue)
1448
+{
1449
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
1450
+ struct ieee80211_radiotap_he *he = NULL;
1451
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
1452
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
1453
+ u8 stbc, ltf;
1454
+ static const struct ieee80211_radiotap_he known = {
1455
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
1456
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
1457
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
1458
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
1459
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
1460
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
1461
+ };
1462
+ static const struct ieee80211_radiotap_he_mu mu_known = {
1463
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
1464
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
1465
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
1466
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
1467
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
1468
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
1469
+ };
1470
+
1471
+ he = skb_put_data(skb, &known, sizeof(known));
1472
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
1473
+
1474
+ if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
1475
+ phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
1476
+ he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
1477
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
1478
+ }
1479
+
1480
+ /* report the AMPDU-EOF bit on single frames */
1481
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1482
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1483
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1484
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
1485
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
1486
+ }
1487
+
1488
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
1489
+ iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
1490
+ rate_n_flags, queue);
1491
+
1492
+ /* update aggregation data for monitor sake on default queue */
1493
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
1494
+ (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1495
+ bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
1496
+
1497
+ /* toggle is switched whenever new aggregation starts */
1498
+ if (toggle_bit != mvm->ampdu_toggle) {
1499
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1500
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
1501
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
1502
+ }
1503
+ }
1504
+
1505
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
1506
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
1507
+ rx_status->bw = RATE_INFO_BW_HE_RU;
1508
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1509
+ }
1510
+
1511
+ /* actually data is filled in mac80211 */
1512
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
1513
+ he_type == RATE_MCS_HE_TYPE_EXT_SU)
1514
+ he->data1 |=
1515
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
1516
+
1517
+ stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
1518
+ rx_status->nss =
1519
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1520
+ RATE_VHT_MCS_NSS_POS) + 1;
1521
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
1522
+ rx_status->encoding = RX_ENC_HE;
1523
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1524
+ if (rate_n_flags & RATE_MCS_BF_MSK)
1525
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
1526
+
1527
+ rx_status->he_dcm =
1528
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
1529
+
1530
+#define CHECK_TYPE(F) \
1531
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
1532
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
1533
+
1534
+ CHECK_TYPE(SU);
1535
+ CHECK_TYPE(EXT_SU);
1536
+ CHECK_TYPE(MU);
1537
+ CHECK_TYPE(TRIG);
1538
+
1539
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
1540
+
1541
+ if (rate_n_flags & RATE_MCS_BF_MSK)
1542
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
1543
+
1544
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
1545
+ RATE_MCS_HE_GI_LTF_POS) {
1546
+ case 0:
1547
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
1548
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
1549
+ else
1550
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1551
+ if (he_type == RATE_MCS_HE_TYPE_MU)
1552
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
1553
+ else
1554
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
1555
+ break;
1556
+ case 1:
1557
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
1558
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
1559
+ else
1560
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1561
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
1562
+ break;
1563
+ case 2:
1564
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
1565
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
1566
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
1567
+ } else {
1568
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
1569
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
1570
+ }
1571
+ break;
1572
+ case 3:
1573
+ if ((he_type == RATE_MCS_HE_TYPE_SU ||
1574
+ he_type == RATE_MCS_HE_TYPE_EXT_SU) &&
1575
+ rate_n_flags & RATE_MCS_SGI_MSK)
1576
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1577
+ else
1578
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
1579
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
1580
+ break;
1581
+ }
1582
+
1583
+ he->data5 |= le16_encode_bits(ltf,
1584
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
1585
+}
1586
+
1587
+static void iwl_mvm_decode_lsig(struct sk_buff *skb,
1588
+ struct iwl_mvm_rx_phy_data *phy_data)
1589
+{
1590
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
1591
+ struct ieee80211_radiotap_lsig *lsig;
1592
+
1593
+ switch (phy_data->info_type) {
1594
+ case IWL_RX_PHY_INFO_TYPE_HT:
1595
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
1596
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
1597
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
1598
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
1599
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
1600
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
1601
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
1602
+ lsig = skb_put(skb, sizeof(*lsig));
1603
+ lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
1604
+ lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1,
1605
+ IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
1606
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
1607
+ rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
1608
+ break;
1609
+ default:
1610
+ break;
1611
+ }
1612
+}
1613
+
1614
+static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
1615
+{
1616
+ switch (phy_band) {
1617
+ case PHY_BAND_24:
1618
+ return NL80211_BAND_2GHZ;
1619
+ case PHY_BAND_5:
1620
+ return NL80211_BAND_5GHZ;
1621
+ default:
1622
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
1623
+ return NL80211_BAND_5GHZ;
1624
+ }
1625
+}
1626
+
9021627 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
9031628 struct iwl_rx_cmd_buffer *rxb, int queue)
9041629 {
@@ -912,24 +1637,28 @@
9121637 struct ieee80211_sta *sta = NULL;
9131638 struct sk_buff *skb;
9141639 u8 crypt_len = 0, channel, energy_a, energy_b;
915
- struct ieee80211_radiotap_he *he = NULL;
916
- struct ieee80211_radiotap_he_mu *he_mu = NULL;
917
- u32 he_type = 0xffffffff;
918
- /* this is invalid e.g. because puncture type doesn't allow 0b11 */
919
-#define HE_PHY_DATA_INVAL ((u64)-1)
920
- u64 he_phy_data = HE_PHY_DATA_INVAL;
9211640 size_t desc_size;
1641
+ struct iwl_mvm_rx_phy_data phy_data = {
1642
+ .d4 = desc->phy_data4,
1643
+ .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
1644
+ };
1645
+ bool csi = false;
9221646
9231647 if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
9241648 return;
9251649
926
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
1650
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
9271651 rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
9281652 channel = desc->v3.channel;
9291653 gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
9301654 energy_a = desc->v3.energy_a;
9311655 energy_b = desc->v3.energy_b;
9321656 desc_size = sizeof(*desc);
1657
+
1658
+ phy_data.d0 = desc->v3.phy_data0;
1659
+ phy_data.d1 = desc->v3.phy_data1;
1660
+ phy_data.d2 = desc->v3.phy_data2;
1661
+ phy_data.d3 = desc->v3.phy_data3;
9331662 } else {
9341663 rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
9351664 channel = desc->v1.channel;
@@ -937,7 +1666,17 @@
9371666 energy_a = desc->v1.energy_a;
9381667 energy_b = desc->v1.energy_b;
9391668 desc_size = IWL_RX_DESC_SIZE_V1;
1669
+
1670
+ phy_data.d0 = desc->v1.phy_data0;
1671
+ phy_data.d1 = desc->v1.phy_data1;
1672
+ phy_data.d2 = desc->v1.phy_data2;
1673
+ phy_data.d3 = desc->v1.phy_data3;
9401674 }
1675
+
1676
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
1677
+ phy_data.info_type =
1678
+ le32_get_bits(phy_data.d1,
1679
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
9411680
9421681 hdr = (void *)(pkt->data + desc_size);
9431682 /* Dont use dev_alloc_skb(), we'll have enough headroom once
@@ -961,48 +1700,26 @@
9611700
9621701 rx_status = IEEE80211_SKB_RXCB(skb);
9631702
964
- if (rate_n_flags & RATE_MCS_HE_MSK) {
965
- static const struct ieee80211_radiotap_he known = {
966
- .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
967
- IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
968
- IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
969
- IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
970
- .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
971
- IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
972
- };
973
- static const struct ieee80211_radiotap_he_mu mu_known = {
974
- .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
975
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
976
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
977
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
978
- .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
979
- };
980
- unsigned int radiotap_len = 0;
981
-
982
- he = skb_put_data(skb, &known, sizeof(known));
983
- radiotap_len += sizeof(known);
984
- rx_status->flag |= RX_FLAG_RADIOTAP_HE;
985
-
986
- he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
987
-
988
- if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
989
- if (mvm->trans->cfg->device_family >=
990
- IWL_DEVICE_FAMILY_22560)
991
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
992
- else
993
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
994
-
995
- if (he_type == RATE_MCS_HE_TYPE_MU) {
996
- he_mu = skb_put_data(skb, &mu_known,
997
- sizeof(mu_known));
998
- radiotap_len += sizeof(mu_known);
999
- rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
1000
- }
1001
- }
1002
-
1003
- /* temporarily hide the radiotap data */
1004
- __skb_pull(skb, radiotap_len);
1703
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
1704
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1705
+ case RATE_MCS_CHAN_WIDTH_20:
1706
+ break;
1707
+ case RATE_MCS_CHAN_WIDTH_40:
1708
+ rx_status->bw = RATE_INFO_BW_40;
1709
+ break;
1710
+ case RATE_MCS_CHAN_WIDTH_80:
1711
+ rx_status->bw = RATE_INFO_BW_80;
1712
+ break;
1713
+ case RATE_MCS_CHAN_WIDTH_160:
1714
+ rx_status->bw = RATE_INFO_BW_160;
1715
+ break;
10051716 }
1717
+
1718
+ if (rate_n_flags & RATE_MCS_HE_MSK)
1719
+ iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
1720
+ phy_info, queue);
1721
+
1722
+ iwl_mvm_decode_lsig(skb, &phy_data);
10061723
10071724 rx_status = IEEE80211_SKB_RXCB(skb);
10081725
@@ -1017,20 +1734,22 @@
10171734 * Keep packets with CRC errors (and with overrun) for monitor mode
10181735 * (otherwise the firmware discards them) but mark them as bad.
10191736 */
1020
- if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
1021
- !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
1737
+ if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
1738
+ !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
10221739 IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
1023
- le16_to_cpu(desc->status));
1740
+ le32_to_cpu(desc->status));
10241741 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
10251742 }
10261743 /* set the preamble flag if appropriate */
1027
- if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
1744
+ if (rate_n_flags & RATE_MCS_CCK_MSK &&
1745
+ phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
10281746 rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
10291747
10301748 if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
10311749 u64 tsf_on_air_rise;
10321750
1033
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1751
+ if (mvm->trans->trans_cfg->device_family >=
1752
+ IWL_DEVICE_FAMILY_AX210)
10341753 tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
10351754 else
10361755 tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
@@ -1038,56 +1757,17 @@
10381757 rx_status->mactime = tsf_on_air_rise;
10391758 /* TSF as indicated by the firmware is at INA time */
10401759 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
1041
- } else if (he_type == RATE_MCS_HE_TYPE_SU) {
1042
- u64 he_phy_data;
1043
-
1044
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1045
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1046
- else
1047
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1048
-
1049
- he->data1 |=
1050
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
1051
- if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
1052
- he_phy_data))
1053
- he->data3 |=
1054
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
1055
-
1056
- if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1057
- rx_status->ampdu_reference = mvm->ampdu_ref;
1058
- mvm->ampdu_ref++;
1059
-
1060
- rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1061
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1062
- if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1063
- he_phy_data))
1064
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
1065
- }
1066
- } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
1067
- he_mu->flags1 |=
1068
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
1069
- he_phy_data),
1070
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
1071
- he_mu->flags1 |=
1072
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
1073
- he_phy_data),
1074
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
1075
- he_mu->flags1 |=
1076
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
1077
- he_phy_data),
1078
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
1079
- he_mu->flags2 |=
1080
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
1081
- he_phy_data),
1082
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
1083
- he_mu->flags2 |=
1084
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
1085
- he_phy_data),
1086
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
10871760 }
1761
+
10881762 rx_status->device_timestamp = gp2_on_air_rise;
1089
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
1090
- NL80211_BAND_2GHZ;
1763
+ if (iwl_mvm_is_band_in_rx_supported(mvm)) {
1764
+ u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
1765
+
1766
+ rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
1767
+ } else {
1768
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
1769
+ NL80211_BAND_2GHZ;
1770
+ }
10911771 rx_status->freq = ieee80211_channel_to_frequency(channel,
10921772 rx_status->band);
10931773 iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
@@ -1096,37 +1776,31 @@
10961776 /* update aggregation data for monitor sake on default queue */
10971777 if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
10981778 bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
1099
- u64 he_phy_data;
1100
-
1101
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1102
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1103
- else
1104
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
11051779
11061780 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1107
- /* toggle is switched whenever new aggregation starts */
1781
+ /*
1782
+ * Toggle is switched whenever new aggregation starts. Make
1783
+ * sure ampdu_reference is never 0 so we can later use it to
1784
+ * see if the frame was really part of an A-MPDU or not.
1785
+ */
11081786 if (toggle_bit != mvm->ampdu_toggle) {
11091787 mvm->ampdu_ref++;
1788
+ if (mvm->ampdu_ref == 0)
1789
+ mvm->ampdu_ref++;
11101790 mvm->ampdu_toggle = toggle_bit;
1111
-
1112
- if (he_phy_data != HE_PHY_DATA_INVAL &&
1113
- he_type == RATE_MCS_HE_TYPE_MU) {
1114
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1115
- if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1116
- he_phy_data))
1117
- rx_status->flag |=
1118
- RX_FLAG_AMPDU_EOF_BIT;
1119
- }
11201791 }
11211792 rx_status->ampdu_reference = mvm->ampdu_ref;
11221793 }
11231794
1795
+ if (unlikely(mvm->monitor_on))
1796
+ iwl_mvm_add_rtap_sniffer_config(mvm, skb);
1797
+
11241798 rcu_read_lock();
11251799
1126
- if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
1127
- u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
1800
+ if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
1801
+ u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
11281802
1129
- if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
1803
+ if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
11301804 sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
11311805 if (IS_ERR(sta))
11321806 sta = NULL;
@@ -1146,6 +1820,8 @@
11461820 u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
11471821 IWL_RX_MPDU_REORDER_BAID_MASK) >>
11481822 IWL_RX_MPDU_REORDER_BAID_SHIFT);
1823
+ struct iwl_fw_dbg_trigger_tlv *trig;
1824
+ struct ieee80211_vif *vif = mvmsta->vif;
11491825
11501826 if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
11511827 !is_multicast_ether_addr(hdr->addr1) &&
@@ -1158,8 +1834,7 @@
11581834 * frames from a blocked station on a new channel we can
11591835 * TX to it again.
11601836 */
1161
- if (unlikely(tx_blocked_vif) &&
1162
- tx_blocked_vif == mvmsta->vif) {
1837
+ if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
11631838 struct iwl_mvm_vif *mvmvif =
11641839 iwl_mvm_vif_from_mac80211(tx_blocked_vif);
11651840
@@ -1170,29 +1845,24 @@
11701845
11711846 rs_update_last_rssi(mvm, mvmsta, rx_status);
11721847
1173
- if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
1174
- ieee80211_is_beacon(hdr->frame_control)) {
1175
- struct iwl_fw_dbg_trigger_tlv *trig;
1848
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
1849
+ ieee80211_vif_to_wdev(vif),
1850
+ FW_DBG_TRIGGER_RSSI);
1851
+
1852
+ if (trig && ieee80211_is_beacon(hdr->frame_control)) {
11761853 struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
1177
- bool trig_check;
11781854 s32 rssi;
11791855
1180
- trig = iwl_fw_dbg_get_trigger(mvm->fw,
1181
- FW_DBG_TRIGGER_RSSI);
11821856 rssi_trig = (void *)trig->data;
11831857 rssi = le32_to_cpu(rssi_trig->rssi);
11841858
1185
- trig_check =
1186
- iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
1187
- ieee80211_vif_to_wdev(mvmsta->vif),
1188
- trig);
1189
- if (trig_check && rx_status->signal < rssi)
1859
+ if (rx_status->signal < rssi)
11901860 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
11911861 NULL);
11921862 }
11931863
11941864 if (ieee80211_is_data(hdr->frame_control))
1195
- iwl_mvm_rx_csum(sta, skb, desc);
1865
+ iwl_mvm_rx_csum(mvm, sta, skb, pkt);
11961866
11971867 if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
11981868 kfree_skb(skb);
@@ -1211,7 +1881,7 @@
12111881
12121882 *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
12131883
1214
- if (mvm->trans->cfg->device_family ==
1884
+ if (mvm->trans->trans_cfg->device_family ==
12151885 IWL_DEVICE_FAMILY_9000) {
12161886 iwl_mvm_flip_address(hdr->addr3);
12171887
@@ -1224,84 +1894,6 @@
12241894
12251895 iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
12261896 }
1227
- }
1228
-
1229
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1230
- case RATE_MCS_CHAN_WIDTH_20:
1231
- break;
1232
- case RATE_MCS_CHAN_WIDTH_40:
1233
- rx_status->bw = RATE_INFO_BW_40;
1234
- break;
1235
- case RATE_MCS_CHAN_WIDTH_80:
1236
- rx_status->bw = RATE_INFO_BW_80;
1237
- break;
1238
- case RATE_MCS_CHAN_WIDTH_160:
1239
- rx_status->bw = RATE_INFO_BW_160;
1240
- break;
1241
- }
1242
-
1243
- if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
1244
- rate_n_flags & RATE_MCS_HE_106T_MSK) {
1245
- rx_status->bw = RATE_INFO_BW_HE_RU;
1246
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1247
- }
1248
-
1249
- if (rate_n_flags & RATE_MCS_HE_MSK &&
1250
- phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
1251
- he_type == RATE_MCS_HE_TYPE_MU) {
1252
- /*
1253
- * Unfortunately, we have to leave the mac80211 data
1254
- * incorrect for the case that we receive an HE-MU
1255
- * transmission and *don't* have the he_mu pointer,
1256
- * i.e. we don't have the phy data (due to the bits
1257
- * being used for TSF). This shouldn't happen though
1258
- * as management frames where we need the TSF/timers
1259
- * are not be transmitted in HE-MU, I think.
1260
- */
1261
- u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
1262
- u8 offs = 0;
1263
-
1264
- rx_status->bw = RATE_INFO_BW_HE_RU;
1265
-
1266
- switch (ru) {
1267
- case 0 ... 36:
1268
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
1269
- offs = ru;
1270
- break;
1271
- case 37 ... 52:
1272
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
1273
- offs = ru - 37;
1274
- break;
1275
- case 53 ... 60:
1276
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1277
- offs = ru - 53;
1278
- break;
1279
- case 61 ... 64:
1280
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
1281
- offs = ru - 61;
1282
- break;
1283
- case 65 ... 66:
1284
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
1285
- offs = ru - 65;
1286
- break;
1287
- case 67:
1288
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
1289
- break;
1290
- case 68:
1291
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
1292
- break;
1293
- }
1294
- he->data2 |=
1295
- le16_encode_bits(offs,
1296
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
1297
- he->data2 |=
1298
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
1299
- if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
1300
- he->data2 |=
1301
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
1302
- } else if (he) {
1303
- he->data1 |=
1304
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
13051897 }
13061898
13071899 if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
@@ -1328,120 +1920,7 @@
13281920 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
13291921 if (rate_n_flags & RATE_MCS_BF_MSK)
13301922 rx_status->enc_flags |= RX_ENC_FLAG_BF;
1331
- } else if (he) {
1332
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1333
- RATE_MCS_STBC_POS;
1334
- rx_status->nss =
1335
- ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1336
- RATE_VHT_MCS_NSS_POS) + 1;
1337
- rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
1338
- rx_status->encoding = RX_ENC_HE;
1339
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1340
- if (rate_n_flags & RATE_MCS_BF_MSK)
1341
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
1342
-
1343
- rx_status->he_dcm =
1344
- !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
1345
-
1346
-#define CHECK_TYPE(F) \
1347
- BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
1348
- (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
1349
-
1350
- CHECK_TYPE(SU);
1351
- CHECK_TYPE(EXT_SU);
1352
- CHECK_TYPE(MU);
1353
- CHECK_TYPE(TRIG);
1354
-
1355
- he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
1356
-
1357
- if (rate_n_flags & RATE_MCS_BF_POS)
1358
- he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
1359
-
1360
- switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
1361
- RATE_MCS_HE_GI_LTF_POS) {
1362
- case 0:
1363
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1364
- break;
1365
- case 1:
1366
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1367
- break;
1368
- case 2:
1369
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
1370
- break;
1371
- case 3:
1372
- if (rate_n_flags & RATE_MCS_SGI_MSK)
1373
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1374
- else
1375
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
1376
- break;
1377
- }
1378
-
1379
- switch (he_type) {
1380
- case RATE_MCS_HE_TYPE_SU: {
1381
- u16 val;
1382
-
1383
- /* LTF syms correspond to streams */
1384
- he->data2 |=
1385
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1386
- switch (rx_status->nss) {
1387
- case 1:
1388
- val = 0;
1389
- break;
1390
- case 2:
1391
- val = 1;
1392
- break;
1393
- case 3:
1394
- case 4:
1395
- val = 2;
1396
- break;
1397
- case 5:
1398
- case 6:
1399
- val = 3;
1400
- break;
1401
- case 7:
1402
- case 8:
1403
- val = 4;
1404
- break;
1405
- default:
1406
- WARN_ONCE(1, "invalid nss: %d\n",
1407
- rx_status->nss);
1408
- val = 0;
1409
- }
1410
- he->data5 |=
1411
- le16_encode_bits(val,
1412
- IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
1413
- }
1414
- break;
1415
- case RATE_MCS_HE_TYPE_MU: {
1416
- u16 val;
1417
- u64 he_phy_data;
1418
-
1419
- if (mvm->trans->cfg->device_family >=
1420
- IWL_DEVICE_FAMILY_22560)
1421
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1422
- else
1423
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1424
-
1425
- if (he_phy_data == HE_PHY_DATA_INVAL)
1426
- break;
1427
-
1428
- val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
1429
- he_phy_data);
1430
-
1431
- he->data2 |=
1432
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1433
- he->data5 |=
1434
- cpu_to_le16(FIELD_PREP(
1435
- IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
1436
- val));
1437
- }
1438
- break;
1439
- case RATE_MCS_HE_TYPE_EXT_SU:
1440
- case RATE_MCS_HE_TYPE_TRIG:
1441
- /* not supported yet */
1442
- break;
1443
- }
1444
- } else {
1923
+ } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
14451924 int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
14461925 rx_status->band);
14471926
....@@ -1452,7 +1931,6 @@
14521931 goto out;
14531932 }
14541933 rx_status->rate_idx = rate;
1455
-
14561934 }
14571935
14581936 /* management stuff on default queue */
....@@ -1465,7 +1943,7 @@
14651943
14661944 if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
14671945 ieee80211_is_probe_resp(hdr->frame_control)))
1468
- rx_status->boottime_ns = ktime_get_boot_ns();
1946
+ rx_status->boottime_ns = ktime_get_boottime_ns();
14691947 }
14701948
14711949 if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
....@@ -1474,7 +1952,150 @@
14741952 }
14751953
14761954 if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
1477
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
1955
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
1956
+ sta, csi);
1957
+out:
1958
+ rcu_read_unlock();
1959
+}
1960
+
1961
+void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
1962
+ struct iwl_rx_cmd_buffer *rxb, int queue)
1963
+{
1964
+ struct ieee80211_rx_status *rx_status;
1965
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
1966
+ struct iwl_rx_no_data *desc = (void *)pkt->data;
1967
+ u32 rate_n_flags = le32_to_cpu(desc->rate);
1968
+ u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
1969
+ u32 rssi = le32_to_cpu(desc->rssi);
1970
+ u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
1971
+ u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
1972
+ struct ieee80211_sta *sta = NULL;
1973
+ struct sk_buff *skb;
1974
+ u8 channel, energy_a, energy_b;
1975
+ struct iwl_mvm_rx_phy_data phy_data = {
1976
+ .d0 = desc->phy_info[0],
1977
+ .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
1978
+ };
1979
+
1980
+ if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
1981
+ return;
1982
+
1983
+ energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
1984
+ energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
1985
+ channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
1986
+
1987
+ phy_data.info_type =
1988
+ le32_get_bits(desc->phy_info[1],
1989
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
1990
+
1991
+ /* Don't use dev_alloc_skb(); we'll have enough headroom once
1992
+ * the ieee80211_hdr is pulled.
1993
+ */
1994
+ skb = alloc_skb(128, GFP_ATOMIC);
1995
+ if (!skb) {
1996
+ IWL_ERR(mvm, "alloc_skb failed\n");
1997
+ return;
1998
+ }
1999
+
2000
+ rx_status = IEEE80211_SKB_RXCB(skb);
2001
+
2002
+ /* 0-length PSDU */
2003
+ rx_status->flag |= RX_FLAG_NO_PSDU;
2004
+
2005
+ switch (info_type) {
2006
+ case RX_NO_DATA_INFO_TYPE_NDP:
2007
+ rx_status->zero_length_psdu_type =
2008
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
2009
+ break;
2010
+ case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
2011
+ case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED:
2012
+ rx_status->zero_length_psdu_type =
2013
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
2014
+ break;
2015
+ default:
2016
+ rx_status->zero_length_psdu_type =
2017
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
2018
+ break;
2019
+ }
2020
+
2021
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
2022
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
2023
+ case RATE_MCS_CHAN_WIDTH_20:
2024
+ break;
2025
+ case RATE_MCS_CHAN_WIDTH_40:
2026
+ rx_status->bw = RATE_INFO_BW_40;
2027
+ break;
2028
+ case RATE_MCS_CHAN_WIDTH_80:
2029
+ rx_status->bw = RATE_INFO_BW_80;
2030
+ break;
2031
+ case RATE_MCS_CHAN_WIDTH_160:
2032
+ rx_status->bw = RATE_INFO_BW_160;
2033
+ break;
2034
+ }
2035
+
2036
+ if (rate_n_flags & RATE_MCS_HE_MSK)
2037
+ iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
2038
+ phy_info, queue);
2039
+
2040
+ iwl_mvm_decode_lsig(skb, &phy_data);
2041
+
2042
+ rx_status->device_timestamp = gp2_on_air_rise;
2043
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
2044
+ NL80211_BAND_2GHZ;
2045
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
2046
+ rx_status->band);
2047
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
2048
+ energy_b);
2049
+
2050
+ rcu_read_lock();
2051
+
2052
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
2053
+ rate_n_flags & RATE_MCS_SGI_MSK)
2054
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2055
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
2056
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
2057
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
2058
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2059
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
2060
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
2061
+ RATE_MCS_STBC_POS;
2062
+ rx_status->encoding = RX_ENC_HT;
2063
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
2064
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
2065
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
2066
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
2067
+ RATE_MCS_STBC_POS;
2068
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
2069
+ rx_status->encoding = RX_ENC_VHT;
2070
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
2071
+ if (rate_n_flags & RATE_MCS_BF_MSK)
2072
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
2073
+ /*
2074
+ * Take the NSS from rx_vec, since rate_n_flags has only
2075
+ * 2 bits for the NSS, which caps it at 4 spatial streams,
2076
+ * while there may be up to 8.
2077
+ */
2078
+ rx_status->nss =
2079
+ le32_get_bits(desc->rx_vec[0],
2080
+ RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
2081
+ } else if (rate_n_flags & RATE_MCS_HE_MSK) {
2082
+ rx_status->nss =
2083
+ le32_get_bits(desc->rx_vec[0],
2084
+ RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
2085
+ } else {
2086
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
2087
+ rx_status->band);
2088
+
2089
+ if (WARN(rate < 0 || rate > 0xFF,
2090
+ "Invalid rate flags 0x%x, band %d\n",
2091
+ rate_n_flags, rx_status->band)) {
2092
+ kfree_skb(skb);
2093
+ goto out;
2094
+ }
2095
+ rx_status->rate_idx = rate;
2096
+ }
2097
+
2098
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
14782099 out:
14792100 rcu_read_unlock();
14802101 }
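/*
 * Editor's sketch (not part of this patch): why iwl_mvm_rx_monitor_no_data()
 * above takes the NSS from rx_vec[0] instead of rate_n_flags. A 2-bit NSS
 * field can only describe up to 4 spatial streams, while the NSTS field in
 * the RX vector is wide enough for 8. The EXAMPLE_ and example_ names below
 * are hypothetical stand-ins, not firmware definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_RATE_NSS_MSK 0x3u /* hypothetical 2-bit NSS field */
#define EXAMPLE_VEC_NSTS_MSK 0x7u /* hypothetical 3-bit NSTS field */

static unsigned int example_nss_from_rate(uint32_t rate_n_flags)
{
	/* two bits encode NSS - 1, so the result is capped at 4 */
	return (rate_n_flags & EXAMPLE_RATE_NSS_MSK) + 1;
}

static unsigned int example_nss_from_rx_vec(uint32_t rx_vec0)
{
	/* three bits encode NSTS, allowing up to 8 streams */
	return (rx_vec0 & EXAMPLE_VEC_NSTS_MSK) + 1;
}

int main(void)
{
	printf("NSS from rate_n_flags caps at %u streams\n",
	       example_nss_from_rate(0xffffffffu));
	printf("NSS from rx_vec can report up to %u streams\n",
	       example_nss_from_rx_vec(0xffffffffu));
	return 0;
}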
....@@ -1484,35 +2105,47 @@
14842105 {
14852106 struct iwl_rx_packet *pkt = rxb_addr(rxb);
14862107 struct iwl_frame_release *release = (void *)pkt->data;
1487
- struct ieee80211_sta *sta;
1488
- struct iwl_mvm_reorder_buffer *reorder_buf;
1489
- struct iwl_mvm_baid_data *ba_data;
14902108
1491
- int baid = release->baid;
2109
+ iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
2110
+ le16_to_cpu(release->nssn),
2111
+ queue, 0);
2112
+}
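/*
 * Editor's sketch (not part of this patch): the general "release everything
 * below the new NSSN" behaviour that both iwl_mvm_rx_frame_release() above
 * and the BAR release path below delegate to via
 * iwl_mvm_release_frames_from_notif(). The structure and names here are
 * hypothetical and greatly simplified; the real driver walks a per-queue
 * reorder buffer under its spinlock.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_BUF_SIZE 64 /* hypothetical reorder window size */

struct example_reorder_buf {
	bool ready[EXAMPLE_BUF_SIZE]; /* is a frame buffered for this SN? */
	unsigned int head_sn;         /* next sequence number expected */
};

/* Deliver (here: just print) every buffered frame with SN below nssn. */
static void example_release_frames(struct example_reorder_buf *buf,
				   unsigned int nssn)
{
	while (buf->head_sn != nssn) {
		unsigned int idx = buf->head_sn % EXAMPLE_BUF_SIZE;

		if (buf->ready[idx]) {
			printf("deliver SN %u\n", buf->head_sn);
			buf->ready[idx] = false;
		}
		/* 802.11 sequence numbers wrap at 12 bits */
		buf->head_sn = (buf->head_sn + 1) & 0xfff;
	}
}

int main(void)
{
	struct example_reorder_buf buf = { .head_sn = 10 };

	buf.ready[11 % EXAMPLE_BUF_SIZE] = true;
	/* a frame-release notification with NSSN 13 flushes SNs 10..12 */
	example_release_frames(&buf, 13);
	return 0;
}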
14922113
1493
- IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
1494
- release->baid, le16_to_cpu(release->nssn));
2114
+void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
2115
+ struct iwl_rx_cmd_buffer *rxb, int queue)
2116
+{
2117
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
2118
+ struct iwl_bar_frame_release *release = (void *)pkt->data;
2119
+ unsigned int baid = le32_get_bits(release->ba_info,
2120
+ IWL_BAR_FRAME_RELEASE_BAID_MASK);
2121
+ unsigned int nssn = le32_get_bits(release->ba_info,
2122
+ IWL_BAR_FRAME_RELEASE_NSSN_MASK);
2123
+ unsigned int sta_id = le32_get_bits(release->sta_tid,
2124
+ IWL_BAR_FRAME_RELEASE_STA_MASK);
2125
+ unsigned int tid = le32_get_bits(release->sta_tid,
2126
+ IWL_BAR_FRAME_RELEASE_TID_MASK);
2127
+ struct iwl_mvm_baid_data *baid_data;
14952128
1496
- if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2129
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
2130
+ baid >= ARRAY_SIZE(mvm->baid_map)))
14972131 return;
14982132
14992133 rcu_read_lock();
2134
+ baid_data = rcu_dereference(mvm->baid_map[baid]);
2135
+ if (!baid_data) {
2136
+ IWL_DEBUG_RX(mvm,
2137
+ "Got valid BAID %d but not allocated, invalid BAR release!\n",
2138
+ baid);
2139
+ goto out;
2140
+ }
15002141
1501
- ba_data = rcu_dereference(mvm->baid_map[baid]);
1502
- if (WARN_ON_ONCE(!ba_data))
2142
+ if (WARN(tid != baid_data->tid || sta_id != baid_data->sta_id,
2143
+ "baid 0x%x is mapped to sta:%d tid:%d, but BAR release received for sta:%d tid:%d\n",
2144
+ baid, baid_data->sta_id, baid_data->tid, sta_id,
2145
+ tid))
15032146 goto out;
15042147
1505
- sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
1506
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1507
- goto out;
1508
-
1509
- reorder_buf = &ba_data->reorder_buf[queue];
1510
-
1511
- spin_lock_bh(&reorder_buf->lock);
1512
- iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
1513
- le16_to_cpu(release->nssn));
1514
- spin_unlock_bh(&reorder_buf->lock);
1515
-
2148
+ iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0);
15162149 out:
15172150 rcu_read_unlock();
15182151 }
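/*
 * Editor's sketch (not part of this patch): how fixed-width fields are pulled
 * out of the packed notification words in iwl_mvm_rx_bar_frame_release()
 * above, in the spirit of le32_get_bits(). The masks and the helper macro are
 * hypothetical stand-ins, not the real IWL_BAR_FRAME_RELEASE_ definitions,
 * and the sketch assumes a word already converted to CPU byte order.
 */
#include <stdint.h>
#include <stdio.h>

/* Minimal FIELD_GET()-style helper for a contiguous mask. */
#define EXAMPLE_FIELD_GET(mask, word) \
	(((word) & (mask)) / ((mask) & ~((mask) - 1u)))

#define EXAMPLE_BAID_MASK 0x3f000000u /* hypothetical bit layout */
#define EXAMPLE_NSSN_MASK 0x00000fffu

int main(void)
{
	uint32_t ba_info = 0x05000123u; /* example notification word */
	unsigned int baid = EXAMPLE_FIELD_GET(EXAMPLE_BAID_MASK, ba_info);
	unsigned int nssn = EXAMPLE_FIELD_GET(EXAMPLE_NSSN_MASK, ba_info);

	/* prints "baid=5 nssn=291" for the example word above */
	printf("baid=%u nssn=%u\n", baid, nssn);
	return 0;
}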