hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/wireless/ath/ath10k/htt_rx.c
....@@ -1,19 +1,8 @@
1
+// SPDX-License-Identifier: ISC
12 /*
23 * Copyright (c) 2005-2011 Atheros Communications Inc.
34 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
45 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
5
- *
6
- * Permission to use, copy, modify, and/or distribute this software for any
7
- * purpose with or without fee is hereby granted, provided that the above
8
- * copyright notice and this permission notice appear in all copies.
9
- *
10
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
176 */
187
198 #include "core.h"
....@@ -273,6 +262,9 @@
273262 struct ath10k_htt *htt = &ar->htt;
274263 int ret;
275264
265
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
266
+ return 0;
267
+
276268 spin_lock_bh(&htt->rx_ring.lock);
277269 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
278270 htt->rx_ring.fill_cnt));
....@@ -287,6 +279,9 @@
287279
288280 void ath10k_htt_rx_free(struct ath10k_htt *htt)
289281 {
282
+ if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
283
+ return;
284
+
290285 del_timer_sync(&htt->rx_ring.refill_retry_timer);
291286
292287 skb_queue_purge(&htt->rx_msdus_q);
....@@ -471,6 +466,166 @@
471466 return msdu;
472467 }
473468
469
+static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
470
+ struct sk_buff *frag_list,
471
+ unsigned int frag_len)
472
+{
473
+ skb_shinfo(skb_head)->frag_list = frag_list;
474
+ skb_head->data_len = frag_len;
475
+ skb_head->len += skb_head->data_len;
476
+}
477
+
478
+static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
479
+ struct sk_buff *msdu,
480
+ struct htt_rx_in_ord_msdu_desc **msdu_desc)
481
+{
482
+ struct ath10k *ar = htt->ar;
483
+ u32 paddr;
484
+ struct sk_buff *frag_buf;
485
+ struct sk_buff *prev_frag_buf;
486
+ u8 last_frag;
487
+ struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
488
+ struct htt_rx_desc *rxd;
489
+ int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
490
+
491
+ rxd = (void *)msdu->data;
492
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
493
+
494
+ skb_put(msdu, sizeof(struct htt_rx_desc));
495
+ skb_pull(msdu, sizeof(struct htt_rx_desc));
496
+ skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
497
+ amsdu_len -= msdu->len;
498
+
499
+ last_frag = ind_desc->reserved;
500
+ if (last_frag) {
501
+ if (amsdu_len) {
502
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
503
+ __le16_to_cpu(ind_desc->msdu_len),
504
+ amsdu_len);
505
+ }
506
+ return 0;
507
+ }
508
+
509
+ ind_desc++;
510
+ paddr = __le32_to_cpu(ind_desc->msdu_paddr);
511
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
512
+ if (!frag_buf) {
513
+ ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
514
+ return -ENOENT;
515
+ }
516
+
517
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
518
+ ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
519
+
520
+ amsdu_len -= frag_buf->len;
521
+ prev_frag_buf = frag_buf;
522
+ last_frag = ind_desc->reserved;
523
+ while (!last_frag) {
524
+ ind_desc++;
525
+ paddr = __le32_to_cpu(ind_desc->msdu_paddr);
526
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
527
+ if (!frag_buf) {
528
+ ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
529
+ paddr);
530
+ prev_frag_buf->next = NULL;
531
+ return -ENOENT;
532
+ }
533
+
534
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
535
+ last_frag = ind_desc->reserved;
536
+ amsdu_len -= frag_buf->len;
537
+
538
+ prev_frag_buf->next = frag_buf;
539
+ prev_frag_buf = frag_buf;
540
+ }
541
+
542
+ if (amsdu_len) {
543
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
544
+ __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
545
+ }
546
+
547
+ *msdu_desc = ind_desc;
548
+
549
+ prev_frag_buf->next = NULL;
550
+ return 0;
551
+}
552
+
553
+static int
554
+ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
555
+ struct sk_buff *msdu,
556
+ struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
557
+{
558
+ struct ath10k *ar = htt->ar;
559
+ u64 paddr;
560
+ struct sk_buff *frag_buf;
561
+ struct sk_buff *prev_frag_buf;
562
+ u8 last_frag;
563
+ struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
564
+ struct htt_rx_desc *rxd;
565
+ int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
566
+
567
+ rxd = (void *)msdu->data;
568
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
569
+
570
+ skb_put(msdu, sizeof(struct htt_rx_desc));
571
+ skb_pull(msdu, sizeof(struct htt_rx_desc));
572
+ skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
573
+ amsdu_len -= msdu->len;
574
+
575
+ last_frag = ind_desc->reserved;
576
+ if (last_frag) {
577
+ if (amsdu_len) {
578
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
579
+ __le16_to_cpu(ind_desc->msdu_len),
580
+ amsdu_len);
581
+ }
582
+ return 0;
583
+ }
584
+
585
+ ind_desc++;
586
+ paddr = __le64_to_cpu(ind_desc->msdu_paddr);
587
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
588
+ if (!frag_buf) {
589
+ ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
590
+ return -ENOENT;
591
+ }
592
+
593
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
594
+ ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
595
+
596
+ amsdu_len -= frag_buf->len;
597
+ prev_frag_buf = frag_buf;
598
+ last_frag = ind_desc->reserved;
599
+ while (!last_frag) {
600
+ ind_desc++;
601
+ paddr = __le64_to_cpu(ind_desc->msdu_paddr);
602
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
603
+ if (!frag_buf) {
604
+ ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
605
+ paddr);
606
+ prev_frag_buf->next = NULL;
607
+ return -ENOENT;
608
+ }
609
+
610
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
611
+ last_frag = ind_desc->reserved;
612
+ amsdu_len -= frag_buf->len;
613
+
614
+ prev_frag_buf->next = frag_buf;
615
+ prev_frag_buf = frag_buf;
616
+ }
617
+
618
+ if (amsdu_len) {
619
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
620
+ __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
621
+ }
622
+
623
+ *msdu_desc = ind_desc;
624
+
625
+ prev_frag_buf->next = NULL;
626
+ return 0;
627
+}
628
+
474629 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
475630 struct htt_rx_in_ord_ind *ev,
476631 struct sk_buff_head *list)
....@@ -479,7 +634,7 @@
479634 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
480635 struct htt_rx_desc *rxd;
481636 struct sk_buff *msdu;
482
- int msdu_count;
637
+ int msdu_count, ret;
483638 bool is_offload;
484639 u32 paddr;
485640
....@@ -495,6 +650,18 @@
495650 if (!msdu) {
496651 __skb_queue_purge(list);
497652 return -ENOENT;
653
+ }
654
+
655
+ if (!is_offload && ar->monitor_arvif) {
656
+ ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
657
+ &msdu_desc);
658
+ if (ret) {
659
+ __skb_queue_purge(list);
660
+ return ret;
661
+ }
662
+ __skb_queue_tail(list, msdu);
663
+ msdu_desc++;
664
+ continue;
498665 }
499666
500667 __skb_queue_tail(list, msdu);
....@@ -529,7 +696,7 @@
529696 struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
530697 struct htt_rx_desc *rxd;
531698 struct sk_buff *msdu;
532
- int msdu_count;
699
+ int msdu_count, ret;
533700 bool is_offload;
534701 u64 paddr;
535702
....@@ -544,6 +711,18 @@
544711 if (!msdu) {
545712 __skb_queue_purge(list);
546713 return -ENOENT;
714
+ }
715
+
716
+ if (!is_offload && ar->monitor_arvif) {
717
+ ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
718
+ &msdu_desc);
719
+ if (ret) {
720
+ __skb_queue_purge(list);
721
+ return ret;
722
+ }
723
+ __skb_queue_tail(list, msdu);
724
+ msdu_desc++;
725
+ continue;
547726 }
548727
549728 __skb_queue_tail(list, msdu);
....@@ -577,6 +756,9 @@
577756 void *vaddr, *vaddr_ring;
578757 size_t size;
579758 struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
759
+
760
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
761
+ return 0;
580762
581763 htt->rx_confused = false;
582764
....@@ -1117,6 +1299,13 @@
11171299
11181300 status = IEEE80211_SKB_RXCB(skb);
11191301
1302
+ if (!(ar->filter_flags & FIF_FCSFAIL) &&
1303
+ status->flag & RX_FLAG_FAILED_FCS_CRC) {
1304
+ ar->stats.rx_crc_err_drop++;
1305
+ dev_kfree_skb_any(skb);
1306
+ return;
1307
+ }
1308
+
11201309 ath10k_dbg(ar, ATH10K_DBG_DATA,
11211310 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
11221311 skb,
....@@ -1164,7 +1353,8 @@
11641353 struct sk_buff *msdu,
11651354 struct ieee80211_rx_status *status,
11661355 enum htt_rx_mpdu_encrypt_type enctype,
1167
- bool is_decrypted)
1356
+ bool is_decrypted,
1357
+ const u8 first_hdr[64])
11681358 {
11691359 struct ieee80211_hdr *hdr;
11701360 struct htt_rx_desc *rxd;
....@@ -1172,6 +1362,9 @@
11721362 size_t crypto_len;
11731363 bool is_first;
11741364 bool is_last;
1365
+ bool msdu_limit_err;
1366
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
1367
+ u8 *qos;
11751368
11761369 rxd = (void *)msdu->data - sizeof(*rxd);
11771370 is_first = !!(rxd->msdu_end.common.info0 &
....@@ -1189,15 +1382,44 @@
11891382 * [FCS] <-- at end, needs to be trimmed
11901383 */
11911384
1385
+ /* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when
1386
+ * deaggregate, so that unwanted MSDU-deaggregation is avoided for
1387
+ * error packets. If limit exceeds, hw sends all remaining MSDUs as
1388
+ * a single last MSDU with this msdu limit error set.
1389
+ */
1390
+ msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);
1391
+
1392
+ /* If MSDU limit error happens, then don't warn on, the partial raw MSDU
1393
+ * without first MSDU is expected in that case, and handled later here.
1394
+ */
11921395 /* This probably shouldn't happen but warn just in case */
1193
- if (unlikely(WARN_ON_ONCE(!is_first)))
1396
+ if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
11941397 return;
11951398
11961399 /* This probably shouldn't happen but warn just in case */
1197
- if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
1400
+ if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
11981401 return;
11991402
12001403 skb_trim(msdu, msdu->len - FCS_LEN);
1404
+
1405
+ /* Push original 80211 header */
1406
+ if (unlikely(msdu_limit_err)) {
1407
+ hdr = (struct ieee80211_hdr *)first_hdr;
1408
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
1409
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1410
+
1411
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
1412
+ qos = ieee80211_get_qos_ctl(hdr);
1413
+ qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1414
+ }
1415
+
1416
+ if (crypto_len)
1417
+ memcpy(skb_push(msdu, crypto_len),
1418
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
1419
+ crypto_len);
1420
+
1421
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1422
+ }
12011423
12021424 /* In most cases this will be true for sniffed frames. It makes sense
12031425 * to deliver them as-is without stripping the crypto param. This is
....@@ -1472,7 +1694,7 @@
14721694 switch (decap) {
14731695 case RX_MSDU_DECAP_RAW:
14741696 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1475
- is_decrypted);
1697
+ is_decrypted, first_hdr);
14761698 break;
14771699 case RX_MSDU_DECAP_NATIVE_WIFI:
14781700 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
....@@ -1524,16 +1746,97 @@
15241746 msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
15251747 }
15261748
1749
+static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
1750
+ u16 offset,
1751
+ enum htt_rx_mpdu_encrypt_type enctype)
1752
+{
1753
+ struct ieee80211_hdr *hdr;
1754
+ u64 pn = 0;
1755
+ u8 *ehdr;
1756
+
1757
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
1758
+ ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
1759
+
1760
+ if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
1761
+ pn = ehdr[0];
1762
+ pn |= (u64)ehdr[1] << 8;
1763
+ pn |= (u64)ehdr[4] << 16;
1764
+ pn |= (u64)ehdr[5] << 24;
1765
+ pn |= (u64)ehdr[6] << 32;
1766
+ pn |= (u64)ehdr[7] << 40;
1767
+ }
1768
+ return pn;
1769
+}
1770
+
1771
+static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
1772
+ struct sk_buff *skb,
1773
+ u16 offset)
1774
+{
1775
+ struct ieee80211_hdr *hdr;
1776
+
1777
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
1778
+ return !is_multicast_ether_addr(hdr->addr1);
1779
+}
1780
+
1781
+static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
1782
+ struct sk_buff *skb,
1783
+ u16 peer_id,
1784
+ u16 offset,
1785
+ enum htt_rx_mpdu_encrypt_type enctype)
1786
+{
1787
+ struct ath10k_peer *peer;
1788
+ union htt_rx_pn_t *last_pn, new_pn = {0};
1789
+ struct ieee80211_hdr *hdr;
1790
+ bool more_frags;
1791
+ u8 tid, frag_number;
1792
+ u32 seq;
1793
+
1794
+ peer = ath10k_peer_find_by_id(ar, peer_id);
1795
+ if (!peer) {
1796
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
1797
+ return false;
1798
+ }
1799
+
1800
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
1801
+ if (ieee80211_is_data_qos(hdr->frame_control))
1802
+ tid = ieee80211_get_tid(hdr);
1803
+ else
1804
+ tid = ATH10K_TXRX_NON_QOS_TID;
1805
+
1806
+ last_pn = &peer->frag_tids_last_pn[tid];
1807
+ new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
1808
+ more_frags = ieee80211_has_morefrags(hdr->frame_control);
1809
+ frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1810
+ seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
1811
+
1812
+ if (frag_number == 0) {
1813
+ last_pn->pn48 = new_pn.pn48;
1814
+ peer->frag_tids_seq[tid] = seq;
1815
+ } else {
1816
+ if (seq != peer->frag_tids_seq[tid])
1817
+ return false;
1818
+
1819
+ if (new_pn.pn48 != last_pn->pn48 + 1)
1820
+ return false;
1821
+
1822
+ last_pn->pn48 = new_pn.pn48;
1823
+ }
1824
+
1825
+ return true;
1826
+}
1827
+
15271828 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
15281829 struct sk_buff_head *amsdu,
15291830 struct ieee80211_rx_status *status,
15301831 bool fill_crypt_header,
15311832 u8 *rx_hdr,
1532
- enum ath10k_pkt_rx_err *err)
1833
+ enum ath10k_pkt_rx_err *err,
1834
+ u16 peer_id,
1835
+ bool frag)
15331836 {
15341837 struct sk_buff *first;
15351838 struct sk_buff *last;
1536
- struct sk_buff *msdu;
1839
+ struct sk_buff *msdu, *temp;
15371840 struct htt_rx_desc *rxd;
15381841 struct ieee80211_hdr *hdr;
15391842 enum htt_rx_mpdu_encrypt_type enctype;
....@@ -1546,6 +1849,7 @@
15461849 bool is_decrypted;
15471850 bool is_mgmt;
15481851 u32 attention;
1852
+ bool frag_pn_check = true, multicast_check = true;
15491853
15501854 if (skb_queue_empty(amsdu))
15511855 return;
....@@ -1644,7 +1948,37 @@
16441948 }
16451949
16461950 skb_queue_walk(amsdu, msdu) {
1951
+ if (frag && !fill_crypt_header && is_decrypted &&
1952
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
1953
+ frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
1954
+ msdu,
1955
+ peer_id,
1956
+ 0,
1957
+ enctype);
1958
+
1959
+ if (frag)
1960
+ multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
1961
+ msdu,
1962
+ 0);
1963
+
1964
+ if (!frag_pn_check || !multicast_check) {
1965
+ /* Discard the fragment with invalid PN or multicast DA
1966
+ */
1967
+ temp = msdu->prev;
1968
+ __skb_unlink(msdu, amsdu);
1969
+ dev_kfree_skb_any(msdu);
1970
+ msdu = temp;
1971
+ frag_pn_check = true;
1972
+ multicast_check = true;
1973
+ continue;
1974
+ }
1975
+
16471976 ath10k_htt_rx_h_csum_offload(msdu);
1977
+
1978
+ if (frag && !fill_crypt_header &&
1979
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1980
+ status->flag &= ~RX_FLAG_MMIC_STRIPPED;
1981
+
16481982 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
16491983 is_decrypted);
16501984
....@@ -1662,6 +1996,11 @@
16621996
16631997 hdr = (void *)msdu->data;
16641998 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1999
+
2000
+ if (frag && !fill_crypt_header &&
2001
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
2002
+ status->flag &= ~RX_FLAG_IV_STRIPPED &
2003
+ ~RX_FLAG_MMIC_STRIPPED;
16652004 }
16662005 }
16672006
....@@ -1693,7 +2032,7 @@
16932032 }
16942033
16952034 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
1696
- unsigned long int *unchain_cnt)
2035
+ unsigned long *unchain_cnt)
16972036 {
16982037 struct sk_buff *skb, *first;
16992038 int space;
....@@ -1742,8 +2081,8 @@
17422081
17432082 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
17442083 struct sk_buff_head *amsdu,
1745
- unsigned long int *drop_cnt,
1746
- unsigned long int *unchain_cnt)
2084
+ unsigned long *drop_cnt,
2085
+ unsigned long *unchain_cnt)
17472086 {
17482087 struct sk_buff *first;
17492088 struct htt_rx_desc *rxd;
....@@ -1846,7 +2185,7 @@
18462185 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
18472186 struct sk_buff_head *amsdu,
18482187 struct ieee80211_rx_status *rx_status,
1849
- unsigned long int *drop_cnt)
2188
+ unsigned long *drop_cnt)
18502189 {
18512190 if (skb_queue_empty(amsdu))
18522191 return;
....@@ -1866,10 +2205,10 @@
18662205 struct ieee80211_rx_status *rx_status = &htt->rx_status;
18672206 struct sk_buff_head amsdu;
18682207 int ret;
1869
- unsigned long int drop_cnt = 0;
1870
- unsigned long int unchain_cnt = 0;
1871
- unsigned long int drop_cnt_filter = 0;
1872
- unsigned long int msdus_to_queue, num_msdus;
2208
+ unsigned long drop_cnt = 0;
2209
+ unsigned long unchain_cnt = 0;
2210
+ unsigned long drop_cnt_filter = 0;
2211
+ unsigned long msdus_to_queue, num_msdus;
18732212 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
18742213 u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
18752214
....@@ -1902,7 +2241,8 @@
19022241 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
19032242
19042243 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
1905
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
2244
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
2245
+ false);
19062246 msdus_to_queue = skb_queue_len(&amsdu);
19072247 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
19082248
....@@ -1913,8 +2253,550 @@
19132253 return 0;
19142254 }
19152255
1916
-static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1917
- struct htt_rx_indication *rx)
2256
+static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2257
+ union htt_rx_pn_t *pn,
2258
+ int pn_len_bits)
2259
+{
2260
+ switch (pn_len_bits) {
2261
+ case 48:
2262
+ pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
2263
+ ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
2264
+ break;
2265
+ case 24:
2266
+ pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
2267
+ break;
2268
+ }
2269
+}
2270
+
2271
+static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2272
+ union htt_rx_pn_t *old_pn)
2273
+{
2274
+ return ((new_pn->pn48 & 0xffffffffffffULL) <=
2275
+ (old_pn->pn48 & 0xffffffffffffULL));
2276
+}
2277
+
2278
+static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
2279
+ struct ath10k_peer *peer,
2280
+ struct htt_rx_indication_hl *rx)
2281
+{
2282
+ bool last_pn_valid, pn_invalid = false;
2283
+ enum htt_txrx_sec_cast_type sec_index;
2284
+ enum htt_security_types sec_type;
2285
+ union htt_rx_pn_t new_pn = {0};
2286
+ struct htt_hl_rx_desc *rx_desc;
2287
+ union htt_rx_pn_t *last_pn;
2288
+ u32 rx_desc_info, tid;
2289
+ int num_mpdu_ranges;
2290
+
2291
+ lockdep_assert_held(&ar->data_lock);
2292
+
2293
+ if (!peer)
2294
+ return false;
2295
+
2296
+ if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
2297
+ return false;
2298
+
2299
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2300
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2301
+
2302
+ rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2303
+ rx_desc_info = __le32_to_cpu(rx_desc->info);
2304
+
2305
+ if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
2306
+ return false;
2307
+
2308
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2309
+ last_pn_valid = peer->tids_last_pn_valid[tid];
2310
+ last_pn = &peer->tids_last_pn[tid];
2311
+
2312
+ if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2313
+ sec_index = HTT_TXRX_SEC_MCAST;
2314
+ else
2315
+ sec_index = HTT_TXRX_SEC_UCAST;
2316
+
2317
+ sec_type = peer->rx_pn[sec_index].sec_type;
2318
+ ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2319
+
2320
+ if (sec_type != HTT_SECURITY_AES_CCMP &&
2321
+ sec_type != HTT_SECURITY_TKIP &&
2322
+ sec_type != HTT_SECURITY_TKIP_NOMIC)
2323
+ return false;
2324
+
2325
+ if (last_pn_valid)
2326
+ pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
2327
+ else
2328
+ peer->tids_last_pn_valid[tid] = true;
2329
+
2330
+ if (!pn_invalid)
2331
+ last_pn->pn48 = new_pn.pn48;
2332
+
2333
+ return pn_invalid;
2334
+}
2335
+
2336
+static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
2337
+ struct htt_rx_indication_hl *rx,
2338
+ struct sk_buff *skb,
2339
+ enum htt_rx_pn_check_type check_pn_type,
2340
+ enum htt_rx_tkip_demic_type tkip_mic_type)
2341
+{
2342
+ struct ath10k *ar = htt->ar;
2343
+ struct ath10k_peer *peer;
2344
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
2345
+ struct fw_rx_desc_hl *fw_desc;
2346
+ enum htt_txrx_sec_cast_type sec_index;
2347
+ enum htt_security_types sec_type;
2348
+ union htt_rx_pn_t new_pn = {0};
2349
+ struct htt_hl_rx_desc *rx_desc;
2350
+ struct ieee80211_hdr *hdr;
2351
+ struct ieee80211_rx_status *rx_status;
2352
+ u16 peer_id;
2353
+ u8 rx_desc_len;
2354
+ int num_mpdu_ranges;
2355
+ size_t tot_hdr_len;
2356
+ struct ieee80211_channel *ch;
2357
+ bool pn_invalid, qos, first_msdu;
2358
+ u32 tid, rx_desc_info;
2359
+
2360
+ peer_id = __le16_to_cpu(rx->hdr.peer_id);
2361
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2362
+
2363
+ spin_lock_bh(&ar->data_lock);
2364
+ peer = ath10k_peer_find_by_id(ar, peer_id);
2365
+ spin_unlock_bh(&ar->data_lock);
2366
+ if (!peer && peer_id != HTT_INVALID_PEERID)
2367
+ ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
2368
+
2369
+ if (!peer)
2370
+ return true;
2371
+
2372
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2373
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2374
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
2375
+ fw_desc = &rx->fw_desc;
2376
+ rx_desc_len = fw_desc->len;
2377
+
2378
+ if (fw_desc->u.bits.discard) {
2379
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
2380
+ goto err;
2381
+ }
2382
+
2383
+ /* I have not yet seen any case where num_mpdu_ranges > 1.
2384
+ * qcacld does not seem handle that case either, so we introduce the
2385
+ * same limitiation here as well.
2386
+ */
2387
+ if (num_mpdu_ranges > 1)
2388
+ ath10k_warn(ar,
2389
+ "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2390
+ num_mpdu_ranges);
2391
+
2392
+ if (mpdu_ranges->mpdu_range_status !=
2393
+ HTT_RX_IND_MPDU_STATUS_OK &&
2394
+ mpdu_ranges->mpdu_range_status !=
2395
+ HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
2396
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
2397
+ mpdu_ranges->mpdu_range_status);
2398
+ goto err;
2399
+ }
2400
+
2401
+ rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2402
+ rx_desc_info = __le32_to_cpu(rx_desc->info);
2403
+
2404
+ if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2405
+ sec_index = HTT_TXRX_SEC_MCAST;
2406
+ else
2407
+ sec_index = HTT_TXRX_SEC_UCAST;
2408
+
2409
+ sec_type = peer->rx_pn[sec_index].sec_type;
2410
+ first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
2411
+
2412
+ ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2413
+
2414
+ if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
2415
+ spin_lock_bh(&ar->data_lock);
2416
+ pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
2417
+ spin_unlock_bh(&ar->data_lock);
2418
+
2419
+ if (pn_invalid)
2420
+ goto err;
2421
+ }
2422
+
2423
+ /* Strip off all headers before the MAC header before delivery to
2424
+ * mac80211
2425
+ */
2426
+ tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
2427
+ sizeof(rx->ppdu) + sizeof(rx->prefix) +
2428
+ sizeof(rx->fw_desc) +
2429
+ sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
2430
+
2431
+ skb_pull(skb, tot_hdr_len);
2432
+
2433
+ hdr = (struct ieee80211_hdr *)skb->data;
2434
+ qos = ieee80211_is_data_qos(hdr->frame_control);
2435
+
2436
+ rx_status = IEEE80211_SKB_RXCB(skb);
2437
+ memset(rx_status, 0, sizeof(*rx_status));
2438
+
2439
+ if (rx->ppdu.combined_rssi == 0) {
2440
+ /* SDIO firmware does not provide signal */
2441
+ rx_status->signal = 0;
2442
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2443
+ } else {
2444
+ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
2445
+ rx->ppdu.combined_rssi;
2446
+ rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
2447
+ }
2448
+
2449
+ spin_lock_bh(&ar->data_lock);
2450
+ ch = ar->scan_channel;
2451
+ if (!ch)
2452
+ ch = ar->rx_channel;
2453
+ if (!ch)
2454
+ ch = ath10k_htt_rx_h_any_channel(ar);
2455
+ if (!ch)
2456
+ ch = ar->tgt_oper_chan;
2457
+ spin_unlock_bh(&ar->data_lock);
2458
+
2459
+ if (ch) {
2460
+ rx_status->band = ch->band;
2461
+ rx_status->freq = ch->center_freq;
2462
+ }
2463
+ if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
2464
+ rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
2465
+ else
2466
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
2467
+
2468
+ /* Not entirely sure about this, but all frames from the chipset has
2469
+ * the protected flag set even though they have already been decrypted.
2470
+ * Unmasking this flag is necessary in order for mac80211 not to drop
2471
+ * the frame.
2472
+ * TODO: Verify this is always the case or find out a way to check
2473
+ * if there has been hw decryption.
2474
+ */
2475
+ if (ieee80211_has_protected(hdr->frame_control)) {
2476
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2477
+ rx_status->flag |= RX_FLAG_DECRYPTED |
2478
+ RX_FLAG_IV_STRIPPED |
2479
+ RX_FLAG_MMIC_STRIPPED;
2480
+
2481
+ if (tid < IEEE80211_NUM_TIDS &&
2482
+ first_msdu &&
2483
+ check_pn_type == HTT_RX_PN_CHECK &&
2484
+ (sec_type == HTT_SECURITY_AES_CCMP ||
2485
+ sec_type == HTT_SECURITY_TKIP ||
2486
+ sec_type == HTT_SECURITY_TKIP_NOMIC)) {
2487
+ u8 offset, *ivp, i;
2488
+ s8 keyidx = 0;
2489
+ __le64 pn48 = cpu_to_le64(new_pn.pn48);
2490
+
2491
+ hdr = (struct ieee80211_hdr *)skb->data;
2492
+ offset = ieee80211_hdrlen(hdr->frame_control);
2493
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2494
+ rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
2495
+
2496
+ memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
2497
+ skb->data, offset);
2498
+ skb_push(skb, IEEE80211_CCMP_HDR_LEN);
2499
+ ivp = skb->data + offset;
2500
+ memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
2501
+ /* Ext IV */
2502
+ ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2503
+
2504
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
2505
+ if (peer->keys[i] &&
2506
+ peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
2507
+ keyidx = peer->keys[i]->keyidx;
2508
+ }
2509
+
2510
+ /* Key ID */
2511
+ ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
2512
+
2513
+ if (sec_type == HTT_SECURITY_AES_CCMP) {
2514
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED;
2515
+ /* pn 0, pn 1 */
2516
+ memcpy(skb->data + offset, &pn48, 2);
2517
+ /* pn 1, pn 3 , pn 34 , pn 5 */
2518
+ memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2519
+ } else {
2520
+ rx_status->flag |= RX_FLAG_ICV_STRIPPED;
2521
+ /* TSC 0 */
2522
+ memcpy(skb->data + offset + 2, &pn48, 1);
2523
+ /* TSC 1 */
2524
+ memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
2525
+ /* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
2526
+ memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2527
+ }
2528
+ }
2529
+ }
2530
+
2531
+ if (tkip_mic_type == HTT_RX_TKIP_MIC)
2532
+ rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
2533
+ ~RX_FLAG_MMIC_STRIPPED;
2534
+
2535
+ if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
2536
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
2537
+
2538
+ if (!qos && tid < IEEE80211_NUM_TIDS) {
2539
+ u8 offset;
2540
+ __le16 qos_ctrl = 0;
2541
+
2542
+ hdr = (struct ieee80211_hdr *)skb->data;
2543
+ offset = ieee80211_hdrlen(hdr->frame_control);
2544
+
2545
+ hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2546
+ memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
2547
+ skb_push(skb, IEEE80211_QOS_CTL_LEN);
2548
+ qos_ctrl = cpu_to_le16(tid);
2549
+ memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
2550
+ }
2551
+
2552
+ if (ar->napi.dev)
2553
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
2554
+ else
2555
+ ieee80211_rx_ni(ar->hw, skb);
2556
+
2557
+ /* We have delivered the skb to the upper layers (mac80211) so we
2558
+ * must not free it.
2559
+ */
2560
+ return false;
2561
+err:
2562
+ /* Tell the caller that it must free the skb since we have not
2563
+ * consumed it
2564
+ */
2565
+ return true;
2566
+}
2567
+
2568
+static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
2569
+ u16 head_len,
2570
+ u16 hdr_len)
2571
+{
2572
+ u8 *ivp, *orig_hdr;
2573
+
2574
+ orig_hdr = skb->data;
2575
+ ivp = orig_hdr + hdr_len + head_len;
2576
+
2577
+ /* the ExtIV bit is always set to 1 for TKIP */
2578
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2579
+ return -EINVAL;
2580
+
2581
+ memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2582
+ skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2583
+ skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
2584
+ return 0;
2585
+}
2586
+
2587
+static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
2588
+ u16 head_len,
2589
+ u16 hdr_len)
2590
+{
2591
+ u8 *ivp, *orig_hdr;
2592
+
2593
+ orig_hdr = skb->data;
2594
+ ivp = orig_hdr + hdr_len + head_len;
2595
+
2596
+ /* the ExtIV bit is always set to 1 for TKIP */
2597
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2598
+ return -EINVAL;
2599
+
2600
+ memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2601
+ skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2602
+ skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
2603
+ return 0;
2604
+}
2605
+
2606
+static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
2607
+ u16 head_len,
2608
+ u16 hdr_len)
2609
+{
2610
+ u8 *ivp, *orig_hdr;
2611
+
2612
+ orig_hdr = skb->data;
2613
+ ivp = orig_hdr + hdr_len + head_len;
2614
+
2615
+ /* the ExtIV bit is always set to 1 for CCMP */
2616
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2617
+ return -EINVAL;
2618
+
2619
+ skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
2620
+ memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
2621
+ skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
2622
+ return 0;
2623
+}
2624
+
2625
+static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
2626
+ u16 head_len,
2627
+ u16 hdr_len)
2628
+{
2629
+ u8 *orig_hdr;
2630
+
2631
+ orig_hdr = skb->data;
2632
+
2633
+ memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
2634
+ orig_hdr, head_len + hdr_len);
2635
+ skb_pull(skb, IEEE80211_WEP_IV_LEN);
2636
+ skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
2637
+ return 0;
2638
+}
2639
+
2640
/* Process an HTT rx fragment indication on a high-latency (HL) target.
 *
 * Performs anti-replay / defragmentation sanity checks (PN continuity,
 * sequence-number matching, retry/multicast rejection) and strips the
 * per-cipher crypto encapsulation before handing the frame to
 * ath10k_htt_rx_proc_rx_ind_hl().
 *
 * Return: true when the caller must free @skb (error paths and any case
 * where ath10k_htt_rx_proc_rx_ind_hl() reports the skb unconsumed),
 * false when the skb has been consumed.
 */
static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
					      struct htt_rx_fragment_indication *rx,
					      struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
	enum htt_txrx_sec_cast_type sec_index;
	struct htt_rx_indication_hl *rx_hl;
	enum htt_security_types sec_type;
	u32 tid, frag, seq, rx_desc_info;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	u16 peer_id, sc, hdr_space;
	union htt_rx_pn_t *last_pn;
	struct ieee80211_hdr *hdr;
	int ret, num_mpdu_ranges;
	struct ath10k_peer *peer;
	struct htt_resp *resp;
	size_t tot_hdr_len;

	/* Drop the fragment-indication prefix and the trailing FCS so the
	 * remainder parses as a regular HL rx indication.
	 */
	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_trim(skb, skb->len - FCS_LEN);

	peer_id = __le16_to_cpu(rx->peer_id);
	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);

	/* data_lock protects the peer table and the per-peer PN state
	 * read/written below.
	 */
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
		goto err;
	}

	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	/* Offset of the rx descriptor: fixed HL indication headers plus
	 * the variable number of MPDU range records.
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) +
		      sizeof(rx_hl->hdr) +
		      sizeof(rx_hl->ppdu) +
		      sizeof(rx_hl->prefix) +
		      sizeof(rx_hl->fw_desc) +
		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;

	tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);

	if (is_multicast_ether_addr(hdr->addr1)) {
		/* Discard the fragment with multicast DA */
		goto err;
	}

	/* Plaintext fragments need no PN/MIC handling — pass straight
	 * through without PN check or TKIP demic.
	 */
	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	/* Retransmitted fragments are rejected outright */
	if (ieee80211_has_retry(hdr->frame_control))
		goto err;

	hdr_space = ieee80211_hdrlen(hdr->frame_control);
	sc = __le16_to_cpu(hdr->seq_ctrl);
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
	frag = sc & IEEE80211_SCTL_FRAG;

	/* PN state is kept separately for unicast and multicast keys */
	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	/* Strip the cipher encapsulation in place; the offset passed to
	 * each decap helper is the start of the 802.11 header.
	 */
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
		tkip_mic = HTT_RX_TKIP_MIC;
		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
							    tot_hdr_len +
							    rx_hl->fw_desc.len,
							    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_TKIP_NOMIC:
		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
							  tot_hdr_len +
							  rx_hl->fw_desc.len,
							  hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_AES_CCMP:
		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
						    tot_hdr_len + rx_hl->fw_desc.len,
						    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_WEP128:
	case HTT_SECURITY_WEP104:
	case HTT_SECURITY_WEP40:
		ret = ath10k_htt_rx_frag_wep_decap(skb,
						   tot_hdr_len + rx_hl->fw_desc.len,
						   hdr_space);
		if (ret)
			goto err;
		break;
	default:
		break;
	}

	/* decap may have moved skb->data; re-derive the response pointer */
	resp = (struct htt_resp *)(skb->data);

	/* Only PN-bearing ciphers get fragment PN tracking */
	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	last_pn = &peer->frag_tids_last_pn[tid];

	if (frag == 0) {
		/* First fragment: normal replay check, then record the PN
		 * and sequence number for validating later fragments.
		 */
		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
			goto err;

		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
		/* Subsequent CCMP fragments must share the first fragment's
		 * sequence number and carry strictly consecutive PNs.
		 */
		if (seq != peer->frag_tids_seq[tid])
			goto err;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			goto err;

		last_pn->pn48 = new_pn.pn48;
		last_pn = &peer->tids_last_pn[tid];
		last_pn->pn48 = new_pn.pn48;
	}

	spin_unlock_bh(&ar->data_lock);

	/* PN was already verified above, so skip it in the common path */
	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
					    HTT_RX_NON_PN_CHECK, tkip_mic);

err:
	spin_unlock_bh(&ar->data_lock);

	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}
2797
+
2798
+static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
2799
+ struct htt_rx_indication *rx)
19182800 {
19192801 struct ath10k *ar = htt->ar;
19202802 struct htt_rx_indication_mpdu_range *mpdu_ranges;
....@@ -1931,9 +2813,7 @@
19312813 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
19322814
19332815 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1934
- rx, sizeof(*rx) +
1935
- (sizeof(struct htt_rx_indication_mpdu_range) *
1936
- num_mpdu_ranges));
2816
+ rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
19372817
19382818 for (i = 0; i < num_mpdu_ranges; i++)
19392819 mpdu_count += mpdu_ranges[i].mpdu_count;
....@@ -1951,8 +2831,14 @@
19512831 struct htt_resp *resp = (struct htt_resp *)skb->data;
19522832 struct htt_tx_done tx_done = {};
19532833 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1954
- __le16 msdu_id;
1955
- int i;
2834
+ __le16 msdu_id, *msdus;
2835
+ bool rssi_enabled = false;
2836
+ u8 msdu_count = 0, num_airtime_records, tid;
2837
+ int i, htt_pad = 0;
2838
+ struct htt_data_tx_compl_ppdu_dur *ppdu_info;
2839
+ struct ath10k_peer *peer;
2840
+ u16 ppdu_info_offset = 0, peer_id;
2841
+ u32 tx_duration;
19562842
19572843 switch (status) {
19582844 case HTT_DATA_TX_STATUS_NO_ACK:
....@@ -1975,9 +2861,31 @@
19752861 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
19762862 resp->data_tx_completion.num_msdus);
19772863
1978
- for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1979
- msdu_id = resp->data_tx_completion.msdus[i];
2864
+ msdu_count = resp->data_tx_completion.num_msdus;
2865
+ msdus = resp->data_tx_completion.msdus;
2866
+ rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
2867
+
2868
+ if (rssi_enabled)
2869
+ htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
2870
+ resp);
2871
+
2872
+ for (i = 0; i < msdu_count; i++) {
2873
+ msdu_id = msdus[i];
19802874 tx_done.msdu_id = __le16_to_cpu(msdu_id);
2875
+
2876
+ if (rssi_enabled) {
2877
+ /* Total no of MSDUs should be even,
2878
+ * if odd MSDUs are sent firmware fills
2879
+ * last msdu id with 0xffff
2880
+ */
2881
+ if (msdu_count & 0x01) {
2882
+ msdu_id = msdus[msdu_count + i + 1 + htt_pad];
2883
+ tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2884
+ } else {
2885
+ msdu_id = msdus[msdu_count + i + htt_pad];
2886
+ tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2887
+ }
2888
+ }
19812889
19822890 /* kfifo_put: In practice firmware shouldn't fire off per-CE
19832891 * interrupt and main interrupt (MSI/-X range case) for the same
....@@ -1987,11 +2895,58 @@
19872895 * Note that with only one concurrent reader and one concurrent
19882896 * writer, you don't need extra locking to use these macro.
19892897 */
1990
- if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
2898
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
2899
+ ath10k_txrx_tx_unref(htt, &tx_done);
2900
+ } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
19912901 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
19922902 tx_done.msdu_id, tx_done.status);
19932903 ath10k_txrx_tx_unref(htt, &tx_done);
19942904 }
2905
+ }
2906
+
2907
+ if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
2908
+ return;
2909
+
2910
+ ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
2911
+
2912
+ if (rssi_enabled)
2913
+ ppdu_info_offset += ppdu_info_offset;
2914
+
2915
+ if (resp->data_tx_completion.flags2 &
2916
+ (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
2917
+ ppdu_info_offset += 2;
2918
+
2919
+ ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
2920
+ num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
2921
+ __le32_to_cpu(ppdu_info->info0));
2922
+
2923
+ for (i = 0; i < num_airtime_records; i++) {
2924
+ struct htt_data_tx_ppdu_dur *ppdu_dur;
2925
+ u32 info0;
2926
+
2927
+ ppdu_dur = &ppdu_info->ppdu_dur[i];
2928
+ info0 = __le32_to_cpu(ppdu_dur->info0);
2929
+
2930
+ peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
2931
+ info0);
2932
+ rcu_read_lock();
2933
+ spin_lock_bh(&ar->data_lock);
2934
+
2935
+ peer = ath10k_peer_find_by_id(ar, peer_id);
2936
+ if (!peer || !peer->sta) {
2937
+ spin_unlock_bh(&ar->data_lock);
2938
+ rcu_read_unlock();
2939
+ continue;
2940
+ }
2941
+
2942
+ tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
2943
+ IEEE80211_QOS_CTL_TID_MASK;
2944
+ tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
2945
+
2946
+ ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
2947
+
2948
+ spin_unlock_bh(&ar->data_lock);
2949
+ rcu_read_unlock();
19952950 }
19962951 }
19972952
....@@ -2253,11 +3208,11 @@
22533208 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
22543209 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
22553210 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
2256
- NULL);
3211
+ NULL, peer_id, frag);
22573212 ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
22583213 break;
22593214 case -EAGAIN:
2260
- /* fall through */
3215
+ fallthrough;
22613216 default:
22623217 /* Should not happen. */
22633218 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
....@@ -2307,6 +3262,7 @@
23073262 u8 tid;
23083263 int ret;
23093264 int i;
3265
+ bool may_tx;
23103266
23113267 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
23123268
....@@ -2379,8 +3335,13 @@
23793335 num_msdus = 0;
23803336 num_bytes = 0;
23813337
3338
+ ieee80211_txq_schedule_start(hw, txq->ac);
3339
+ may_tx = ieee80211_txq_may_transmit(hw, txq);
23823340 while (num_msdus < max_num_msdus &&
23833341 num_bytes < max_num_bytes) {
3342
+ if (!may_tx)
3343
+ break;
3344
+
23843345 ret = ath10k_mac_tx_push_txq(hw, txq);
23853346 if (ret < 0)
23863347 break;
....@@ -2388,6 +3349,8 @@
23883349 num_msdus++;
23893350 num_bytes += ret;
23903351 }
3352
+ ieee80211_return_txq(hw, txq, false);
3353
+ ieee80211_txq_schedule_end(hw, txq->ac);
23913354
23923355 record->num_msdus = cpu_to_le16(num_msdus);
23933356 record->num_bytes = cpu_to_le32(num_bytes);
....@@ -2555,7 +3518,7 @@
25553518 dev_kfree_skb_any(skb);
25563519 }
25573520
2558
-static inline bool is_valid_legacy_rate(u8 rate)
3521
+static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
25593522 {
25603523 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
25613524 18, 24, 36, 48, 54};
....@@ -2563,10 +3526,133 @@
25633526
25643527 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
25653528 if (rate == legacy_rates[i])
2566
- return true;
3529
+ return i;
25673530 }
25683531
2569
- return false;
3532
+ ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
3533
+ return -EINVAL;
3534
+}
3535
+
3536
/* Fold one firmware per-peer tx stats report into the station's
 * extended (debugfs) tx statistics tables.
 *
 * @arsta: station whose arsta->txrate must already reflect this report
 *         (set up by the caller before invoking this function)
 * @pstats: raw per-peer counters from firmware
 * @legacy_rate_idx: index into the legacy rate table, used when the
 *         report is neither HT nor VHT
 *
 * No-op when extended tx stats were never allocated for the station.
 */
static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
				    struct ath10k_sta *arsta,
				    struct ath10k_per_peer_tx_stats *pstats,
				    s8 legacy_rate_idx)
{
	struct rate_info *txrate = &arsta->txrate;
	struct ath10k_htt_tx_stats *tx_stats;
	int idx, ht_idx, gi, mcs, bw, nss;
	unsigned long flags;

	if (!arsta->tx_stats)
		return;

	tx_stats = arsta->tx_stats;
	flags = txrate->flags;
	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
	bw = txrate->bw;
	nss = txrate->nss;
	/* ht table is laid out as 8 MCS entries per spatial stream */
	ht_idx = mcs + (nss - 1) * 8;
	/* rate_table index: 8 bw/gi combos per MCS, 10 MCS rows per NSS */
	idx = mcs * 8 + 8 * 10 * (nss - 1);
	idx += bw * 2 + gi;

/* shorthand for the per-type stats bucket (SUCC/FAIL/RETRY/AMPDU) */
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
	} else {
		mcs = legacy_rate_idx;

		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
	}

	if (ATH10K_HW_AMPDU(pstats->flags)) {
		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);

		/* NOTE(review): the else branch indexes the vht table even
		 * when the rate is legacy (mcs was overwritten with
		 * legacy_rate_idx above) — presumably AMPDU never occurs at
		 * legacy rates; confirm against firmware behavior.
		 */
		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
				pstats->succ_pkts + pstats->retry_pkts;
		} else {
			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
				pstats->succ_pkts + pstats->retry_pkts;
		}
		STATS_OP_FMT(AMPDU).bw[0][bw] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).gi[0][gi] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).bw[1][bw] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).gi[1][gi] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
			pstats->succ_pkts + pstats->retry_pkts;
	} else {
		tx_stats->ack_fails +=
				ATH10K_HW_BA_FAIL(pstats->flags);
	}

	/* Per-dimension (bandwidth / NSS / guard interval) totals; row 0
	 * accumulates bytes, row 1 packets.
	 */
	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;

	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;

	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;

	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;

	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;

	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;

	/* NOTE(review): ordered comparison on a flags bitmask — this
	 * relies on MCS/VHT_MCS occupying the low flag bits so that any
	 * HT/VHT report compares >=; confirm against rate_info flag values.
	 */
	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
	}

	tx_stats->tx_duration += pstats->duration;
}
25713657
25723658 static void
....@@ -2575,7 +3661,10 @@
25753661 struct ath10k_per_peer_tx_stats *peer_stats)
25763662 {
25773663 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3664
+ struct ieee80211_chanctx_conf *conf = NULL;
25783665 u8 rate = 0, sgi;
3666
+ s8 rate_idx = 0;
3667
+ bool skip_auto_rate;
25793668 struct rate_info txrate;
25803669
25813670 lockdep_assert_held(&ar->data_lock);
....@@ -2585,6 +3674,13 @@
25853674 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
25863675 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
25873676 sgi = ATH10K_HW_GI(peer_stats->flags);
3677
+ skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3678
+
3679
+ /* Firmware's rate control skips broadcast/management frames,
3680
+ * if host has configure fixed rates and in some other special cases.
3681
+ */
3682
+ if (skip_auto_rate)
3683
+ return;
25883684
25893685 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
25903686 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
....@@ -2599,21 +3695,16 @@
25993695 }
26003696
26013697 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
2602
-
3698
+ memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
26033699 if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
26043700 txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
26053701 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
2606
-
2607
- if (!is_valid_legacy_rate(rate)) {
2608
- ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
2609
- rate);
2610
- return;
2611
- }
2612
-
26133702 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
2614
- rate *= 10;
2615
- if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
2616
- rate = rate - 5;
3703
+ if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3704
+ rate = 5;
3705
+ rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3706
+ if (rate_idx < 0)
3707
+ return;
26173708 arsta->txrate.legacy = rate;
26183709 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
26193710 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
....@@ -2623,11 +3714,73 @@
26233714 arsta->txrate.mcs = txrate.mcs;
26243715 }
26253716
2626
- if (sgi)
2627
- arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3717
+ switch (txrate.flags) {
3718
+ case WMI_RATE_PREAMBLE_OFDM:
3719
+ if (arsta->arvif && arsta->arvif->vif)
3720
+ conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
3721
+ if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3722
+ arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3723
+ break;
3724
+ case WMI_RATE_PREAMBLE_CCK:
3725
+ arsta->tx_info.status.rates[0].idx = rate_idx;
3726
+ if (sgi)
3727
+ arsta->tx_info.status.rates[0].flags |=
3728
+ (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
3729
+ IEEE80211_TX_RC_SHORT_GI);
3730
+ break;
3731
+ case WMI_RATE_PREAMBLE_HT:
3732
+ arsta->tx_info.status.rates[0].idx =
3733
+ txrate.mcs + ((txrate.nss - 1) * 8);
3734
+ if (sgi)
3735
+ arsta->tx_info.status.rates[0].flags |=
3736
+ IEEE80211_TX_RC_SHORT_GI;
3737
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
3738
+ break;
3739
+ case WMI_RATE_PREAMBLE_VHT:
3740
+ ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
3741
+ txrate.mcs, txrate.nss);
3742
+ if (sgi)
3743
+ arsta->tx_info.status.rates[0].flags |=
3744
+ IEEE80211_TX_RC_SHORT_GI;
3745
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
3746
+ break;
3747
+ }
26283748
26293749 arsta->txrate.nss = txrate.nss;
26303750 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
3751
+ arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
3752
+ if (sgi)
3753
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3754
+
3755
+ switch (arsta->txrate.bw) {
3756
+ case RATE_INFO_BW_40:
3757
+ arsta->tx_info.status.rates[0].flags |=
3758
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
3759
+ break;
3760
+ case RATE_INFO_BW_80:
3761
+ arsta->tx_info.status.rates[0].flags |=
3762
+ IEEE80211_TX_RC_80_MHZ_WIDTH;
3763
+ break;
3764
+ }
3765
+
3766
+ if (peer_stats->succ_pkts) {
3767
+ arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
3768
+ arsta->tx_info.status.rates[0].count = 1;
3769
+ ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
3770
+ }
3771
+
3772
+ if (ar->htt.disable_tx_comp) {
3773
+ arsta->tx_failed += peer_stats->failed_pkts;
3774
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
3775
+ arsta->tx_failed);
3776
+ }
3777
+
3778
+ arsta->tx_retries += peer_stats->retry_pkts;
3779
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
3780
+
3781
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar))
3782
+ ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
3783
+ rate_idx);
26313784 }
26323785
26333786 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
....@@ -2676,6 +3829,7 @@
26763829 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
26773830 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
26783831 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
3832
+ p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
26793833
26803834 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
26813835 }
....@@ -2741,6 +3895,51 @@
27413895 rcu_read_unlock();
27423896 }
27433897
3898
+static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
3899
+{
3900
+ switch (sec_type) {
3901
+ case HTT_SECURITY_TKIP:
3902
+ case HTT_SECURITY_TKIP_NOMIC:
3903
+ case HTT_SECURITY_AES_CCMP:
3904
+ return 48;
3905
+ default:
3906
+ return 0;
3907
+ }
3908
+}
3909
+
3910
/* Handle an HTT security indication: record the negotiated cipher and
 * PN length for the peer's unicast or multicast key, and reset the
 * peer's cached last-PN state so replay checks start fresh under the
 * new key.
 */
static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
					  struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	/* data_lock guards the peer table and per-peer PN state */
	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	/* unicast and multicast keys are tracked independently */
	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	/* invalidate cached PNs — they belong to the previous key */
	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}
3942
+
27443943 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
27453944 {
27463945 struct ath10k_htt *htt = &ar->htt;
....@@ -2769,7 +3968,12 @@
27693968 break;
27703969 }
27713970 case HTT_T2H_MSG_TYPE_RX_IND:
2772
- ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
3971
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
3972
+ ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
3973
+ } else {
3974
+ skb_queue_tail(&htt->rx_indication_head, skb);
3975
+ return false;
3976
+ }
27733977 break;
27743978 case HTT_T2H_MSG_TYPE_PEER_MAP: {
27753979 struct htt_peer_map_event ev = {
....@@ -2789,6 +3993,9 @@
27893993 }
27903994 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
27913995 struct htt_tx_done tx_done = {};
3996
+ struct ath10k_htt *htt = &ar->htt;
3997
+ struct ath10k_htc *htc = &ar->htc;
3998
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
27923999 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
27934000 int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
27944001
....@@ -2814,6 +4021,12 @@
28144021 break;
28154022 }
28164023
4024
+ if (htt->disable_tx_comp) {
4025
+ spin_lock_bh(&htc->tx_lock);
4026
+ ep->tx_credits++;
4027
+ spin_unlock_bh(&htc->tx_lock);
4028
+ }
4029
+
28174030 status = ath10k_txrx_tx_unref(htt, &tx_done);
28184031 if (!status) {
28194032 spin_lock_bh(&htt->tx_lock);
....@@ -2829,6 +4042,7 @@
28294042 struct ath10k *ar = htt->ar;
28304043 struct htt_security_indication *ev = &resp->security_indication;
28314044
4045
+ ath10k_htt_rx_sec_ind_handler(ar, ev);
28324046 ath10k_dbg(ar, ATH10K_DBG_HTT,
28334047 "sec ind peer_id %d unicast %d type %d\n",
28344048 __le16_to_cpu(ev->peer_id),
....@@ -2841,6 +4055,10 @@
28414055 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
28424056 skb->data, skb->len);
28434057 atomic_inc(&htt->num_mpdus_ready);
4058
+
4059
+ return ath10k_htt_rx_proc_rx_frag_ind(htt,
4060
+ &resp->rx_frag_ind,
4061
+ skb);
28444062 break;
28454063 }
28464064 case HTT_T2H_MSG_TYPE_TEST:
....@@ -2883,8 +4101,32 @@
28834101 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
28844102 return false;
28854103 }
2886
- case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
4104
+ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
4105
+ struct ath10k_htt *htt = &ar->htt;
4106
+ struct ath10k_htc *htc = &ar->htc;
4107
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
4108
+ u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
4109
+ int htt_credit_delta;
4110
+
4111
+ htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
4112
+ if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
4113
+ htt_credit_delta = -htt_credit_delta;
4114
+
4115
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
4116
+ "htt credit update delta %d\n",
4117
+ htt_credit_delta);
4118
+
4119
+ if (htt->disable_tx_comp) {
4120
+ spin_lock_bh(&htc->tx_lock);
4121
+ ep->tx_credits += htt_credit_delta;
4122
+ spin_unlock_bh(&htc->tx_lock);
4123
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
4124
+ "htt credit total %d\n",
4125
+ ep->tx_credits);
4126
+ ep->ep_ops.ep_tx_credits(htc->ar);
4127
+ }
28874128 break;
4129
+ }
28884130 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
28894131 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
28904132 u32 freq = __le32_to_cpu(resp->chan_change.freq);
....@@ -2953,6 +4195,37 @@
29534195
29544196 return quota;
29554197 }
4198
+
4199
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
4200
+{
4201
+ struct htt_resp *resp;
4202
+ struct ath10k_htt *htt = &ar->htt;
4203
+ struct sk_buff *skb;
4204
+ bool release;
4205
+ int quota;
4206
+
4207
+ for (quota = 0; quota < budget; quota++) {
4208
+ skb = skb_dequeue(&htt->rx_indication_head);
4209
+ if (!skb)
4210
+ break;
4211
+
4212
+ resp = (struct htt_resp *)skb->data;
4213
+
4214
+ release = ath10k_htt_rx_proc_rx_ind_hl(htt,
4215
+ &resp->rx_ind_hl,
4216
+ skb,
4217
+ HTT_RX_PN_CHECK,
4218
+ HTT_RX_NON_TKIP_MIC);
4219
+
4220
+ if (release)
4221
+ dev_kfree_skb_any(skb);
4222
+
4223
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
4224
+ skb_queue_len(&htt->rx_indication_head));
4225
+ }
4226
+ return quota;
4227
+}
4228
+EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
29564229
29574230 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
29584231 {
....@@ -3053,11 +4326,17 @@
30534326 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
30544327 };
30554328
4329
/* RX ops for high-latency targets: only fragment-indication handling is
 * provided; the rx-ring ops used by the 32/64-bit low-latency variants
 * are left unset.
 */
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};
4332
+
30564333 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
30574334 {
30584335 struct ath10k *ar = htt->ar;
30594336
3060
- if (ar->hw_params.target_64bit)
4337
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
4338
+ htt->rx_ops = &htt_rx_ops_hl;
4339
+ else if (ar->hw_params.target_64bit)
30614340 htt->rx_ops = &htt_rx_ops_64;
30624341 else
30634342 htt->rx_ops = &htt_rx_ops_32;