forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
 /*
- * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/etherdevice.h>
@@ -26,9 +15,14 @@
 #include "txrx.h"
 #include "trace.h"
 
+/* Max number of entries (packets to complete) to update the hwtail of tx
+ * status ring. Should be power of 2
+ */
+#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128
 #define WIL_EDMA_MAX_DATA_OFFSET (2)
 /* RX buffer size must be aligned to 4 bytes */
 #define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+#define MAX_INVALID_BUFF_ID_RETRY (3)
 
 static void wil_tx_desc_unmap_edma(struct device *dev,
 				   union wil_tx_desc *desc,
@@ -99,7 +93,7 @@
 	/* Status messages are allocated and initialized to 0. This is necessary
 	 * since DR bit should be initialized to 0.
 	 */
-	sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+	sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
 	if (!sring->va)
 		return -ENOMEM;
 
@@ -153,14 +147,12 @@
 	return rc;
 }
 
-/**
- * Allocate one skb for Rx descriptor RING
- */
+/* Allocate one skb for Rx descriptor RING */
 static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
 				   struct wil_ring *ring, u32 i)
 {
 	struct device *dev = wil_to_dev(wil);
-	unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+	unsigned int sz = wil->rx_buf_len;
 	dma_addr_t pa;
 	u16 buff_id;
 	struct list_head *active = &wil->rx_buff_mgmt.active;
@@ -216,10 +208,17 @@
 }
 
 static inline
-void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
+void wil_get_next_rx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
+				void *msg)
 {
-	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
-	       sring->elem_size);
+	struct wil_rx_status_compressed *_msg;
+
+	_msg = (struct wil_rx_status_compressed *)
+		(sring->va + (sring->elem_size * sring->swhead));
+	*dr_bit = WIL_GET_BITS(_msg->d0, 31, 31);
+	/* make sure dr_bit is read before the rest of status msg */
+	rmb();
+	memcpy(msg, (void *)_msg, sring->elem_size);
 }
 
 static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
@@ -315,7 +314,8 @@
 	struct list_head *free = &wil->rx_buff_mgmt.free;
 	int i;
 
-	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+	wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
+					     sizeof(struct wil_rx_buff),
 					     GFP_KERNEL);
 	if (!wil->rx_buff_mgmt.buff_arr)
 		return -ENOMEM;
@@ -324,14 +324,16 @@
 	INIT_LIST_HEAD(active);
 	INIT_LIST_HEAD(free);
 
-	/* Linkify the list */
+	/* Linkify the list.
+	 * buffer id 0 should not be used (marks invalid id).
+	 */
 	buff_arr = wil->rx_buff_mgmt.buff_arr;
-	for (i = 0; i < size; i++) {
+	for (i = 1; i <= size; i++) {
 		list_add(&buff_arr[i].list, free);
 		buff_arr[i].id = i;
 	}
 
-	wil->rx_buff_mgmt.size = size;
+	wil->rx_buff_mgmt.size = size + 1;
 
 	return 0;
 }
@@ -344,8 +346,8 @@
 	struct wil_status_ring *sring = &wil->srings[ring_id];
 	int rc;
 
-	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
-		     ring_id);
+	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
+		     status_ring_size, ring_id);
 
 	memset(&sring->rx_data, 0, sizeof(sring->rx_data));
 
@@ -384,15 +386,15 @@
 	if (!ring->ctx)
 		goto err;
 
-	ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+	ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
 	if (!ring->va)
 		goto err_free_ctx;
 
 	if (ring->is_rx) {
 		sz = sizeof(*ring->edma_rx_swtail.va);
 		ring->edma_rx_swtail.va =
-			dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
-					    GFP_KERNEL);
+			dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+					   GFP_KERNEL);
 		if (!ring->edma_rx_swtail.va)
 			goto err_free_va;
 	}
@@ -431,6 +433,9 @@
 			     &ring->pa, ring->ctx);
 
 		wil_move_all_rx_buff_to_free_list(wil, ring);
+		dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
+				  ring->edma_rx_swtail.va,
+				  ring->edma_rx_swtail.pa);
 		goto out;
 	}
 
@@ -543,7 +548,7 @@
 	s = &wil->sta[cid];
 	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
 	cc = &c->key_id[key_id];
-	pn = (u8 *)&st->ext.pn_15_0;
+	pn = (u8 *)&st->ext.pn;
 
 	if (!cc->key_set) {
 		wil_err_ratelimited(wil,
@@ -576,8 +581,7 @@
 		if (!sring->va)
 			continue;
 
-		wil_get_next_rx_status_msg(sring, msg);
-		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+		wil_get_next_rx_status_msg(sring, &dr_bit, msg);
 
 		/* Check if there are unhandled RX status messages */
 		if (dr_bit == sring->desc_rdy_pol)
@@ -589,6 +593,7 @@
 
 static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
 {
+	/* RX buffer size must be aligned to 4 bytes */
 	wil->rx_buf_len = rx_large_buf ?
 		WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
 }
@@ -602,7 +607,6 @@
 		sizeof(struct wil_rx_status_compressed) :
 		sizeof(struct wil_rx_status_extended);
 	int i;
-	u16 max_rx_pl_per_desc;
 
 	/* In SW reorder one must use extended status messages */
 	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
@@ -628,8 +632,6 @@
 
 	wil_rx_buf_len_init_edma(wil);
 
-	max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4);
-
 	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
 	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
 		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
@@ -637,7 +639,7 @@
 	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
 		     wil->num_rx_status_rings);
 
-	rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
+	rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
 	if (rc)
 		return rc;
 
@@ -732,11 +734,21 @@
 	txdata->enabled = 0;
 	spin_unlock_bh(&txdata->lock);
 	wil_ring_free_edma(wil, ring);
-	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+	wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
 	wil->ring2cid_tid[ring_id][1] = 0;
 
 out:
 	return rc;
+}
+
+static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
+				   int cid, int tid)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+
+	wil_err(wil, "ring modify is not supported for EDMA\n");
+
+	return -EOPNOTSUPP;
 }
 
 /* This function is used only for RX SW reorder */
@@ -799,17 +811,8 @@
 				   struct sk_buff *skb,
 				   struct wil_net_stats *stats)
 {
-	int error;
 	int l2_rx_status;
-	int l3_rx_status;
-	int l4_rx_status;
 	void *msg = wil_skb_rxstatus(skb);
-
-	error = wil_rx_status_get_error(msg);
-	if (!error) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		return 0;
-	}
 
 	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
 	if (l2_rx_status != 0) {
@@ -839,17 +842,7 @@
 		return -EFAULT;
 	}
 
-	l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
-	l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
-	if (!l3_rx_status && !l4_rx_status)
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	/* If HW reports bad checksum, let IP stack re-check it
-	 * For example, HW don't understand Microsoft IP stack that
-	 * mis-calculates TCP checksum - if it should be 0x0,
-	 * it writes 0xffff in violation of RFC 1624
-	 */
-	else
-		stats->rx_csum_err++;
+	skb->ip_summed = wil_rx_status_get_checksum(msg, stats);
 
 	return 0;
 }
@@ -864,7 +857,7 @@
 	struct sk_buff *skb;
 	dma_addr_t pa;
 	struct wil_ring_rx_data *rxdata = &sring->rx_data;
-	unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+	unsigned int sz = wil->rx_buf_len;
 	struct wil_net_stats *stats = NULL;
 	u16 dmalen;
 	int cid;
@@ -874,12 +867,12 @@
 	u8 data_offset;
 	struct wil_rx_status_extended *s;
 	u16 sring_idx = sring - wil->srings;
+	int invalid_buff_id_retry;
 
 	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
 
 again:
-	wil_get_next_rx_status_msg(sring, msg);
-	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+	wil_get_next_rx_status_msg(sring, &dr_bit, msg);
 
 	/* Completed handling all the ready status messages */
 	if (dr_bit != sring->desc_rdy_pol)
@@ -887,25 +880,54 @@
 
 	/* Extract the buffer ID from the status message */
 	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
-	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
-		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
-			buff_id, sring->swhead);
-		wil_sring_advance_swhead(sring);
-		goto again;
+
+	invalid_buff_id_retry = 0;
+	while (!buff_id) {
+		struct wil_rx_status_extended *s;
+
+		wil_dbg_txrx(wil,
+			     "buff_id is not updated yet by HW, (swhead 0x%x)\n",
+			     sring->swhead);
+		if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
+			break;
+
+		/* Read the status message again */
+		s = (struct wil_rx_status_extended *)
+			(sring->va + (sring->elem_size * sring->swhead));
+		*(struct wil_rx_status_extended *)msg = *s;
+		buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
 	}
 
-	wil_sring_advance_swhead(sring);
+	if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
+		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
+			buff_id, sring->swhead);
+		print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+			       msg, wil->use_compressed_rx_status ?
+			       sizeof(struct wil_rx_status_compressed) :
+			       sizeof(struct wil_rx_status_extended), false);
+
+		wil_rx_status_reset_buff_id(sring);
+		wil_sring_advance_swhead(sring);
+		sring->invalid_buff_id_cnt++;
+		goto again;
+	}
 
 	/* Extract the SKB from the rx_buff management array */
 	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
 	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
 	if (!skb) {
 		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+		wil_rx_status_reset_buff_id(sring);
 		/* Move the buffer from the active list to the free list */
-		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
-			  &wil->rx_buff_mgmt.free);
+		list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+			       &wil->rx_buff_mgmt.free);
+		wil_sring_advance_swhead(sring);
+		sring->invalid_buff_id_cnt++;
 		goto again;
 	}
+
+	wil_rx_status_reset_buff_id(sring);
+	wil_sring_advance_swhead(sring);
 
 	memcpy(&pa, skb->cb, sizeof(pa));
 	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
@@ -921,13 +943,13 @@
 			  sizeof(struct wil_rx_status_extended), false);
 
 	/* Move the buffer from the active list to the free list */
-	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
-		  &wil->rx_buff_mgmt.free);
+	list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+		       &wil->rx_buff_mgmt.free);
 
 	eop = wil_rx_status_get_eop(msg);
 
 	cid = wil_rx_status_get_cid(msg);
-	if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
+	if (unlikely(!wil_val_in_range(cid, 0, wil->max_assoc_sta))) {
 		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
 			cid, sring->swhead);
 		rxdata->skipping = true;
@@ -935,8 +957,8 @@
 	}
 	stats = &wil->sta[cid].stats;
 
-	if (unlikely(skb->len < ETH_HLEN)) {
-		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
+	if (unlikely(dmalen < ETH_HLEN)) {
+		wil_dbg_txrx(wil, "Short frame, len = %d\n", dmalen);
 		stats->rx_short_frame++;
 		rxdata->skipping = true;
 		goto skipping;
@@ -944,6 +966,11 @@
 
 	if (unlikely(dmalen > sz)) {
 		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+		print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+			       msg, wil->use_compressed_rx_status ?
+			       sizeof(struct wil_rx_status_compressed) :
+			       sizeof(struct wil_rx_status_extended), false);
+
 		stats->rx_large_frame++;
 		rxdata->skipping = true;
 	}
@@ -999,6 +1026,8 @@
 		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
 		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
 			stats->rx_per_mcs[stats->last_mcs_rx]++;
+
+		stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
 	}
 
 	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
@@ -1109,17 +1138,19 @@
 }
 
 static inline void
-wil_get_next_tx_status_msg(struct wil_status_ring *sring,
+wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
 			   struct wil_ring_tx_status *msg)
 {
 	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
 		(sring->va + (sring->elem_size * sring->swhead));
 
+	*dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS;
+	/* make sure dr_bit is read before the rest of status msg */
+	rmb();
 	*msg = *_msg;
 }
 
-/**
- * Clean up transmitted skb's from the Tx descriptor RING.
+/* Clean up transmitted skb's from the Tx descriptor RING.
  * Return number of descriptors cleared.
  */
 int wil_tx_sring_handler(struct wil6210_priv *wil,
@@ -1132,10 +1163,10 @@
 	/* Total number of completed descriptors in all descriptor rings */
 	int desc_cnt = 0;
 	int cid;
-	struct wil_net_stats *stats = NULL;
+	struct wil_net_stats *stats;
 	struct wil_tx_enhanced_desc *_d;
 	unsigned int ring_id;
-	unsigned int num_descs;
+	unsigned int num_descs, num_statuses = 0;
 	int i;
 	u8 dr_bit; /* Descriptor Ready bit */
 	struct wil_ring_tx_status msg;
@@ -1143,8 +1174,7 @@
 	int used_before_complete;
 	int used_new;
 
-	wil_get_next_tx_status_msg(sring, &msg);
-	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+	wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
 
 	/* Process completion messages while DR bit has the expected polarity */
 	while (dr_bit == sring->desc_rdy_pol) {
@@ -1182,8 +1212,8 @@
 		ndev = vif_to_ndev(vif);
 
 		cid = wil->ring2cid_tid[ring_id][0];
-		if (cid < WIL6210_MAX_CID)
-			stats = &wil->sta[cid].stats;
+		stats = (cid < wil->max_assoc_sta) ? &wil->sta[cid].stats :
+						     NULL;
 
 		wil_dbg_txrx(wil,
 			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
@@ -1231,6 +1261,10 @@
 				if (stats)
 					stats->tx_errors++;
 			}
+
+			if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+				wil_tx_complete_handle_eapol(vif, skb);
+
 			wil_consume_skb(skb, msg.status == 0);
 		}
 		memset(ctx, 0, sizeof(*ctx));
@@ -1256,24 +1290,28 @@
 	}
 
 again:
+		num_statuses++;
+		if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL == 0)
+			/* update HW tail to allow HW to push new statuses */
+			wil_w(wil, sring->hwtail, sring->swhead);
+
 		wil_sring_advance_swhead(sring);
 
-		wil_get_next_tx_status_msg(sring, &msg);
-		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+		wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
 	}
 
 	/* shall we wake net queues? */
 	if (desc_cnt)
 		wil_update_net_queues(wil, vif, NULL, false);
 
-	/* Update the HW tail ptr (RD ptr) */
-	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+	if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL != 0)
+		/* Update the HW tail ptr (RD ptr) */
+		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
 
 	return desc_cnt;
 }
 
-/**
- * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
  * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
@@ -1441,7 +1479,7 @@
 	/* Rest of the descriptors are from the SKB fragments */
 	for (f = 0; f < nr_frags; f++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
-		int len = frag->size;
+		int len = skb_frag_size(frag);
 
 		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
 			     len, descs_used);
@@ -1593,6 +1631,7 @@
 	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
 	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
 	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+	wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
 	/* RX ops */
 	wil->txrx_ops.rx_init = wil_rx_init_edma;
 	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;