
hc
2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -179,8 +179,6 @@
 	if (!wcn_ch->cpu_addr)
 		return -ENOMEM;
 
-	memset(wcn_ch->cpu_addr, 0, size);
-
 	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
 	cur_ctl = wcn_ch->head_blk_ctl;
 
@@ -274,6 +272,21 @@
 	return 0;
 }
 
+static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+	int reg_data = 0;
+
+	wcn36xx_dxe_read_register(wcn,
+				  WCN36XX_DXE_INT_MASK_REG,
+				  &reg_data);
+
+	reg_data &= ~wcn_ch;
+
+	wcn36xx_dxe_write_register(wcn,
+				   WCN36XX_DXE_INT_MASK_REG,
+				   (int)reg_data);
+}
+
 static int wcn36xx_dxe_fill_skb(struct device *dev,
 				struct wcn36xx_dxe_ctl *ctl,
 				gfp_t gfp)
@@ -336,6 +349,7 @@
 	spin_lock_irqsave(&wcn->dxe_lock, flags);
 	skb = wcn->tx_ack_skb;
 	wcn->tx_ack_skb = NULL;
+	del_timer(&wcn->tx_ack_timer);
 	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
 
 	if (!skb) {
@@ -347,8 +361,36 @@
 
 	if (status == 1)
 		info->flags |= IEEE80211_TX_STAT_ACK;
+	else
+		info->flags &= ~IEEE80211_TX_STAT_ACK;
 
 	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+	ieee80211_tx_status_irqsafe(wcn->hw, skb);
+	ieee80211_wake_queues(wcn->hw);
+}
+
+static void wcn36xx_dxe_tx_timer(struct timer_list *t)
+{
+	struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
+	struct ieee80211_tx_info *info;
+	unsigned long flags;
+	struct sk_buff *skb;
+
+	/* TX Timeout */
+	wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
+
+	spin_lock_irqsave(&wcn->dxe_lock, flags);
+	skb = wcn->tx_ack_skb;
+	wcn->tx_ack_skb = NULL;
+	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+	if (!skb)
+		return;
+
+	info = IEEE80211_SKB_CB(skb);
+	info->flags &= ~IEEE80211_TX_STAT_ACK;
+	info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 
 	ieee80211_tx_status_irqsafe(wcn->hw, skb);
 	ieee80211_wake_queues(wcn->hw);
@@ -376,8 +418,21 @@
 		dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
 				 ctl->skb->len, DMA_TO_DEVICE);
 		info = IEEE80211_SKB_CB(ctl->skb);
-		if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
-			/* Keep frame until TX status comes */
+		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+			if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+				info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+				ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
+			} else {
+				/* Wait for the TX ack indication or timeout... */
+				spin_lock(&wcn->dxe_lock);
+				if (WARN_ON(wcn->tx_ack_skb))
+					ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
+				wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
+				mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
+				spin_unlock(&wcn->dxe_lock);
+			}
+			/* do not free, ownership transferred to mac80211 status cb */
+		} else {
 			ieee80211_free_txskb(wcn->hw, ctl->skb);
 		}
 
@@ -436,8 +491,9 @@
 			    int_reason);
 
 		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
-		    WCN36XX_CH_STAT_INT_ED_MASK))
+		    WCN36XX_CH_STAT_INT_ED_MASK)) {
 			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+		}
 	}
 
 	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
@@ -448,7 +504,6 @@
 		wcn36xx_dxe_write_register(wcn,
 					   WCN36XX_DXE_0_INT_CLR,
 					   WCN36XX_INT_MASK_CHAN_TX_L);
-
 
 		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK ) {
 			wcn36xx_dxe_write_register(wcn,
@@ -475,8 +530,9 @@
 			    int_reason);
 
 		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
-		    WCN36XX_CH_STAT_INT_ED_MASK))
+		    WCN36XX_CH_STAT_INT_ED_MASK)) {
 			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -638,13 +694,13 @@
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
-	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
+	cpu_addr = dma_alloc_coherent(wcn->dev, s,
+				      &wcn->mgmt_mem_pool.phy_addr,
 				      GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
 
 	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
-	memset(cpu_addr, 0, s);
 
 	/* Allocate BD headers for DATA frames */
 
@@ -653,13 +709,13 @@
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
-	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
+	cpu_addr = dma_alloc_coherent(wcn->dev, s,
+				      &wcn->data_mem_pool.phy_addr,
 				      GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
 
 	wcn->data_mem_pool.virt_addr = cpu_addr;
-	memset(cpu_addr, 0, s);
 
 	return 0;
 
@@ -828,7 +884,6 @@
 				   WCN36XX_DXE_WQ_TX_L);
 
 	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
-	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
 
 	/***************************************/
 	/* Init descriptors for TX HIGH channel */
@@ -852,9 +907,6 @@
 
 	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
 
-	/* Enable channel interrupts */
-	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
-
 	/***************************************/
 	/* Init descriptors for RX LOW channel */
 	/***************************************/
@@ -863,7 +915,6 @@
 		dev_err(wcn->dev, "Error allocating descriptor\n");
 		goto out_err_rxl_ch;
 	}
-
 
 	/* For RX we need to preallocated buffers */
 	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -886,9 +937,6 @@
 	wcn36xx_dxe_write_register(wcn,
 				   WCN36XX_DXE_REG_CTL_RX_L,
 				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
-
-	/* Enable channel interrupts */
-	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
 
 	/***************************************/
 	/* Init descriptors for RX HIGH channel */
@@ -921,12 +969,17 @@
 				   WCN36XX_DXE_REG_CTL_RX_H,
 				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
 
-	/* Enable channel interrupts */
-	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
-
 	ret = wcn36xx_dxe_request_irqs(wcn);
 	if (ret < 0)
 		goto out_err_irq;
+
+	timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
+
+	/* Enable channel interrupts */
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
 
 	return 0;
 
@@ -944,14 +997,27 @@
 
 void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
 {
+	int reg_data = 0;
+
+	/* Disable channel interrupts */
+	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
 	free_irq(wcn->tx_irq, wcn);
 	free_irq(wcn->rx_irq, wcn);
+	del_timer(&wcn->tx_ack_timer);
 
 	if (wcn->tx_ack_skb) {
 		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
 		wcn->tx_ack_skb = NULL;
 	}
 
+	/* Put the DXE block into reset before freeing memory */
+	reg_data = WCN36XX_DXE_REG_RESET;
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
 	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
 	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
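
Note on the TX-ack flow introduced above: when a reaped frame has IEEE80211_TX_CTL_REQ_TX_STATUS set and expects an ack, the patch parks the skb in wcn->tx_ack_skb and arms wcn->tx_ack_timer (HZ / 10, roughly 100 ms); the firmware's ack indication cancels the timer and reports status, while the timer callback reports a no-ack failure if nothing arrives. The following is a minimal sketch of that same "pending item + timeout" hand-off, reduced to generic kernel timer/spinlock usage. It is not wcn36xx code; the ack_wait structure and helper names are illustrative only, and the real driver uses dxe_lock, tx_ack_skb, and mac80211 status calls instead.

/* Hypothetical sketch of the pending-ack pattern, assuming made-up names. */
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct ack_wait {
	spinlock_t lock;
	void *pending;			/* item waiting for an ack, like tx_ack_skb */
	struct timer_list timer;	/* fires if no ack indication arrives in time */
};

static void ack_wait_timeout(struct timer_list *t)
{
	struct ack_wait *aw = from_timer(aw, t, timer);
	unsigned long flags;
	void *item;

	/* Take the pending item back under the lock, as the timer callback in
	 * the patch does with wcn->tx_ack_skb, so the ack path cannot also
	 * complete it. */
	spin_lock_irqsave(&aw->lock, flags);
	item = aw->pending;
	aw->pending = NULL;
	spin_unlock_irqrestore(&aw->lock, flags);

	if (item)
		pr_debug("ack timeout, reporting failure\n");
}

static void ack_wait_init(struct ack_wait *aw)
{
	spin_lock_init(&aw->lock);
	aw->pending = NULL;
	timer_setup(&aw->timer, ack_wait_timeout, 0);	/* cf. timer_setup() in dxe init */
}

static void ack_wait_arm(struct ack_wait *aw, void *item)
{
	spin_lock(&aw->lock);
	aw->pending = item;				/* cf. wcn->tx_ack_skb = ctl->skb */
	mod_timer(&aw->timer, jiffies + HZ / 10);	/* ~100 ms, same interval as the patch */
	spin_unlock(&aw->lock);
}

static void ack_wait_complete(struct ack_wait *aw)
{
	unsigned long flags;

	spin_lock_irqsave(&aw->lock, flags);
	aw->pending = NULL;
	del_timer(&aw->timer);		/* ack arrived first, cancel the timeout */
	spin_unlock_irqrestore(&aw->lock, flags);
}

Clearing the pending pointer and touching the timer only while holding the lock is what keeps the ack-indication path and the timeout path from both completing the same frame, which is why the patch clears wcn->tx_ack_skb and calls del_timer() inside dxe_lock.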