tzh
2024-08-15 d4a1bd480003f3e1a0590bc46fbcb24f05652ca7
longan/kernel/linux-4.9/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_txq.c
old mode 100644
new mode 100755
@@ -137,15 +137,15 @@
 	if (sw_txhdr->desc.host.packet_cnt > 1) {
 		struct rwnx_amsdu_txhdr *amsdu_txhdr;
 		list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) {
-			dma_unmap_single(rwnx_hw->dev, amsdu_txhdr->dma_addr,
-					 amsdu_txhdr->map_len, DMA_TO_DEVICE);
+			//dma_unmap_single(rwnx_hw->dev, amsdu_txhdr->dma_addr,
+			//		 amsdu_txhdr->map_len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(amsdu_txhdr->skb);
 		}
 	}
 #endif
 	kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
-	dma_unmap_single(rwnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len,
-			 DMA_TO_DEVICE);
+	//dma_unmap_single(rwnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len,
+	//		 DMA_TO_DEVICE);
 
 #ifdef CONFIG_RWNX_FULLMAC
 	dev_kfree_skb_any(skb);
@@ -257,7 +257,6 @@
 	}
 
 #endif /* CONFIG_RWNX_FULLMAC*/
-	rwnx_ipc_sta_buffer_init(rwnx_hw, rwnx_sta->sta_idx);
 }
 
 /**
@@ -385,7 +384,9 @@
 void rwnx_txq_add_to_hw_list(struct rwnx_txq *txq)
 {
 	if (!(txq->status & RWNX_TXQ_IN_HWQ_LIST)) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_add_to_hw(txq);
+#endif
 		txq->status |= RWNX_TXQ_IN_HWQ_LIST;
 		list_add_tail(&txq->sched_list, &txq->hwq->list);
 		txq->hwq->need_processing = true;
@@ -403,7 +404,9 @@
 void rwnx_txq_del_from_hw_list(struct rwnx_txq *txq)
 {
 	if (txq->status & RWNX_TXQ_IN_HWQ_LIST) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_del_from_hw(txq);
+#endif
 		txq->status &= ~RWNX_TXQ_IN_HWQ_LIST;
 		list_del(&txq->sched_list);
 	}
@@ -441,7 +444,9 @@
 {
 	BUG_ON(txq == NULL);
 	if (txq->idx != TXQ_INACTIVE && (txq->status & reason)) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_start(txq, reason);
+#endif
 		txq->status &= ~reason;
 		if (!rwnx_txq_is_stopped(txq) && rwnx_txq_skb_ready(txq))
 			rwnx_txq_add_to_hw_list(txq);
@@ -461,7 +466,9 @@
 {
 	BUG_ON(txq == NULL);
 	if (txq->idx != TXQ_INACTIVE) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_stop(txq, reason);
+#endif
 		txq->status |= reason;
 		rwnx_txq_del_from_hw_list(txq);
 	}
@@ -493,8 +500,9 @@
 {
 	struct rwnx_txq *txq;
 	int tid;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_sta_start(rwnx_sta->sta_idx);
+#endif
 
 	foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
 		rwnx_txq_start(txq, reason);
@@ -529,8 +537,9 @@
 
 	if (!rwnx_sta)
 		return;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_sta_stop(rwnx_sta->sta_idx);
+#endif
 	foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
 		rwnx_txq_stop(txq, reason);
 	}
@@ -540,7 +549,9 @@
 void rwnx_txq_tdls_sta_start(struct rwnx_vif *rwnx_vif, u16 reason,
 			     struct rwnx_hw *rwnx_hw)
 {
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_start(rwnx_vif->vif_index);
+#endif
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
 	if (rwnx_vif->sta.tdls_sta)
@@ -554,7 +565,9 @@
 void rwnx_txq_tdls_sta_stop(struct rwnx_vif *rwnx_vif, u16 reason,
 			    struct rwnx_hw *rwnx_hw)
 {
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_stop(rwnx_vif->vif_index);
+#endif
 
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
@@ -614,9 +627,9 @@
 			struct rwnx_hw *rwnx_hw)
 {
 	struct rwnx_txq *txq;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_start(rwnx_vif->vif_index);
-
+#endif
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
 #ifdef CONFIG_RWNX_FULLMAC
@@ -658,10 +671,9 @@
 			  struct rwnx_hw *rwnx_hw)
 {
 	struct rwnx_txq *txq;
-
-	RWNX_DBG(RWNX_FN_ENTRY_STR);
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_stop(rwnx_vif->vif_index);
+#endif
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
 #ifdef CONFIG_RWNX_FULLMAC
....@@ -761,8 +773,9 @@
761773 #ifdef CONFIG_RWNX_FULLMAC
762774 if (unlikely(txq->sta && txq->sta->ps.active)) {
763775 txq->sta->ps.pkt_ready[txq->ps_id]++;
776
+#ifdef CREATE_TRACE_POINTS
764777 trace_ps_queue(txq->sta);
765
-
778
+#endif
766779 if (txq->sta->ps.pkt_ready[txq->ps_id] == 1) {
767780 rwnx_set_traffic_status(rwnx_hw, txq->sta, true, txq->ps_id);
768781 }
@@ -772,11 +785,6 @@
 	if (!retry) {
 		/* add buffer in the sk_list */
 		skb_queue_tail(&txq->sk_list, skb);
-#ifdef CONFIG_RWNX_FULLMAC
-		// to update for SOFTMAC
-		rwnx_ipc_sta_buffer(rwnx_hw, txq->sta, txq->tid,
-				    ((struct rwnx_txhdr *)skb->data)->sw_hdr->frame_len);
-#endif
 	} else {
 		if (txq->last_retry_skb)
 			rwnx_skb_append(txq->last_retry_skb, skb, &txq->sk_list);
786794 txq->last_retry_skb = skb;
787795 txq->nb_retry++;
788796 }
789
-
797
+#ifdef CREATE_TRACE_POINTS
790798 trace_txq_queue_skb(skb, txq, retry);
791
-
799
+#endif
792800 /* Flowctrl corresponding netdev queue if needed */
793801 #ifdef CONFIG_RWNX_FULLMAC
794802 /* If too many buffer are queued for this TXQ stop netdev queue */
@@ -796,7 +804,9 @@
 	    (skb_queue_len(&txq->sk_list) > RWNX_NDEV_FLOW_CTRL_STOP)) {
 		txq->status |= RWNX_TXQ_NDEV_FLOW_CTRL;
 		netif_stop_subqueue(txq->ndev, txq->ndev_idx);
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_flowctrl_stop(txq);
+#endif
 	}
 #else /* ! CONFIG_RWNX_FULLMAC */
 
@@ -852,7 +862,6 @@
 	if (txq->pkt_pushed[user])
 		txq->pkt_pushed[user]--;
 
-	hwq->credits[user]++;
 	hwq->need_processing = true;
 	rwnx_hw->stats.cfm_balance[hwq->id]--;
 }
@@ -1019,7 +1028,7 @@
 			      struct sk_buff_head *sk_list_push)
 {
 	int nb_ready = skb_queue_len(&txq->sk_list);
-	int credits = min_t(int, rwnx_txq_get_credits(txq), hwq->credits[user]);
+	int credits = rwnx_txq_get_credits(txq);
 	bool res = false;
 
 	__skb_queue_head_init(sk_list_push);
@@ -1159,9 +1168,9 @@
 	struct rwnx_txq *txq, *next;
 	int user, credit_map = 0;
 	bool mu_enable;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_process_hw_queue(hwq);
-
+#endif
 	hwq->need_processing = false;
 
 	mu_enable = rwnx_txq_take_mu_lock(rwnx_hw);
@@ -1173,38 +1182,35 @@
 		struct sk_buff_head sk_list_push;
 		struct sk_buff *skb;
 		bool txq_empty;
-
+#ifdef CREATE_TRACE_POINTS
 		trace_process_txq(txq);
-
+#endif
 		/* sanity check for debug */
 		BUG_ON(!(txq->status & RWNX_TXQ_IN_HWQ_LIST));
+		if (txq->idx == TXQ_INACTIVE) {
+			printk("%s txq->idx == TXQ_INACTIVE \r\n", __func__);
+			continue;
+		}
 		BUG_ON(txq->idx == TXQ_INACTIVE);
 		BUG_ON(txq->credits <= 0);
 		BUG_ON(!rwnx_txq_skb_ready(txq));
 
-		if (!rwnx_txq_select_user(rwnx_hw, mu_enable, txq, hwq, &user))
-			continue;
-
-		if (!hwq->credits[user]) {
-			credit_map |= BIT(user);
-			if (credit_map == ALL_HWQ_MASK)
-				break;
+		if (!rwnx_txq_select_user(rwnx_hw, mu_enable, txq, hwq, &user)) {
+			printk("select user:%d\n", user);
 			continue;
 		}
 
 		txq_empty = rwnx_txq_get_skb_to_push(rwnx_hw, hwq, txq, user,
 						     &sk_list_push);
-
 		while ((skb = __skb_dequeue(&sk_list_push)) != NULL) {
 			txhdr = (struct rwnx_txhdr *)skb->data;
 			rwnx_tx_push(rwnx_hw, txhdr, 0);
 		}
 
 		if (txq_empty) {
-			rwnx_txq_del_from_hw_list(txq);
-			txq->pkt_sent = 0;
-		} else if ((hwq->credits[user] == 0) &&
-			   rwnx_txq_is_scheduled(txq)) {
+			rwnx_txq_del_from_hw_list(txq);
+			txq->pkt_sent = 0;
+		} else if (rwnx_txq_is_scheduled(txq)) {
 			/* txq not empty,
 			 - To avoid starving need to process other txq in the list
 			 - For better aggregation, need to send "as many consecutive
@@ -1230,10 +1236,12 @@
 
 	/* restart netdev queue if number of queued buffer is below threshold */
 	if (unlikely(txq->status & RWNX_TXQ_NDEV_FLOW_CTRL) &&
-	    skb_queue_len(&txq->sk_list) < RWNX_NDEV_FLOW_CTRL_RESTART) {
+	    (skb_queue_len(&txq->sk_list) < RWNX_NDEV_FLOW_CTRL_RESTART)) {
 		txq->status &= ~RWNX_TXQ_NDEV_FLOW_CTRL;
 		netif_wake_subqueue(txq->ndev, txq->ndev_idx);
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_flowctrl_restart(txq);
+#endif
 	}
 #endif /* CONFIG_RWNX_FULLMAC */
 }
@@ -1272,13 +1280,11 @@
  */
 void rwnx_hwq_init(struct rwnx_hw *rwnx_hw)
 {
-	int i, j;
+	int i;
 
 	for (i = 0; i < ARRAY_SIZE(rwnx_hw->hwq); i++) {
 		struct rwnx_hwq *hwq = &rwnx_hw->hwq[i];
 
-		for (j = 0 ; j < CONFIG_USER_MAX; j++)
-			hwq->credits[j] = nx_txdesc_cnt[i];
 		hwq->id = i;
 		hwq->size = nx_txdesc_cnt[i];
 		INIT_LIST_HEAD(&hwq->list);
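
Two patterns recur across this patch: every trace_* call is wrapped in #ifdef CREATE_TRACE_POINTS so the call sites compile out when the tracepoint header is not built, and the per-user hwq->credits[] bookkeeping is dropped so scheduling relies on the per-txq credits alone. Below is a minimal, self-contained sketch of the guard pattern; the struct, macro, and printf-based tracepoint are illustrative stand-ins, not the driver's actual types or trace API.

/* Sketch of the CREATE_TRACE_POINTS guard used throughout the patch.
 * Build with:  gcc -DCREATE_TRACE_POINTS guard.c   (tracing on)
 *         or:  gcc guard.c                         (tracing compiled out) */
#include <stdio.h>

struct txq { int idx; unsigned int status; };  /* stand-in for struct rwnx_txq */
#define TXQ_IN_HWQ_LIST 0x0001u

#ifdef CREATE_TRACE_POINTS
/* In the driver this comes from the generated trace header;
 * a printf stands in for the real tracepoint here. */
static void trace_txq_add_to_hw(struct txq *q)
{
	printf("txq %d -> hwq\n", q->idx);
}
#endif

static void txq_add_to_hw_list(struct txq *q)
{
	if (!(q->status & TXQ_IN_HWQ_LIST)) {
#ifdef CREATE_TRACE_POINTS
		trace_txq_add_to_hw(q);  /* removed at preprocessing when undefined */
#endif
		q->status |= TXQ_IN_HWQ_LIST;
	}
}

int main(void)
{
	struct txq q = { .idx = 3, .status = 0 };
	txq_add_to_hw_list(&q);  /* prints only when built with -DCREATE_TRACE_POINTS */
	return 0;
}

Guarding each call site this way means untraced builds never reference the trace symbols at all, so the file links even when the events header is absent.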