old mode 100644
new mode 100755
@@ -137,15 +137,15 @@
 	if (sw_txhdr->desc.host.packet_cnt > 1) {
 		struct rwnx_amsdu_txhdr *amsdu_txhdr;
 		list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) {
-			dma_unmap_single(rwnx_hw->dev, amsdu_txhdr->dma_addr,
-					 amsdu_txhdr->map_len, DMA_TO_DEVICE);
+			//dma_unmap_single(rwnx_hw->dev, amsdu_txhdr->dma_addr,
+			//		   amsdu_txhdr->map_len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(amsdu_txhdr->skb);
 		}
 	}
 #endif
 	kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
-	dma_unmap_single(rwnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len,
-			 DMA_TO_DEVICE);
+	//dma_unmap_single(rwnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len,
+	//		   DMA_TO_DEVICE);
 
 #ifdef CONFIG_RWNX_FULLMAC
 	dev_kfree_skb_any(skb);
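
Note: both unmap sites are commented out rather than deleted, presumably
because TX buffers are no longer DMA-mapped on this platform; the hunks
below retire the related shared-memory bookkeeping as well. Incidentally,
the pre-change code freed sw_txhdr back to its kmem cache (old line 146)
and then dereferenced it in the dma_unmap_single() call at old lines
147-148, a use-after-free. If the mapping is ever restored, the unmap must
come before the free; a corrected ordering of the same two calls:

	dma_unmap_single(rwnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len,
			 DMA_TO_DEVICE);
	kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
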
@@ -257,7 +257,6 @@
 	}
 
 #endif /* CONFIG_RWNX_FULLMAC*/
-	rwnx_ipc_sta_buffer_init(rwnx_hw, rwnx_sta->sta_idx);
 }
 
 /**
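
Note: this drops the per-STA IPC buffer-accounting initialisation; its
TX-path counterpart, the rwnx_ipc_sta_buffer() update, is removed in the
hunk at old lines 775-779 below.
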
@@ -385,7 +384,9 @@
 void rwnx_txq_add_to_hw_list(struct rwnx_txq *txq)
 {
 	if (!(txq->status & RWNX_TXQ_IN_HWQ_LIST)) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_add_to_hw(txq);
+#endif
 		txq->status |= RWNX_TXQ_IN_HWQ_LIST;
 		list_add_tail(&txq->sched_list, &txq->hwq->list);
 		txq->hwq->need_processing = true;
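
Note: this hunk and the matching ones below wrap each trace_* call site in
#ifdef CREATE_TRACE_POINTS, so the calls are compiled only where that macro
is defined. In the usual kernel tracepoint convention, CREATE_TRACE_POINTS
is defined in exactly one .c file, right before including the trace header,
to make the header emit the tracepoint definitions instead of only the
declarations; call sites normally stay unguarded because a trace_*() call
already compiles down to a static-branch no-op when the tracepoint is
disabled. A minimal sketch of that convention, assuming a trace header
named rwnx_events.h (name hypothetical):

	/* in exactly one compilation unit of the driver */
	#define CREATE_TRACE_POINTS
	#include "rwnx_events.h"

Guarding every call site instead, as done here, suggests the driver must
also build in configurations where the trace header is not generated at
all.
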
@@ -403,7 +404,9 @@
 void rwnx_txq_del_from_hw_list(struct rwnx_txq *txq)
 {
 	if (txq->status & RWNX_TXQ_IN_HWQ_LIST) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_del_from_hw(txq);
+#endif
 		txq->status &= ~RWNX_TXQ_IN_HWQ_LIST;
 		list_del(&txq->sched_list);
 	}
@@ -441,7 +444,9 @@
 {
 	BUG_ON(txq == NULL);
 	if (txq->idx != TXQ_INACTIVE && (txq->status & reason)) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_start(txq, reason);
+#endif
 		txq->status &= ~reason;
 		if (!rwnx_txq_is_stopped(txq) && rwnx_txq_skb_ready(txq))
 			rwnx_txq_add_to_hw_list(txq);
@@ -461,7 +466,9 @@
 {
 	BUG_ON(txq == NULL);
 	if (txq->idx != TXQ_INACTIVE) {
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_stop(txq, reason);
+#endif
 		txq->status |= reason;
 		rwnx_txq_del_from_hw_list(txq);
 	}
@@ -493,8 +500,9 @@
 {
 	struct rwnx_txq *txq;
 	int tid;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_sta_start(rwnx_sta->sta_idx);
+#endif
 
 	foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
 		rwnx_txq_start(txq, reason);
@@ -529,8 +537,9 @@
 
 	if (!rwnx_sta)
 		return;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_sta_stop(rwnx_sta->sta_idx);
+#endif
 	foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
 		rwnx_txq_stop(txq, reason);
 	}
@@ -540,7 +549,9 @@
 void rwnx_txq_tdls_sta_start(struct rwnx_vif *rwnx_vif, u16 reason,
 			     struct rwnx_hw *rwnx_hw)
 {
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_start(rwnx_vif->vif_index);
+#endif
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
 	if (rwnx_vif->sta.tdls_sta)
@@ -554,7 +565,9 @@
 void rwnx_txq_tdls_sta_stop(struct rwnx_vif *rwnx_vif, u16 reason,
 			    struct rwnx_hw *rwnx_hw)
 {
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_stop(rwnx_vif->vif_index);
+#endif
 
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
@@ -614,9 +627,9 @@
 			struct rwnx_hw *rwnx_hw)
 {
 	struct rwnx_txq *txq;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_start(rwnx_vif->vif_index);
-
+#endif
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
 #ifdef CONFIG_RWNX_FULLMAC
@@ -658,10 +671,9 @@
 			struct rwnx_hw *rwnx_hw)
 {
 	struct rwnx_txq *txq;
-
-	RWNX_DBG(RWNX_FN_ENTRY_STR);
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_vif_stop(rwnx_vif->vif_index);
+#endif
 	spin_lock_bh(&rwnx_hw->tx_lock);
 
 #ifdef CONFIG_RWNX_FULLMAC
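
Note: besides guarding the tracepoint, this hunk also drops the
RWNX_DBG(RWNX_FN_ENTRY_STR) function-entry log.
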
@@ -761,8 +773,9 @@
 #ifdef CONFIG_RWNX_FULLMAC
 	if (unlikely(txq->sta && txq->sta->ps.active)) {
 		txq->sta->ps.pkt_ready[txq->ps_id]++;
+#ifdef CREATE_TRACE_POINTS
 		trace_ps_queue(txq->sta);
-
+#endif
 		if (txq->sta->ps.pkt_ready[txq->ps_id] == 1) {
 			rwnx_set_traffic_status(rwnx_hw, txq->sta, true, txq->ps_id);
 		}
@@ -772,11 +785,6 @@
 	if (!retry) {
 		/* add buffer in the sk_list */
 		skb_queue_tail(&txq->sk_list, skb);
-#ifdef CONFIG_RWNX_FULLMAC
-		// to update for SOFTMAC
-		rwnx_ipc_sta_buffer(rwnx_hw, txq->sta, txq->tid,
-				    ((struct rwnx_txhdr *)skb->data)->sw_hdr->frame_len);
-#endif
 	} else {
 		if (txq->last_retry_skb)
 			rwnx_skb_append(txq->last_retry_skb, skb, &txq->sk_list);
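
Note: this removes the TX-path update half of the per-STA IPC buffer
accounting whose initialisation was dropped at old line 260 above; with
both gone, the rwnx_ipc_sta_buffer() bookkeeping is retired consistently
on this path.
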
@@ -786,9 +794,9 @@
 		txq->last_retry_skb = skb;
 		txq->nb_retry++;
 	}
-
+#ifdef CREATE_TRACE_POINTS
 	trace_txq_queue_skb(skb, txq, retry);
-
+#endif
 	/* Flowctrl corresponding netdev queue if needed */
 #ifdef CONFIG_RWNX_FULLMAC
 	/* If too many buffer are queued for this TXQ stop netdev queue */
@@ -796,7 +804,9 @@
 	    (skb_queue_len(&txq->sk_list) > RWNX_NDEV_FLOW_CTRL_STOP)) {
 		txq->status |= RWNX_TXQ_NDEV_FLOW_CTRL;
 		netif_stop_subqueue(txq->ndev, txq->ndev_idx);
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_flowctrl_stop(txq);
+#endif
 	}
 #else /* ! CONFIG_RWNX_FULLMAC */
 
@@ -852,7 +862,6 @@
 	if (txq->pkt_pushed[user])
 		txq->pkt_pushed[user]--;
 
-	hwq->credits[user]++;
 	hwq->need_processing = true;
 	rwnx_hw->stats.cfm_balance[hwq->id]--;
 }
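
Note: a completed confirmation no longer returns a credit to the per-user
HW queue pool. Together with the hunks at old lines 1022, 1188-1191 and
1280-1281, this retires hwq->credits[] entirely, leaving the per-TXQ
credits as the only flow-control budget.
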
@@ -1019,7 +1028,7 @@
 			  struct sk_buff_head *sk_list_push)
 {
 	int nb_ready = skb_queue_len(&txq->sk_list);
-	int credits = min_t(int, rwnx_txq_get_credits(txq), hwq->credits[user]);
+	int credits = rwnx_txq_get_credits(txq);
 	bool res = false;
 
 	__skb_queue_head_init(sk_list_push);
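
Note: the push budget used to be the minimum of the TXQ's credits and the
per-user HW queue pool; it is now the TXQ credits alone, so the hwq and
user arguments no longer feed the budget calculation here.
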
@@ -1159,9 +1168,9 @@
 	struct rwnx_txq *txq, *next;
 	int user, credit_map = 0;
 	bool mu_enable;
-
+#ifdef CREATE_TRACE_POINTS
 	trace_process_hw_queue(hwq);
-
+#endif
 	hwq->need_processing = false;
 
 	mu_enable = rwnx_txq_take_mu_lock(rwnx_hw);
@@ -1173,38 +1182,35 @@
 		struct sk_buff_head sk_list_push;
 		struct sk_buff *skb;
 		bool txq_empty;
-
+#ifdef CREATE_TRACE_POINTS
 		trace_process_txq(txq);
-
+#endif
 		/* sanity check for debug */
 		BUG_ON(!(txq->status & RWNX_TXQ_IN_HWQ_LIST));
+		if (txq->idx == TXQ_INACTIVE) {
+			printk("%s txq->idx == TXQ_INACTIVE \r\n", __func__);
+			continue;
+		}
 		BUG_ON(txq->idx == TXQ_INACTIVE);
 		BUG_ON(txq->credits <= 0);
 		BUG_ON(!rwnx_txq_skb_ready(txq));
 
-		if (!rwnx_txq_select_user(rwnx_hw, mu_enable, txq, hwq, &user))
-			continue;
-
-		if (!hwq->credits[user]) {
-			credit_map |= BIT(user);
-			if (credit_map == ALL_HWQ_MASK)
-				break;
+		if (!rwnx_txq_select_user(rwnx_hw, mu_enable, txq, hwq, &user)) {
+			printk("select user:%d\n", user);
 			continue;
 		}
 
 		txq_empty = rwnx_txq_get_skb_to_push(rwnx_hw, hwq, txq, user,
 						     &sk_list_push);
-
 		while ((skb = __skb_dequeue(&sk_list_push)) != NULL) {
 			txhdr = (struct rwnx_txhdr *)skb->data;
 			rwnx_tx_push(rwnx_hw, txhdr, 0);
 		}
 
 		if (txq_empty) {
-			rwnx_txq_del_from_hw_list(txq);
-			txq->pkt_sent = 0;
-		} else if ((hwq->credits[user] == 0) &&
-			   rwnx_txq_is_scheduled(txq)) {
+			rwnx_txq_del_from_hw_list(txq);
+			txq->pkt_sent = 0;
+		} else if (rwnx_txq_is_scheduled(txq)) {
 			/* txq not empty,
 			 - To avoid starving need to process other txq in the list
 			 - For better aggregation, need to send "as many consecutive
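
Note, three review points on this hunk: the new early-continue makes the
BUG_ON(txq->idx == TXQ_INACTIVE) just below it unreachable for that
condition, so the assertion could simply be dropped; the printk in the
select-user failure path logs `user`, which rwnx_txq_select_user() may not
have written when it fails, so the printed value can be stale; and with
the credit_map bookkeeping deleted, credit_map (declared at new line 1169)
is now set but never read. Both new printks would also conventionally
carry a KERN_* level (or use pr_warn()), and "\r\n" is unnecessary in
kernel logs.
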
@@ -1230,10 +1236,12 @@
 
 	/* restart netdev queue if number of queued buffer is below threshold */
 	if (unlikely(txq->status & RWNX_TXQ_NDEV_FLOW_CTRL) &&
-	    skb_queue_len(&txq->sk_list) < RWNX_NDEV_FLOW_CTRL_RESTART) {
+	    (skb_queue_len(&txq->sk_list) < RWNX_NDEV_FLOW_CTRL_RESTART)) {
 		txq->status &= ~RWNX_TXQ_NDEV_FLOW_CTRL;
 		netif_wake_subqueue(txq->ndev, txq->ndev_idx);
+#ifdef CREATE_TRACE_POINTS
 		trace_txq_flowctrl_restart(txq);
+#endif
 	}
 #endif /* CONFIG_RWNX_FULLMAC */
 }
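
Note: the added parentheses are purely cosmetic; `<` binds tighter than
`&&`, so the condition is unchanged.
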
@@ -1272,13 +1280,11 @@
  */
 void rwnx_hwq_init(struct rwnx_hw *rwnx_hw)
 {
-	int i, j;
+	int i;
 
 	for (i = 0; i < ARRAY_SIZE(rwnx_hw->hwq); i++) {
 		struct rwnx_hwq *hwq = &rwnx_hw->hwq[i];
 
-		for (j = 0 ; j < CONFIG_USER_MAX; j++)
-			hwq->credits[j] = nx_txdesc_cnt[i];
 		hwq->id = i;
 		hwq->size = nx_txdesc_cnt[i];
 		INIT_LIST_HEAD(&hwq->list);
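
Note: with this initialisation gone, nothing in the patched code writes
hwq->credits[] any more (see old lines 855, 1022 and 1188-1191 above); if
no other reader remains, the array could be dropped from struct rwnx_hwq
as a follow-up.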