.. | .. |
---|
280 | 280 | dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no, |
---|
281 | 281 | droq->max_count); |
---|
282 | 282 | |
---|
283 | | - droq->recv_buf_list = (struct octeon_recv_buffer *) |
---|
284 | | - vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE), |
---|
285 | | - numa_node); |
---|
| 283 | + droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE), |
---|
| 284 | + numa_node); |
---|
286 | 285 | if (!droq->recv_buf_list) |
---|
287 | | - droq->recv_buf_list = (struct octeon_recv_buffer *) |
---|
288 | | - vzalloc(array_size(droq->max_count, |
---|
289 | | - OCT_DROQ_RECVBUF_SIZE)); |
---|
| 286 | + droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE)); |
---|
290 | 287 | if (!droq->recv_buf_list) { |
---|
291 | 288 | dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); |
---|
292 | 289 | goto init_droq_fail; |
---|
.. | .. |
---|
300 | 297 | |
---|
301 | 298 | dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n", |
---|
302 | 299 | droq->max_empty_descs); |
---|
303 | | - |
---|
304 | | - spin_lock_init(&droq->lock); |
---|
305 | 300 | |
---|
306 | 301 | INIT_LIST_HEAD(&droq->dispatch_list); |
---|
307 | 302 | |
---|
.. | .. |
---|
333 | 328 | * Returns: |
---|
334 | 329 | * Success: Pointer to recv_info_t |
---|
335 | 330 | * Failure: NULL. |
---|
336 | | - * Locks: |
---|
337 | | - * The droq->lock is held when this routine is called. |
---|
338 | 331 | */ |
---|
339 | 332 | static inline struct octeon_recv_info *octeon_create_recv_info( |
---|
340 | 333 | struct octeon_device *octeon_dev, |
---|
.. | .. |
---|
433 | 426 | * up buffers (that were not dispatched) to form a contiguous ring. |
---|
434 | 427 | * Returns: |
---|
435 | 428 | * No of descriptors refilled. |
---|
436 | | - * Locks: |
---|
437 | | - * This routine is called with droq->lock held. |
---|
438 | 429 | */ |
---|
439 | 430 | static u32 |
---|
440 | 431 | octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) |
---|
.. | .. |
---|
449 | 440 | |
---|
450 | 441 | while (droq->refill_count && (desc_refilled < droq->max_count)) { |
---|
451 | 442 | /* If a valid buffer exists (happens if there is no dispatch), |
---|
452 | | - * reuse |
---|
453 | | - * the buffer, else allocate. |
---|
| 443 | + * reuse the buffer, else allocate. |
---|
454 | 444 | */ |
---|
455 | 445 | if (!droq->recv_buf_list[droq->refill_idx].buffer) { |
---|
456 | 446 | pg_info = |
---|
.. | .. |
---|
503 | 493 | |
---|
504 | 494 | /** check if we can allocate packets to get out of oom. |
---|
505 | 495 | * @param droq - Droq being checked. |
---|
506 | | - * @return does not return anything |
---|
| 496 | + * @return 1 if fails to refill minimum |
---|
507 | 497 | */ |
---|
508 | | -void octeon_droq_check_oom(struct octeon_droq *droq) |
---|
| 498 | +int octeon_retry_droq_refill(struct octeon_droq *droq) |
---|
509 | 499 | { |
---|
510 | | - int desc_refilled; |
---|
511 | 500 | struct octeon_device *oct = droq->oct_dev; |
---|
| 501 | + int desc_refilled, reschedule = 1; |
---|
| 502 | + u32 pkts_credit; |
---|
512 | 503 | |
---|
513 | | - if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) { |
---|
514 | | - spin_lock_bh(&droq->lock); |
---|
515 | | - desc_refilled = octeon_droq_refill(oct, droq); |
---|
516 | | - if (desc_refilled) { |
---|
517 | | - /* Flush the droq descriptor data to memory to be sure |
---|
518 | | - * that when we update the credits the data in memory |
---|
519 | | - * is accurate. |
---|
520 | | - */ |
---|
521 | | - wmb(); |
---|
522 | | - writel(desc_refilled, droq->pkts_credit_reg); |
---|
523 | | - /* make sure mmio write completes */ |
---|
524 | | - mmiowb(); |
---|
525 | | - } |
---|
526 | | - spin_unlock_bh(&droq->lock); |
---|
| 504 | + pkts_credit = readl(droq->pkts_credit_reg); |
---|
| 505 | + desc_refilled = octeon_droq_refill(oct, droq); |
---|
| 506 | + if (desc_refilled) { |
---|
| 507 | + /* Flush the droq descriptor data to memory to be sure |
---|
| 508 | + * that when we update the credits the data in memory |
---|
| 509 | + * is accurate. |
---|
| 510 | + */ |
---|
| 511 | + wmb(); |
---|
| 512 | + writel(desc_refilled, droq->pkts_credit_reg); |
---|
| 513 | + |
---|
| 514 | + if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP) |
---|
| 515 | + reschedule = 0; |
---|
527 | 516 | } |
---|
| 517 | + |
---|
| 518 | + return reschedule; |
---|
528 | 519 | } |
---|
529 | 520 | |
---|
/* Number of receive buffers of buf_size bytes needed to hold a packet
 * of total_len bytes (ceiling division).
 */
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	return DIV_ROUND_UP(total_len, buf_size);
}
---|
535 | 526 | |
---|
536 | 527 | static int |
---|
.. | .. |
---|
603 | 594 | struct octeon_droq *droq, |
---|
604 | 595 | u32 pkts_to_process) |
---|
605 | 596 | { |
---|
| 597 | + u32 pkt, total_len = 0, pkt_count, retval; |
---|
606 | 598 | struct octeon_droq_info *info; |
---|
607 | 599 | union octeon_rh *rh; |
---|
608 | | - u32 pkt, total_len = 0, pkt_count; |
---|
609 | 600 | |
---|
610 | 601 | pkt_count = pkts_to_process; |
---|
611 | 602 | |
---|
.. | .. |
---|
709 | 700 | if (droq->refill_count >= droq->refill_threshold) { |
---|
710 | 701 | int desc_refilled = octeon_droq_refill(oct, droq); |
---|
711 | 702 | |
---|
712 | | - /* Flush the droq descriptor data to memory to be sure |
---|
713 | | - * that when we update the credits the data in memory |
---|
714 | | - * is accurate. |
---|
715 | | - */ |
---|
716 | | - wmb(); |
---|
717 | | - writel((desc_refilled), droq->pkts_credit_reg); |
---|
718 | | - /* make sure mmio write completes */ |
---|
719 | | - mmiowb(); |
---|
| 703 | + if (desc_refilled) { |
---|
| 704 | + /* Flush the droq descriptor data to memory to |
---|
| 705 | + * be sure that when we update the credits the |
---|
| 706 | + * data in memory is accurate. |
---|
| 707 | + */ |
---|
| 708 | + wmb(); |
---|
| 709 | + writel(desc_refilled, droq->pkts_credit_reg); |
---|
| 710 | + } |
---|
720 | 711 | } |
---|
721 | | - |
---|
722 | 712 | } /* for (each packet)... */ |
---|
723 | 713 | |
---|
724 | 714 | /* Increment refill_count by the number of buffers processed. */ |
---|
725 | 715 | droq->stats.pkts_received += pkt; |
---|
726 | 716 | droq->stats.bytes_received += total_len; |
---|
727 | 717 | |
---|
| 718 | + retval = pkt; |
---|
728 | 719 | if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { |
---|
729 | 720 | octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); |
---|
730 | 721 | |
---|
731 | 722 | droq->stats.dropped_toomany += (pkts_to_process - pkt); |
---|
732 | | - return pkts_to_process; |
---|
| 723 | + retval = pkts_to_process; |
---|
733 | 724 | } |
---|
734 | 725 | |
---|
735 | | - return pkt; |
---|
| 726 | + atomic_sub(retval, &droq->pkts_pending); |
---|
| 727 | + |
---|
| 728 | + if (droq->refill_count >= droq->refill_threshold && |
---|
| 729 | + readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) { |
---|
| 730 | + octeon_droq_check_hw_for_pkts(droq); |
---|
| 731 | + |
---|
| 732 | + /* Make sure there are no pkts_pending */ |
---|
| 733 | + if (!atomic_read(&droq->pkts_pending)) |
---|
| 734 | + octeon_schedule_rxq_oom_work(oct, droq); |
---|
| 735 | + } |
---|
| 736 | + |
---|
| 737 | + return retval; |
---|
736 | 738 | } |
---|
737 | 739 | |
---|
738 | 740 | int |
---|
.. | .. |
---|
740 | 742 | struct octeon_droq *droq, |
---|
741 | 743 | u32 budget) |
---|
742 | 744 | { |
---|
743 | | - u32 pkt_count = 0, pkts_processed = 0; |
---|
| 745 | + u32 pkt_count = 0; |
---|
744 | 746 | struct list_head *tmp, *tmp2; |
---|
745 | | - |
---|
746 | | - /* Grab the droq lock */ |
---|
747 | | - spin_lock(&droq->lock); |
---|
748 | 747 | |
---|
749 | 748 | octeon_droq_check_hw_for_pkts(droq); |
---|
750 | 749 | pkt_count = atomic_read(&droq->pkts_pending); |
---|
751 | 750 | |
---|
752 | | - if (!pkt_count) { |
---|
753 | | - spin_unlock(&droq->lock); |
---|
| 751 | + if (!pkt_count) |
---|
754 | 752 | return 0; |
---|
755 | | - } |
---|
756 | 753 | |
---|
757 | 754 | if (pkt_count > budget) |
---|
758 | 755 | pkt_count = budget; |
---|
759 | 756 | |
---|
760 | | - pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); |
---|
761 | | - |
---|
762 | | - atomic_sub(pkts_processed, &droq->pkts_pending); |
---|
763 | | - |
---|
764 | | - /* Release the spin lock */ |
---|
765 | | - spin_unlock(&droq->lock); |
---|
| 757 | + octeon_droq_fast_process_packets(oct, droq, pkt_count); |
---|
766 | 758 | |
---|
767 | 759 | list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { |
---|
768 | 760 | struct __dispatch *rdisp = (struct __dispatch *)tmp; |
---|
.. | .. |
---|
782 | 774 | return 0; |
---|
783 | 775 | } |
---|
784 | 776 | |
---|
785 | | -/** |
---|
| 777 | +/* |
---|
786 | 778 | * Utility function to poll for packets. check_hw_for_packets must be |
---|
787 | 779 | * called before calling this routine. |
---|
788 | 780 | */ |
---|
.. | .. |
---|
798 | 790 | if (budget > droq->max_count) |
---|
799 | 791 | budget = droq->max_count; |
---|
800 | 792 | |
---|
801 | | - spin_lock(&droq->lock); |
---|
802 | | - |
---|
803 | 793 | while (total_pkts_processed < budget) { |
---|
804 | 794 | octeon_droq_check_hw_for_pkts(droq); |
---|
805 | 795 | |
---|
.. | .. |
---|
813 | 803 | octeon_droq_fast_process_packets(oct, droq, |
---|
814 | 804 | pkts_available); |
---|
815 | 805 | |
---|
816 | | - atomic_sub(pkts_processed, &droq->pkts_pending); |
---|
817 | | - |
---|
818 | 806 | total_pkts_processed += pkts_processed; |
---|
819 | 807 | } |
---|
820 | | - |
---|
821 | | - spin_unlock(&droq->lock); |
---|
822 | 808 | |
---|
823 | 809 | list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { |
---|
824 | 810 | struct __dispatch *rdisp = (struct __dispatch *)tmp; |
---|
.. | .. |
---|
879 | 865 | int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, |
---|
880 | 866 | struct octeon_droq_ops *ops) |
---|
881 | 867 | { |
---|
882 | | - struct octeon_droq *droq; |
---|
883 | | - unsigned long flags; |
---|
884 | 868 | struct octeon_config *oct_cfg = NULL; |
---|
| 869 | + struct octeon_droq *droq; |
---|
885 | 870 | |
---|
886 | 871 | oct_cfg = octeon_get_conf(oct); |
---|
887 | 872 | |
---|
.. | .. |
---|
901 | 886 | } |
---|
902 | 887 | |
---|
903 | 888 | droq = oct->droq[q_no]; |
---|
904 | | - |
---|
905 | | - spin_lock_irqsave(&droq->lock, flags); |
---|
906 | | - |
---|
907 | 889 | memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops)); |
---|
908 | | - |
---|
909 | | - spin_unlock_irqrestore(&droq->lock, flags); |
---|
910 | 890 | |
---|
911 | 891 | return 0; |
---|
912 | 892 | } |
---|
913 | 893 | |
---|
914 | 894 | int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) |
---|
915 | 895 | { |
---|
916 | | - unsigned long flags; |
---|
917 | | - struct octeon_droq *droq; |
---|
918 | 896 | struct octeon_config *oct_cfg = NULL; |
---|
| 897 | + struct octeon_droq *droq; |
---|
919 | 898 | |
---|
920 | 899 | oct_cfg = octeon_get_conf(oct); |
---|
921 | 900 | |
---|
.. | .. |
---|
936 | 915 | return 0; |
---|
937 | 916 | } |
---|
938 | 917 | |
---|
939 | | - spin_lock_irqsave(&droq->lock, flags); |
---|
940 | | - |
---|
941 | 918 | droq->ops.fptr = NULL; |
---|
942 | 919 | droq->ops.farg = NULL; |
---|
943 | 920 | droq->ops.drop_on_max = 0; |
---|
944 | | - |
---|
945 | | - spin_unlock_irqrestore(&droq->lock, flags); |
---|
946 | 921 | |
---|
947 | 922 | return 0; |
---|
948 | 923 | } |
---|