@@ -95,20 +95,16 @@
 	/* Initialize a list to hold requests that have been posted to Octeon
 	 * but have yet to be fetched by Octeon
 	 */
-	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
-					numa_node);
+	iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
+					numa_node);
 	if (!iq->request_list)
-		iq->request_list =
-			vmalloc(array_size(num_descs,
-					   sizeof(*iq->request_list)));
+		iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));
 	if (!iq->request_list) {
 		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
 		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
 			iq_no);
 		return 1;
 	}
-
-	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
 
 	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
 		iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
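
This hunk and the next replace open-coded vmalloc() + memset() with the zeroing vzalloc() variants and route the size computation through array_size(), which saturates at SIZE_MAX on multiplication overflow so an overflowed size fails the allocation instead of silently shrinking it. A minimal sketch of the pattern, with illustrative names:

	struct entry *tbl;	/* illustrative type */

	/* Prefer memory on the device's NUMA node, fall back to any node;
	 * vzalloc*() returns zeroed memory, so no follow-up memset().
	 */
	tbl = vzalloc_node(array_size(nr, sizeof(*tbl)), node);
	if (!tbl)
		tbl = vzalloc(array_size(nr, sizeof(*tbl)));
	if (!tbl)
		return -ENOMEM;
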
@@ -218,15 +214,13 @@
 		return 0;
 	}
 	oct->instr_queue[iq_no] =
-		vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+		vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
 	if (!oct->instr_queue[iq_no])
 		oct->instr_queue[iq_no] =
-			vmalloc(sizeof(struct octeon_instr_queue));
+			vzalloc(sizeof(struct octeon_instr_queue));
 	if (!oct->instr_queue[iq_no])
 		return 1;
 
-	memset(oct->instr_queue[iq_no], 0,
-	       sizeof(struct octeon_instr_queue));
 
 	oct->instr_queue[iq_no]->q_index = q_index;
 	oct->instr_queue[iq_no]->app_ctx = app_ctx;
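
The same conversion is applied to the queue control structure above: because vzalloc_node()/vzalloc() return zeroed memory, the explicit memset() that followed the old vmalloc() pair is no longer needed.
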
@@ -280,7 +274,6 @@
 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
 		writel(iq->fill_cnt, iq->doorbell_reg);
 		/* make sure doorbell write goes through */
-		mmiowb();
 		iq->fill_cnt = 0;
 		iq->last_db_time = jiffies;
 		return;
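
The explicit barrier goes away because mmiowb() was removed from the kernel in v5.2: on architectures that need the ordering, releasing a spinlock held around MMIO writes now issues the barrier implicitly. A sketch of that model (the lock name is illustrative):

	spin_lock(&iq->post_lock);
	writel(iq->fill_cnt, iq->doorbell_reg);
	/* on architectures that required mmiowb(), the unlock below now
	 * orders the posted doorbell write automatically
	 */
	spin_unlock(&iq->post_lock);
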
@@ -382,7 +375,6 @@
 	u32 inst_count = 0;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 	struct octeon_soft_command *sc;
-	struct octeon_instr_irh *irh;
 	unsigned long flags;
 
 	while (old != iq->octeon_read_index) {
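
The irh local is removed here because its only user, the rflag test in the following hunk, is deleted there.
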
@@ -404,40 +396,21 @@
 		case REQTYPE_RESP_NET:
 		case REQTYPE_SOFT_COMMAND:
 			sc = buf;
-
-			if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd3.irh;
-			else
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd2.irh;
-			if (irh->rflag) {
-				/* We're expecting a response from Octeon.
-				 * It's up to lio_process_ordered_list() to
-				 * process sc. Add sc to the ordered soft
-				 * command response list because we expect
-				 * a response from Octeon.
-				 */
-				spin_lock_irqsave
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-				atomic_inc(&oct->response_list
-					   [OCTEON_ORDERED_SC_LIST].
-					   pending_req_count);
-				list_add_tail(&sc->node, &oct->response_list
-					      [OCTEON_ORDERED_SC_LIST].head);
-				spin_unlock_irqrestore
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-			} else {
-				if (sc->callback) {
-					/* This callback must not sleep */
-					sc->callback(oct, OCTEON_REQUEST_DONE,
-						     sc->callback_arg);
-				}
-			}
+			/* We're expecting a response from Octeon.
+			 * It's up to lio_process_ordered_list() to
+			 * process sc. Add sc to the ordered soft
+			 * command response list because we expect
+			 * a response from Octeon.
+			 */
+			spin_lock_irqsave(&oct->response_list
+					  [OCTEON_ORDERED_SC_LIST].lock, flags);
+			atomic_inc(&oct->response_list
+				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
+			list_add_tail(&sc->node, &oct->response_list
				      [OCTEON_ORDERED_SC_LIST].head);
+			spin_unlock_irqrestore(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock,
					       flags);
 			break;
 		default:
 			dev_err(&oct->pci_dev->dev,
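
With the rflag test gone, every soft command is treated as response-bearing: it is queued to the ordered response list under the irq-safe list lock, and lio_process_ordered_list() becomes the single completion path. Dropping the direct-callback branch for no-response commands is what allows the uniform expiry and zombie-list handling added later in this patch.
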
@@ -462,7 +435,7 @@
 
 	if (atomic_read(&oct->response_list
 			[OCTEON_ORDERED_SC_LIST].pending_req_count))
-		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
+		queue_work(cwq->wq, &cwq->wk.work.work);
 
 	return inst_count;
 }
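
Requeueing switches from a 1 ms delay to an immediate queue_work(). The doubled member access in &cwq->wk.work.work is not a typo: cwq->wk.work is a struct delayed_work, and its embedded plain work_struct is its .work member. A sketch (wq and dw are illustrative):

	/* A delayed_work embeds a plain work_struct, so the same item can
	 * be queued with or without a delay:
	 */
	queue_delayed_work(wq, &dw, msecs_to_jiffies(1));	/* after ~1 ms */
	queue_work(wq, &dw.work);				/* right away */
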
@@ -757,8 +730,7 @@
 		len = (u32)ih2->dlengsz;
 	}
 
-	if (sc->wait_time)
-		sc->timeout = jiffies + sc->wait_time;
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
 	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
 				    len, REQTYPE_SOFT_COMMAND));
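
Rather than honoring an optional caller-supplied wait_time, every soft command now gets the same hard deadline, LIO_SC_MAX_TMO_MS past submission. A hypothetical consumer-side check (the helper name is invented; only expiry_time comes from this hunk):

	if (time_after(jiffies, (unsigned long)sc->expiry_time))
		orderlist_mark_timeout(sc);	/* hypothetical helper */
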
@@ -793,10 +765,75 @@
 	return 0;
 }
 
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *done_sc_list, *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+	if (!atomic_read(&done_sc_list->pending_req_count))
+		return 0;
+
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+
+		if (READ_ONCE(sc->caller_is_done)) {
+			list_del(&sc->node);
+			atomic_dec(&done_sc_list->pending_req_count);
+
+			if (*sc->status_word == COMPLETION_WORD_INIT) {
+				/* timeout; move sc to zombie list */
+				list_add_tail(&sc->node, &zombie_sc_list->head);
+				atomic_inc(&zombie_sc_list->pending_req_count);
+			} else {
+				octeon_free_soft_command(oct, sc);
+			}
+		}
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+		list_del(tmp);
+		atomic_dec(&zombie_sc_list->pending_req_count);
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+		octeon_free_soft_command(oct, sc);
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 {
 	struct list_head *tmp, *tmp2;
 	struct octeon_soft_command *sc;
+
+	octeon_free_sc_zombie_list(oct);
 
 	spin_lock_bh(&oct->sc_buf_pool.lock);
 
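
The two new helpers give soft-command buffers a deferred-free lifecycle. octeon_free_sc_done_list() reaps a command only after its caller has flagged it done; if the status word still holds COMPLETION_WORD_INIT, the device never wrote a response, so the buffer is parked on the zombie list instead of being freed while hardware might still DMA into it. octeon_free_sc_zombie_list() drains those stragglers, and octeon_free_sc_buffer_pool() now calls it first during teardown. A hypothetical caller-side view of the handshake (the waiting step is elided):

	sc->caller_is_done = false;		/* driver owns the buffer */
	octeon_send_soft_command(oct, sc);
	/* ... wait for the response, or give up ... */
	WRITE_ONCE(sc->caller_is_done, true);	/* hand the buffer back */
	octeon_free_sc_done_list(oct);		/* frees it, or zombifies it */
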
@@ -826,6 +863,9 @@
 	struct octeon_soft_command *sc = NULL;
 	struct list_head *tmp;
 
+	if (!rdatasize)
+		rdatasize = 16;
+
 	WARN_ON((offset + datasize + rdatasize + ctxsize) >
 		SOFT_COMMAND_BUFFER_SIZE);
 
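
Forcing rdatasize up to a 16-byte minimum gives every soft command a response buffer, so the status word that octeon_free_sc_done_list() inspects exists even for commands that previously requested no response data.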
---|