
hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
--- a/kernel/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/kernel/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -95,20 +95,16 @@
 	/* Initialize a list to holds requests that have been posted to Octeon
 	 * but has yet to be fetched by octeon
 	 */
-	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
-					numa_node);
+	iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
+					numa_node);
 	if (!iq->request_list)
-		iq->request_list =
-			vmalloc(array_size(num_descs,
-					   sizeof(*iq->request_list)));
+		iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));
 	if (!iq->request_list) {
 		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
 		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
 			iq_no);
 		return 1;
 	}
-
-	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
 
 	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
 		iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
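
Side note (not part of the patch): the hunk above converges on a NUMA-local, pre-zeroed vmalloc with a plain vzalloc fallback, with array_size() guarding the size multiplication against overflow. A minimal standalone sketch of that pattern, using illustrative names (alloc_request_list, entry_size) rather than the driver's own:

	/* Sketch only: NUMA-aware zeroed allocation with an any-node fallback. */
	#include <linux/vmalloc.h>
	#include <linux/overflow.h>

	static void *alloc_request_list(size_t num_descs, size_t entry_size,
					int numa_node)
	{
		/* vzalloc_node() returns zeroed memory, so no memset() is needed */
		void *buf = vzalloc_node(array_size(num_descs, entry_size), numa_node);

		/* fall back to any node if the preferred node has no memory */
		if (!buf)
			buf = vzalloc(array_size(num_descs, entry_size));

		return buf;	/* NULL on failure; caller must check */
	}
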
@@ -218,15 +214,13 @@
 		return 0;
 	}
 	oct->instr_queue[iq_no] =
-		vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+		vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
 	if (!oct->instr_queue[iq_no])
 		oct->instr_queue[iq_no] =
-			vmalloc(sizeof(struct octeon_instr_queue));
+			vzalloc(sizeof(struct octeon_instr_queue));
 	if (!oct->instr_queue[iq_no])
 		return 1;
 
-	memset(oct->instr_queue[iq_no], 0,
-	       sizeof(struct octeon_instr_queue));
 
 	oct->instr_queue[iq_no]->q_index = q_index;
 	oct->instr_queue[iq_no]->app_ctx = app_ctx;
@@ -280,7 +274,6 @@
 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
 		writel(iq->fill_cnt, iq->doorbell_reg);
 		/* make sure doorbell write goes through */
-		mmiowb();
 		iq->fill_cnt = 0;
 		iq->last_db_time = jiffies;
 		return;
@@ -382,7 +375,6 @@
 	u32 inst_count = 0;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 	struct octeon_soft_command *sc;
-	struct octeon_instr_irh *irh;
 	unsigned long flags;
 
 	while (old != iq->octeon_read_index) {
@@ -404,40 +396,21 @@
 		case REQTYPE_RESP_NET:
 		case REQTYPE_SOFT_COMMAND:
 			sc = buf;
-
-			if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd3.irh;
-			else
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd2.irh;
-			if (irh->rflag) {
-				/* We're expecting a response from Octeon.
-				 * It's up to lio_process_ordered_list() to
-				 * process sc. Add sc to the ordered soft
-				 * command response list because we expect
-				 * a response from Octeon.
-				 */
-				spin_lock_irqsave
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-				atomic_inc(&oct->response_list
-					   [OCTEON_ORDERED_SC_LIST].
-					   pending_req_count);
-				list_add_tail(&sc->node, &oct->response_list
-					      [OCTEON_ORDERED_SC_LIST].head);
-				spin_unlock_irqrestore
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-			} else {
-				if (sc->callback) {
-					/* This callback must not sleep */
-					sc->callback(oct, OCTEON_REQUEST_DONE,
-						     sc->callback_arg);
-				}
-			}
+			/* We're expecting a response from Octeon.
+			 * It's up to lio_process_ordered_list() to
+			 * process sc. Add sc to the ordered soft
+			 * command response list because we expect
+			 * a response from Octeon.
+			 */
+			spin_lock_irqsave(&oct->response_list
+					  [OCTEON_ORDERED_SC_LIST].lock, flags);
+			atomic_inc(&oct->response_list
+				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
+			list_add_tail(&sc->node, &oct->response_list
+				      [OCTEON_ORDERED_SC_LIST].head);
+			spin_unlock_irqrestore(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock,
+					       flags);
 			break;
 		default:
 			dev_err(&oct->pci_dev->dev,
@@ -462,7 +435,7 @@
 
 	if (atomic_read(&oct->response_list
 			[OCTEON_ORDERED_SC_LIST].pending_req_count))
-		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
+		queue_work(cwq->wq, &cwq->wk.work.work);
 
 	return inst_count;
 }
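
Side note (not part of the patch): the member access changes from &cwq->wk.work to &cwq->wk.work.work because queue_work() takes a bare struct work_struct *, while queue_delayed_work() takes the struct delayed_work that embeds it. Assuming cwq->wk.work is a struct delayed_work (which the .work.work access implies), a minimal sketch of that relationship with illustrative names (my_handler, my_dwork, example):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static void my_handler(struct work_struct *w)
	{
		/* runs later in workqueue context */
	}

	static struct delayed_work my_dwork;

	static void example(struct workqueue_struct *wq)
	{
		INIT_DELAYED_WORK(&my_dwork, my_handler);

		/* either: delayed queueing takes the delayed_work plus a delay */
		queue_delayed_work(wq, &my_dwork, msecs_to_jiffies(1));

		/* or: immediate queueing takes the embedded work_struct, no delay */
		queue_work(wq, &my_dwork.work);
	}
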
@@ -757,8 +730,7 @@
 		len = (u32)ih2->dlengsz;
 	}
 
-	if (sc->wait_time)
-		sc->timeout = jiffies + sc->wait_time;
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
 	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
 				    len, REQTYPE_SOFT_COMMAND));
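
Side note (not part of the patch): sc->expiry_time now records an absolute deadline in jiffies. The usual pairing for such a deadline is jiffies + msecs_to_jiffies() when arming and time_after() when checking, which is wrap-around safe. A small illustrative sketch; the MY_TMO_MS value and function names are made up, not the driver's LIO_SC_MAX_TMO_MS:

	#include <linux/jiffies.h>

	#define MY_TMO_MS	60000	/* illustrative timeout, in milliseconds */

	static unsigned long arm_deadline(void)
	{
		/* convert a relative ms timeout into an absolute jiffies deadline */
		return jiffies + msecs_to_jiffies(MY_TMO_MS);
	}

	static bool deadline_expired(unsigned long deadline)
	{
		/* time_after() handles jiffies wrap-around correctly */
		return time_after(jiffies, deadline);
	}
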
@@ -793,10 +765,75 @@
 	return 0;
 }
 
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *done_sc_list, *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+	if (!atomic_read(&done_sc_list->pending_req_count))
+		return 0;
+
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+
+		if (READ_ONCE(sc->caller_is_done)) {
+			list_del(&sc->node);
+			atomic_dec(&done_sc_list->pending_req_count);
+
+			if (*sc->status_word == COMPLETION_WORD_INIT) {
+				/* timeout; move sc to zombie list */
+				list_add_tail(&sc->node, &zombie_sc_list->head);
+				atomic_inc(&zombie_sc_list->pending_req_count);
+			} else {
+				octeon_free_soft_command(oct, sc);
+			}
+		}
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+		list_del(tmp);
+		atomic_dec(&zombie_sc_list->pending_req_count);
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+		octeon_free_soft_command(oct, sc);
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 {
 	struct list_head *tmp, *tmp2;
 	struct octeon_soft_command *sc;
+
+	octeon_free_sc_zombie_list(oct);
 
 	spin_lock_bh(&oct->sc_buf_pool.lock);
 
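
Side note (not part of the patch): both new reapers above delete entries from a list while walking it, which is only safe with list_for_each_safe(), since the next pointer is cached before the current node is unlinked. A generic standalone sketch of that pattern (struct my_item and reap_list are illustrative names, not the driver's):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_item {
		struct list_head node;
		int data;
	};

	static void reap_list(struct list_head *head)
	{
		struct list_head *cur, *next;

		list_for_each_safe(cur, next, head) {
			struct my_item *item = list_entry(cur, struct my_item, node);

			list_del(cur);	/* unlink before freeing */
			kfree(item);	/* safe: 'next' was saved already */
		}
	}
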
@@ -826,6 +863,9 @@
 	struct octeon_soft_command *sc = NULL;
 	struct list_head *tmp;
 
+	if (!rdatasize)
+		rdatasize = 16;
+
 	WARN_ON((offset + datasize + rdatasize + ctxsize) >
 		SOFT_COMMAND_BUFFER_SIZE);
 