
hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/s390/cio/qdio_main.c
@@ -131,7 +131,7 @@
 	case 96:
 		/* not all buffers processed */
 		qperf_inc(q, eqbs_partial);
-		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
 		return count - tmp_count;
 	case 97:
@@ -143,7 +143,7 @@
 		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
 		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
-			   q->first_to_kick, count, q->irq_ptr->int_parm);
+			   q->first_to_check, count, q->irq_ptr->int_parm);
 		return 0;
 	}
 }
@@ -191,7 +191,7 @@
 		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
 		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
-			   q->first_to_kick, count, q->irq_ptr->int_parm);
+			   q->first_to_check, count, q->irq_ptr->int_parm);
 		return 0;
 	}
 }
@@ -205,17 +205,22 @@
 			int auto_ack, int merge_pending)
 {
 	unsigned char __state = 0;
-	int i;
+	int i = 1;
 
 	if (is_qebsm(q))
 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
 	/* get initial state: */
 	__state = q->slsb.val[bufnr];
+
+	/* Bail out early if there is no work on the queue: */
+	if (__state & SLSB_OWNER_CU)
+		goto out;
+
 	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
 		__state = SLSB_P_OUTPUT_EMPTY;
 
-	for (i = 1; i < count; i++) {
+	for (; i < count; i++) {
 		bufnr = next_buf(bufnr);
 
 		/* merge PENDING into EMPTY: */
@@ -228,6 +233,8 @@
 		if (q->slsb.val[bufnr] != __state)
 			break;
 	}
+
+out:
 	*state = __state;
 	return i;
 }
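
A note on the get_buf_states() rework above: the scan index now starts at 1, so the early bail-out can report the first buffer's state without entering the loop. Below is a minimal userspace model of that scan, assuming illustrative SLSB constants (the real values live in the s390 qdio headers); it is a sketch, not the driver code:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
#define SLSB_OWNER_CU		0x80	/* assumed bit layout for this sketch */
#define SLSB_P_OUTPUT_PENDING	0xa2	/* hypothetical value */
#define SLSB_P_OUTPUT_EMPTY	0xa1	/* hypothetical value */

static int next_buf(int bufnr)
{
	return (bufnr + 1) & QDIO_MAX_BUFFERS_MASK;
}

/* Count how many consecutive buffers share the state of slsb[bufnr]. */
static int get_buf_states_model(const unsigned char *slsb, int bufnr,
				int count, int merge_pending,
				unsigned char *state)
{
	unsigned char __state = slsb[bufnr];
	int i = 1;

	/* no work on the queue, report the first buffer as-is: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
		__state = SLSB_P_OUTPUT_EMPTY;

	for (; i < count; i++) {
		unsigned char cur;

		bufnr = next_buf(bufnr);
		cur = slsb[bufnr];
		/* merge PENDING into EMPTY, as the kernel loop does: */
		if (merge_pending && cur == SLSB_P_OUTPUT_PENDING)
			cur = SLSB_P_OUTPUT_EMPTY;
		if (cur != __state)
			break;
	}
out:
	*state = __state;
	return i;
}

int main(void)
{
	unsigned char slsb[QDIO_MAX_BUFFERS_PER_Q] = { [0 ... 2] = 0xa1 };
	unsigned char state;

	/* three EMPTY buffers in a row => prints 3 */
	printf("%d\n", get_buf_states_model(slsb, 0, 8, 1, &state));
	return 0;
}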
@@ -247,10 +254,17 @@
 	if (is_qebsm(q))
 		return qdio_do_sqbs(q, state, bufnr, count);
 
+	/* Ensure that all preceding changes to the SBALs are visible: */
+	mb();
+
 	for (i = 0; i < count; i++) {
-		xchg(&q->slsb.val[bufnr], state);
+		WRITE_ONCE(q->slsb.val[bufnr], state);
 		bufnr = next_buf(bufnr);
 	}
+
+	/* Make our SLSB changes visible: */
+	mb();
+
 	return count;
 }
 
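
The set_buf_states() hunk trades the per-store barriers implied by xchg() for plain WRITE_ONCE() stores bracketed by two explicit mb() fences — two barriers per batch instead of one per buffer. A userspace analogue of that ordering, assuming a C11 seq-cst fence stands in for the kernel's mb():

#include <stdatomic.h>

#define QDIO_MAX_BUFFERS_MASK 127

static int set_buf_states_model(volatile unsigned char *slsb, int bufnr,
				unsigned char state, int count)
{
	int i;

	/* ensure all preceding SBAL updates are visible first (mb()): */
	atomic_thread_fence(memory_order_seq_cst);

	for (i = 0; i < count; i++) {
		slsb[bufnr] = state;	/* plain store, like WRITE_ONCE() */
		bufnr = (bufnr + 1) & QDIO_MAX_BUFFERS_MASK;
	}

	/* make the SLSB updates visible before signalling the device: */
	atomic_thread_fence(memory_order_seq_cst);
	return count;
}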
@@ -303,20 +317,19 @@
 	return qdio_siga_sync(q, q->mask, 0);
 }
 
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
-			    unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+			    unsigned int *busy_bit, unsigned long aob)
 {
 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 	unsigned int fc = QDIO_SIGA_WRITE;
 	u64 start_time = 0;
 	int retries = 0, cc;
-	unsigned long laob = 0;
 
-	WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
-		     !q->u.out.use_cq));
-	if (q->u.out.use_cq && aob != 0) {
-		fc = QDIO_SIGA_WRITEQ;
-		laob = aob;
+	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+		if (count > 1)
+			fc = QDIO_SIGA_WRITEM;
+		else if (aob)
+			fc = QDIO_SIGA_WRITEQ;
 	}
 
 	if (is_qebsm(q)) {
@@ -324,7 +337,7 @@
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
 
 	/* hipersocket busy condition */
 	if (unlikely(*busy_bit)) {
@@ -371,7 +384,7 @@
 static inline void qdio_sync_queues(struct qdio_q *q)
 {
 	/* PCI capable outbound queues will also be scanned so sync them too */
-	if (pci_out_supported(q))
+	if (pci_out_supported(q->irq_ptr))
 		qdio_siga_sync_all(q);
 	else
 		qdio_siga_sync_q(q);
@@ -382,186 +395,125 @@
 {
 	if (need_siga_sync(q))
 		qdio_siga_sync_q(q);
-	return get_buf_states(q, bufnr, state, 1, 0, 0);
+	return get_buf_state(q, bufnr, state, 0);
 }
 
 static inline void qdio_stop_polling(struct qdio_q *q)
 {
-	if (!q->u.in.polling)
+	if (!q->u.in.batch_count)
 		return;
 
-	q->u.in.polling = 0;
 	qperf_inc(q, stop_polling);
 
 	/* show the card that we are not polling anymore */
-	if (is_qebsm(q)) {
-		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-			       q->u.in.ack_count);
-		q->u.in.ack_count = 0;
-	} else
-		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
+		       q->u.in.batch_count);
+	q->u.in.batch_count = 0;
 }
 
 static inline void account_sbals(struct qdio_q *q, unsigned int count)
 {
-	int pos;
-
 	q->q_stats.nr_sbal_total += count;
-	if (count == QDIO_MAX_BUFFERS_MASK) {
-		q->q_stats.nr_sbals[7]++;
-		return;
-	}
-	pos = ilog2(count);
-	q->q_stats.nr_sbals[pos]++;
+	q->q_stats.nr_sbals[ilog2(count)]++;
 }
 
-static void process_buffer_error(struct qdio_q *q, int count)
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+				 int count)
 {
-	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
-					SLSB_P_OUTPUT_NOT_INIT;
-
 	q->qdio_error = QDIO_ERROR_SLSB_STATE;
 
 	/* special handling for no target buffer empty */
 	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
-	    q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+	    q->sbal[start]->element[15].sflags == 0x10) {
 		qperf_inc(q, target_full);
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
-			      q->first_to_check);
-		goto set;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
+		return;
 	}
 
 	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
 	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
-	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+	DBF_ERROR("FTC:%3d C:%3d", start, count);
 	DBF_ERROR("F14:%2x F15:%2x",
-		  q->sbal[q->first_to_check]->element[14].sflags,
-		  q->sbal[q->first_to_check]->element[15].sflags);
-
-set:
-	/*
-	 * Interrupts may be avoided as long as the error is present
-	 * so change the buffer state immediately to avoid starvation.
-	 */
-	set_buf_states(q, q->first_to_check, state, count);
+		  q->sbal[start]->element[14].sflags,
+		  q->sbal[start]->element[15].sflags);
 }
 
-static inline void inbound_primed(struct qdio_q *q, int count)
+static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
+				       int count, bool auto_ack)
 {
-	int new;
+	/* ACK the newest SBAL: */
+	if (!auto_ack)
+		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
 
-	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
-
-	/* for QEBSM the ACK was already set by EQBS */
-	if (is_qebsm(q)) {
-		if (!q->u.in.polling) {
-			q->u.in.polling = 1;
-			q->u.in.ack_count = count;
-			q->u.in.ack_start = q->first_to_check;
-			return;
-		}
-
-		/* delete the previous ACK's */
-		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-			       q->u.in.ack_count);
-		q->u.in.ack_count = count;
-		q->u.in.ack_start = q->first_to_check;
-		return;
-	}
-
-	/*
-	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
-	 * or by the next inbound run.
-	 */
-	new = add_buf(q->first_to_check, count - 1);
-	if (q->u.in.polling) {
-		/* reset the previous ACK but first set the new one */
-		set_buf_state(q, new, SLSB_P_INPUT_ACK);
-		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
-	} else {
-		q->u.in.polling = 1;
-		set_buf_state(q, new, SLSB_P_INPUT_ACK);
-	}
-
-	q->u.in.ack_start = new;
-	count--;
-	if (!count)
-		return;
-	/* need to change ALL buffers to get more interrupts */
-	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+	if (!q->u.in.batch_count)
+		q->u.in.batch_start = start;
+	q->u.in.batch_count += count;
 }
 
-static int get_inbound_buffer_frontier(struct qdio_q *q)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
 {
 	unsigned char state = 0;
 	int count;
 
 	q->timestamp = get_tod_clock_fast();
 
-	/*
-	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
-	 * would return 0.
-	 */
-	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	count = atomic_read(&q->nr_buf_used);
 	if (!count)
-		goto out;
+		return 0;
 
 	/*
 	 * No siga sync here, as a PCI or we after a thin interrupt
 	 * already sync'ed the queues.
 	 */
-	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+	count = get_buf_states(q, start, &state, count, 1, 0);
 	if (!count)
-		goto out;
+		return 0;
 
 	switch (state) {
 	case SLSB_P_INPUT_PRIMED:
-		inbound_primed(q, count);
-		q->first_to_check = add_buf(q->first_to_check, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
+			      count);
+
+		inbound_handle_work(q, start, count, is_qebsm(q));
 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
-		break;
+		return count;
 	case SLSB_P_INPUT_ERROR:
-		process_buffer_error(q, count);
-		q->first_to_check = add_buf(q->first_to_check, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
+			      count);
+
+		process_buffer_error(q, start, count);
+		inbound_handle_work(q, start, count, false);
 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals_error(q, count);
-		break;
+		return count;
 	case SLSB_CU_INPUT_EMPTY:
-	case SLSB_P_INPUT_NOT_INIT:
-	case SLSB_P_INPUT_ACK:
 		if (q->irq_ptr->perf_stat_enabled)
 			q->q_stats.nr_sbal_nop++;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
-			      q->nr, q->first_to_check);
-		break;
-	default:
-		WARN_ON_ONCE(1);
-	}
-out:
-	return q->first_to_check;
-}
-
-static int qdio_inbound_q_moved(struct qdio_q *q)
-{
-	int bufnr;
-
-	bufnr = get_inbound_buffer_frontier(q);
-
-	if (bufnr != q->last_move) {
-		q->last_move = bufnr;
-		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-			q->u.in.timestamp = get_tod_clock();
-		return 1;
-	} else
+			      q->nr, start);
 		return 0;
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_ACK:
+		/* We should never see this state, throw a WARN: */
+	default:
+		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+			      "found state %#x at index %u on queue %u\n",
+			      state, start, q->nr);
+		return 0;
+	}
 }
 
-static inline int qdio_inbound_q_done(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
+{
+	return get_inbound_buffer_frontier(q, start);
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
 {
 	unsigned char state = 0;
 
@@ -570,59 +522,13 @@
 
 	if (need_siga_sync(q))
 		qdio_siga_sync_q(q);
-	get_buf_state(q, q->first_to_check, &state, 0);
+	get_buf_state(q, start, &state, 0);
 
 	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
 		/* more work coming */
 		return 0;
 
-	if (is_thinint_irq(q->irq_ptr))
-		return 1;
-
-	/* don't poll under z/VM */
-	if (MACHINE_IS_VM)
-		return 1;
-
-	/*
-	 * At this point we know, that inbound first_to_check
-	 * has (probably) not moved (see qdio_inbound_processing).
-	 */
-	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
-			      q->first_to_check);
-		return 1;
-	} else
-		return 0;
-}
-
-static inline int contains_aobs(struct qdio_q *q)
-{
-	return !q->is_input_q && q->u.out.use_cq;
-}
-
-static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
-{
-	unsigned char state = 0;
-	int j, b = start;
-
-	if (!contains_aobs(q))
-		return;
-
-	for (j = 0; j < count; ++j) {
-		get_buf_state(q, b, &state, 0);
-		if (state == SLSB_P_OUTPUT_PENDING) {
-			struct qaob *aob = q->u.out.aobs[b];
-			if (aob == NULL)
-				continue;
-
-			q->u.out.sbal_state[b].flags |=
-				QDIO_OUTBUF_STATE_FLAG_PENDING;
-			q->u.out.aobs[b] = NULL;
-		} else if (state == SLSB_P_OUTPUT_EMPTY) {
-			q->u.out.sbal_state[b].aob = NULL;
-		}
-		b = next_buf(b);
-	}
+	return 1;
 }
 
 static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
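
The large rework above collapses the polling/ack_start/ack_count bookkeeping into one contiguous batch (batch_start, batch_count): inbound_handle_work() grows it, and qdio_stop_polling() returns the whole batch with a single set_buf_states() call. A trimmed model of that accounting (the field names follow the patch, everything else is scaffolding):

struct input_q_model {
	unsigned int batch_start;	/* first SBAL of the tracked batch */
	unsigned int batch_count;	/* number of SBALs in the batch */
};

/* Consecutive scans extend the batch, since each scan resumes where the
 * previous one stopped: */
static void inbound_handle_work_model(struct input_q_model *q,
				      unsigned int start, int count)
{
	if (!q->batch_count)
		q->batch_start = start;
	q->batch_count += count;
}

/* qdio_stop_polling() then hands the whole batch back to the device in
 * one go and resets the counter: */
static void stop_polling_model(struct input_q_model *q)
{
	/* set_buf_states(q, batch_start, SLSB_P_INPUT_NOT_INIT, batch_count) */
	q->batch_count = 0;
}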
@@ -630,15 +536,11 @@
 {
 	unsigned long phys_aob = 0;
 
-	if (!q->use_cq)
-		return 0;
-
 	if (!q->aobs[bufnr]) {
 		struct qaob *aob = qdio_allocate_aob();
 		q->aobs[bufnr] = aob;
 	}
 	if (q->aobs[bufnr]) {
-		q->sbal_state[bufnr].aob = q->aobs[bufnr];
 		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
 		phys_aob = virt_to_phys(q->aobs[bufnr]);
 		WARN_ON_ONCE(phys_aob & 0xFF);
@@ -648,16 +550,11 @@
 	return phys_aob;
 }
 
-static void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
+			      unsigned int count)
 {
-	int start = q->first_to_kick;
-	int end = q->first_to_check;
-	int count;
-
 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
-
-	count = sub_buf(end, start);
 
 	if (q->is_input_q) {
 		qperf_inc(q, inbound_handler);
@@ -668,13 +565,10 @@
 			      start, count);
 	}
 
-	qdio_handle_aobs(q, start, count);
-
 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
 		   q->irq_ptr->int_parm);
 
 	/* for the next time */
-	q->first_to_kick = end;
 	q->qdio_error = 0;
 }
 
@@ -689,14 +583,20 @@
 
 static void __qdio_inbound_processing(struct qdio_q *q)
 {
+	unsigned int start = q->first_to_check;
+	int count;
+
 	qperf_inc(q, tasklet_inbound);
 
-	if (!qdio_inbound_q_moved(q))
+	count = qdio_inbound_q_moved(q, start);
+	if (count == 0)
 		return;
 
-	qdio_kick_handler(q);
+	qdio_kick_handler(q, start, count);
+	start = add_buf(start, count);
+	q->first_to_check = start;
 
-	if (!qdio_inbound_q_done(q)) {
+	if (!qdio_inbound_q_done(q, start)) {
 		/* means poll time is not yet over */
 		qperf_inc(q, tasklet_inbound_resched);
 		if (!qdio_tasklet_schedule(q))
@@ -708,7 +608,7 @@
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (!qdio_inbound_q_done(q)) {
+	if (!qdio_inbound_q_done(q, start)) {
 		qperf_inc(q, tasklet_inbound_resched2);
 		qdio_tasklet_schedule(q);
 	}
@@ -720,7 +620,20 @@
 	__qdio_inbound_processing(q);
 }
 
-static int get_outbound_buffer_frontier(struct qdio_q *q)
+static void qdio_check_pending(struct qdio_q *q, unsigned int index)
+{
+	unsigned char state;
+
+	if (get_buf_state(q, index, &state, 0) > 0 &&
+	    state == SLSB_P_OUTPUT_PENDING &&
+	    q->u.out.aobs[index]) {
+		q->u.out.sbal_state[index].flags |=
+			QDIO_OUTBUF_STATE_FLAG_PENDING;
+		q->u.out.aobs[index] = NULL;
+	}
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
 {
 	unsigned char state = 0;
 	int count;
@@ -729,23 +642,18 @@
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
-		    !pci_out_supported(q)) ||
+		    !pci_out_supported(q->irq_ptr)) ||
 		    (queue_type(q) == QDIO_IQDIO_QFMT &&
 		    multicast_outbound(q)))
 			qdio_siga_sync_q(q);
 
-	/*
-	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
-	 * would return 0.
-	 */
-	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	count = atomic_read(&q->nr_buf_used);
 	if (!count)
-		goto out;
+		return 0;
 
-	count = get_buf_states(q, q->first_to_check, &state, count, 0,
-			       q->u.out.use_cq);
+	count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
 	if (!count)
-		goto out;
+		return 0;
 
 	switch (state) {
 	case SLSB_P_OUTPUT_EMPTY:
@@ -755,34 +663,32 @@
 			      "out empty:%1d %02x", q->nr, count);
 
 		atomic_sub(count, &q->nr_buf_used);
-		q->first_to_check = add_buf(q->first_to_check, count);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
-
-		break;
+		return count;
 	case SLSB_P_OUTPUT_ERROR:
-		process_buffer_error(q, count);
-		q->first_to_check = add_buf(q->first_to_check, count);
+		process_buffer_error(q, start, count);
 		atomic_sub(count, &q->nr_buf_used);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals_error(q, count);
-		break;
+		return count;
 	case SLSB_CU_OUTPUT_PRIMED:
 		/* the adapter has not fetched the output yet */
 		if (q->irq_ptr->perf_stat_enabled)
 			q->q_stats.nr_sbal_nop++;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
 			      q->nr);
-		break;
-	case SLSB_P_OUTPUT_NOT_INIT:
+		return 0;
 	case SLSB_P_OUTPUT_HALTED:
-		break;
+		return 0;
+	case SLSB_P_OUTPUT_NOT_INIT:
+		/* We should never see this state, throw a WARN: */
 	default:
-		WARN_ON_ONCE(1);
+		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+			      "found state %#x at index %u on queue %u\n",
+			      state, start, q->nr);
+		return 0;
 	}
-
-out:
-	return q->first_to_check;
 }
 
 /* all buffers processed? */
@@ -791,21 +697,28 @@
 	return atomic_read(&q->nr_buf_used) == 0;
 }
 
-static inline int qdio_outbound_q_moved(struct qdio_q *q)
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
 {
-	int bufnr;
+	int count;
 
-	bufnr = get_outbound_buffer_frontier(q);
+	count = get_outbound_buffer_frontier(q, start);
 
-	if (bufnr != q->last_move) {
-		q->last_move = bufnr;
+	if (count) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
-		return 1;
-	} else
-		return 0;
+
+		if (q->u.out.use_cq) {
+			unsigned int i;
+
+			for (i = 0; i < count; i++)
+				qdio_check_pending(q, QDIO_BUFNR(start + i));
+		}
+	}
+
+	return count;
 }
 
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+				unsigned long aob)
 {
 	int retries = 0, cc;
 	unsigned int busy_bit;
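
qdio_outbound_q_moved() now walks the completed range as QDIO_BUFNR(start + i), so batches that wrap the 128-entry ring still index the right SLSB slots. Assuming QDIO_BUFNR() is the usual power-of-two mask wrap (an assumption — its definition is not shown in this diff), a worked example:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_BUFNR(num)		((num) & (QDIO_MAX_BUFFERS_PER_Q - 1))

int main(void)
{
	unsigned int start = 126, count = 4, i;

	/* a completion batch that wraps the ring: prints 126 127 0 1 */
	for (i = 0; i < count; i++)
		printf("%u ", QDIO_BUFNR(start + i));
	printf("\n");
	return 0;
}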
@@ -817,7 +730,7 @@
 retry:
 	qperf_inc(q, siga_write);
 
-	cc = qdio_siga_output(q, &busy_bit, aob);
+	cc = qdio_siga_output(q, count, &busy_bit, aob);
 	switch (cc) {
 	case 0:
 		break;
@@ -849,15 +762,21 @@
 
 static void __qdio_outbound_processing(struct qdio_q *q)
 {
+	unsigned int start = q->first_to_check;
+	int count;
+
 	qperf_inc(q, tasklet_outbound);
 	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
 
-	if (qdio_outbound_q_moved(q))
-		qdio_kick_handler(q);
+	count = qdio_outbound_q_moved(q, start);
+	if (count) {
+		q->first_to_check = add_buf(start, count);
+		qdio_kick_handler(q, start, count);
+	}
 
-	if (queue_type(q) == QDIO_ZFCP_QFMT)
-		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
-			goto sched;
+	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
+	    !qdio_outbound_q_done(q))
+		goto sched;
 
 	if (q->u.out.pci_out_enabled)
 		return;
@@ -893,57 +812,30 @@
 	qdio_tasklet_schedule(q);
 }
 
-static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
 {
 	struct qdio_q *out;
 	int i;
 
-	if (!pci_out_supported(q))
+	if (!pci_out_supported(irq) || !irq->scan_threshold)
 		return;
 
-	for_each_output_queue(q->irq_ptr, out, i)
+	for_each_output_queue(irq, out, i)
 		if (!qdio_outbound_q_done(out))
 			qdio_tasklet_schedule(out);
-}
-
-static void __tiqdio_inbound_processing(struct qdio_q *q)
-{
-	qperf_inc(q, tasklet_inbound);
-	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
-		qdio_sync_queues(q);
-
-	/*
-	 * The interrupt could be caused by a PCI request. Check the
-	 * PCI capable outbound queues.
-	 */
-	qdio_check_outbound_after_thinint(q);
-
-	if (!qdio_inbound_q_moved(q))
-		return;
-
-	qdio_kick_handler(q);
-
-	if (!qdio_inbound_q_done(q)) {
-		qperf_inc(q, tasklet_inbound_resched);
-		if (!qdio_tasklet_schedule(q))
-			return;
-	}
-
-	qdio_stop_polling(q);
-	/*
-	 * We need to check again to not lose initiative after
-	 * resetting the ACK state.
-	 */
-	if (!qdio_inbound_q_done(q)) {
-		qperf_inc(q, tasklet_inbound_resched2);
-		qdio_tasklet_schedule(q);
-	}
 }
 
 void tiqdio_inbound_processing(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
-	__tiqdio_inbound_processing(q);
+
+	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+		qdio_sync_queues(q);
+
+	/* The interrupt could be caused by a PCI request: */
+	qdio_check_outbound_pci_queues(q->irq_ptr);
+
+	__qdio_inbound_processing(q);
 }
 
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
@@ -973,22 +865,17 @@
 	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
-	for_each_input_queue(irq_ptr, q, i) {
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-					     &q->u.in.queue_irq_state)) {
-				qperf_inc(q, int_discarded);
-				continue;
-			}
-			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-						 q->irq_ptr->int_parm);
-		} else {
+	if (irq_ptr->irq_poll) {
+		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+			irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+		else
+			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+	} else {
+		for_each_input_queue(irq_ptr, q, i)
 			tasklet_schedule(&q->tasklet);
-		}
 	}
 
-	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
 		return;
 
 	for_each_output_queue(irq_ptr, q, i) {
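
The interrupt-handler hunk above moves the polling gate from per-queue state to a single per-device bit: the first interrupt to win the test_and_set_bit() delivers the irq_poll callback, later ones are counted as discarded until qdio_start_irq() re-arms the bit. A userspace analogue of that gate, using C11 atomics in place of the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag poll_disabled = ATOMIC_FLAG_INIT;

/* Interrupt path: returns true if this caller should run the poll
 * callback; false means an earlier IRQ already claimed it. */
static bool claim_poll(void)
{
	return !atomic_flag_test_and_set(&poll_disabled);
}

/* Driver re-arm, as qdio_start_irq() does with QDIO_IRQ_DISABLED: */
static void rearm_poll(void)
{
	atomic_flag_clear(&poll_disabled);
}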
@@ -1000,12 +887,11 @@
 	}
 }
 
-static void qdio_handle_activate_check(struct ccw_device *cdev,
-				       unsigned long intparm, int cstat, int dstat)
+static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+				       unsigned long intparm, int cstat,
+				       int dstat)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct qdio_q *q;
-	int count;
 
 	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
 	DBF_ERROR("intp :%lx", intparm);
@@ -1020,9 +906,8 @@
 		goto no_handler;
 	}
 
-	count = sub_buf(q->first_to_check, q->first_to_kick);
 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
-		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
 no_handler:
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 	/*
@@ -1032,11 +917,9 @@
 	lgr_info_log();
 }
 
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
 				      int dstat)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
 
 	if (cstat)
@@ -1083,7 +966,7 @@
 
 	switch (irq_ptr->state) {
 	case QDIO_IRQ_STATE_INACTIVE:
-		qdio_establish_handle_irq(cdev, cstat, dstat);
+		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
 		break;
 	case QDIO_IRQ_STATE_CLEANUP:
 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -1095,7 +978,7 @@
 			return;
 		}
 		if (cstat || dstat)
-			qdio_handle_activate_check(cdev, intparm, cstat,
+			qdio_handle_activate_check(irq_ptr, intparm, cstat,
 						   dstat);
 		break;
 	case QDIO_IRQ_STATE_STOPPED:
@@ -1128,9 +1011,8 @@
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct ccw_device *cdev)
+static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct qdio_q *q;
 	int i;
 
@@ -1141,6 +1023,33 @@
 		del_timer_sync(&q->u.out.timer);
 		tasklet_kill(&q->tasklet);
 	}
+}
+
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
+{
+	struct ccw_device *cdev = irq->cdev;
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+	else
+		/* default behaviour is halt */
+		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+	if (rc) {
+		DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+		DBF_ERROR("rc:%4d", rc);
+		return rc;
+	}
+
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+					 irq->state == QDIO_IRQ_STATE_INACTIVE ||
+					 irq->state == QDIO_IRQ_STATE_ERR,
+					 10 * HZ);
+
+	return 0;
 }
 
 /**
@@ -1177,41 +1086,13 @@
 	 */
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-	tiqdio_remove_input_queues(irq_ptr);
-	qdio_shutdown_queues(cdev);
+	tiqdio_remove_device(irq_ptr);
+	qdio_shutdown_queues(irq_ptr);
 	qdio_shutdown_debug_entries(irq_ptr);
 
-	/* cleanup subchannel */
-	spin_lock_irq(get_ccwdev_lock(cdev));
-
-	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
-		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
-	else
-		/* default behaviour is halt */
-		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
-	if (rc) {
-		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
-		DBF_ERROR("rc:%4d", rc);
-		goto no_cleanup;
-	}
-
-	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-	spin_unlock_irq(get_ccwdev_lock(cdev));
-	wait_event_interruptible_timeout(cdev->private->wait_q,
-		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
-		irq_ptr->state == QDIO_IRQ_STATE_ERR,
-		10 * HZ);
-	spin_lock_irq(get_ccwdev_lock(cdev));
-
-no_cleanup:
+	rc = qdio_cancel_ccw(irq_ptr, how);
 	qdio_shutdown_thinint(irq_ptr);
-
-	/* restore interrupt handler */
-	if ((void *)cdev->handler == (void *)qdio_int_handler) {
-		cdev->handler = irq_ptr->orig_handler;
-		cdev->private->intparm = 0;
-	}
-	spin_unlock_irq(get_ccwdev_lock(cdev));
+	qdio_shutdown_irq(irq_ptr);
 
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	mutex_unlock(&irq_ptr->setup_mutex);
@@ -1242,43 +1123,47 @@
 	cdev->private->qdio_data = NULL;
 	mutex_unlock(&irq_ptr->setup_mutex);
 
-	qdio_release_memory(irq_ptr);
+	qdio_free_async_data(irq_ptr);
+	qdio_free_queues(irq_ptr);
+	free_page((unsigned long) irq_ptr->qdr);
+	free_page(irq_ptr->chsc_page);
+	free_page((unsigned long) irq_ptr);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qdio_free);
 
 /**
  * qdio_allocate - allocate qdio queues and associated data
- * @init_data: initialization data
+ * @cdev: associated ccw device
+ * @no_input_qs: allocate this number of Input Queues
+ * @no_output_qs: allocate this number of Output Queues
  */
-int qdio_allocate(struct qdio_initialize *init_data)
+int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+		  unsigned int no_output_qs)
 {
 	struct subchannel_id schid;
 	struct qdio_irq *irq_ptr;
+	int rc = -ENOMEM;
 
-	ccw_device_get_schid(init_data->cdev, &schid);
+	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qallocate:%4x", schid.sch_no);
 
-	if ((init_data->no_input_qs && !init_data->input_handler) ||
-	    (init_data->no_output_qs && !init_data->output_handler))
-		return -EINVAL;
-
-	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
-	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
-		return -EINVAL;
-
-	if ((!init_data->input_sbal_addr_array) ||
-	    (!init_data->output_sbal_addr_array))
+	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
 		return -EINVAL;
 
 	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
 	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!irq_ptr)
-		goto out_err;
+		return -ENOMEM;
 
+	irq_ptr->cdev = cdev;
 	mutex_init(&irq_ptr->setup_mutex);
-	if (qdio_allocate_dbf(init_data, irq_ptr))
-		goto out_rel;
+	if (qdio_allocate_dbf(irq_ptr))
+		goto err_dbf;
+
+	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
+		      no_output_qs);
 
 	/*
 	 * Allocate a page for the chsc calls in qdio_establish.
@@ -1288,24 +1173,30 @@
 	 */
 	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
 	if (!irq_ptr->chsc_page)
-		goto out_rel;
+		goto err_chsc;
 
 	/* qdr is used in ccw1.cda which is u32 */
 	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!irq_ptr->qdr)
-		goto out_rel;
+		goto err_qdr;
 
-	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
-			     init_data->no_output_qs))
-		goto out_rel;
+	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
+	if (rc)
+		goto err_queues;
 
-	init_data->cdev->private->qdio_data = irq_ptr;
+	INIT_LIST_HEAD(&irq_ptr->entry);
+	cdev->private->qdio_data = irq_ptr;
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	return 0;
-out_rel:
-	qdio_release_memory(irq_ptr);
-out_err:
-	return -ENOMEM;
+
+err_queues:
+	free_page((unsigned long) irq_ptr->qdr);
+err_qdr:
+	free_page(irq_ptr->chsc_page);
+err_chsc:
+err_dbf:
+	free_page((unsigned long) irq_ptr);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);
@@ -1319,6 +1210,8 @@
 
 	for_each_output_queue(irq_ptr, q, i) {
 		if (use_cq) {
+			if (multicast_outbound(q))
+				continue;
 			if (qdio_enable_async_operation(&q->u.out) < 0) {
 				use_cq = 0;
 				continue;
@@ -1329,33 +1222,62 @@
 	DBF_EVENT("use_cq:%d", use_cq);
 }
 
+static void qdio_trace_init_data(struct qdio_irq *irq,
+				 struct qdio_initialize *data)
+{
+	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
+	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
+	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
+	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
+		      data->no_output_qs);
+	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
+		    DBF_ERR);
+}
+
 /**
  * qdio_establish - establish queues on a qdio subchannel
+ * @cdev: associated ccw device
  * @init_data: initialization data
  */
-int qdio_establish(struct qdio_initialize *init_data)
+int qdio_establish(struct ccw_device *cdev,
+		   struct qdio_initialize *init_data)
 {
-	struct ccw_device *cdev = init_data->cdev;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct subchannel_id schid;
-	struct qdio_irq *irq_ptr;
+	long timeout;
 	int rc;
 
 	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qestablish:%4x", schid.sch_no);
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
+	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
+	    init_data->no_output_qs > irq_ptr->max_output_qs)
+		return -EINVAL;
+
+	if ((init_data->no_input_qs && !init_data->input_handler) ||
+	    (init_data->no_output_qs && !init_data->output_handler))
+		return -EINVAL;
+
+	if (!init_data->input_sbal_addr_array ||
+	    !init_data->output_sbal_addr_array)
+		return -EINVAL;
+
 	mutex_lock(&irq_ptr->setup_mutex);
-	qdio_setup_irq(init_data);
+	qdio_trace_init_data(irq_ptr, init_data);
+	qdio_setup_irq(irq_ptr, init_data);
 
 	rc = qdio_establish_thinint(irq_ptr);
-	if (rc) {
-		mutex_unlock(&irq_ptr->setup_mutex);
-		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-		return rc;
-	}
+	if (rc)
+		goto err_thinint;
 
 	/* establish q */
 	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
@@ -1371,14 +1293,16 @@
 	if (rc) {
 		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
 		DBF_ERROR("rc:%4x", rc);
-		mutex_unlock(&irq_ptr->setup_mutex);
-		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-		return rc;
+		goto err_ccw_start;
 	}
 
-	wait_event_interruptible_timeout(cdev->private->wait_q,
-		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
-		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+						   irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+						   irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+	if (timeout <= 0) {
+		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+		goto err_ccw_timeout;
+	}
 
 	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
 		mutex_unlock(&irq_ptr->setup_mutex);
@@ -1394,9 +1318,19 @@
 	qdio_init_buf_states(irq_ptr);
 
 	mutex_unlock(&irq_ptr->setup_mutex);
-	qdio_print_subchannel_info(irq_ptr, cdev);
-	qdio_setup_debug_entries(irq_ptr, cdev);
+	qdio_print_subchannel_info(irq_ptr);
+	qdio_setup_debug_entries(irq_ptr);
 	return 0;
+
+err_ccw_timeout:
+	qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_start:
+	qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+	qdio_shutdown_irq(irq_ptr);
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	mutex_unlock(&irq_ptr->setup_mutex);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_establish);
 
@@ -1406,14 +1340,13 @@
  */
 int qdio_activate(struct ccw_device *cdev)
 {
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct subchannel_id schid;
-	struct qdio_irq *irq_ptr;
 	int rc;
 
 	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qactivate:%4x", schid.sch_no);
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
@@ -1441,7 +1374,7 @@
 	}
 
 	if (is_thinint_irq(irq_ptr))
-		tiqdio_add_input_queues(irq_ptr);
+		tiqdio_add_device(irq_ptr);
 
 	/* wait for subchannel to become active */
 	msleep(5);
@@ -1461,25 +1394,6 @@
 }
 EXPORT_SYMBOL_GPL(qdio_activate);
 
-static inline int buf_in_between(int bufnr, int start, int count)
-{
-	int end = add_buf(start, count);
-
-	if (end > start) {
-		if (bufnr >= start && bufnr < end)
-			return 1;
-		else
-			return 0;
-	}
-
-	/* wrap-around case */
-	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
-	    (bufnr < end))
-		return 1;
-	else
-		return 0;
-}
-
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
@@ -1490,38 +1404,18 @@
 static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 			  int bufnr, int count)
 {
-	int diff;
+	int overlap;
 
 	qperf_inc(q, inbound_call);
 
-	if (!q->u.in.polling)
-		goto set;
-
-	/* protect against stop polling setting an ACK for an emptied slsb */
-	if (count == QDIO_MAX_BUFFERS_PER_Q) {
-		/* overwriting everything, just delete polling status */
-		q->u.in.polling = 0;
-		q->u.in.ack_count = 0;
-		goto set;
-	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
-		if (is_qebsm(q)) {
-			/* partial overwrite, just update ack_start */
-			diff = add_buf(bufnr, count);
-			diff = sub_buf(diff, q->u.in.ack_start);
-			q->u.in.ack_count -= diff;
-			if (q->u.in.ack_count <= 0) {
-				q->u.in.polling = 0;
-				q->u.in.ack_count = 0;
-				goto set;
-			}
-			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
-		}
-		else
-			/* the only ACK will be deleted, so stop polling */
-			q->u.in.polling = 0;
+	/* If any processed SBALs are returned to HW, adjust our tracking: */
+	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
+			q->u.in.batch_count);
+	if (overlap > 0) {
+		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
+		q->u.in.batch_count -= overlap;
 	}
 
-set:
 	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
 	atomic_add(count, &q->nr_buf_used);
 
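
The new handle_inbound() shrinks the tracked batch by however many of the returned buffers overlap it. Assuming the usual modulo-128 ring helpers, here is the arithmetic worked through: the batch covers SBALs 10..17 and the driver returns 6..15, so sub_buf(10, 6) = 4, overlap = min(10 - 4, 8) = 6, and the batch shrinks to 16..17:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_MASK 127

static int add_buf(int bufnr, int inc) { return (bufnr + inc) & QDIO_MAX_BUFFERS_MASK; }
static int sub_buf(int bufnr, int dec) { return (bufnr - dec) & QDIO_MAX_BUFFERS_MASK; }

int main(void)
{
	int batch_start = 10, batch_count = 8;	/* batch covers 10..17 */
	int bufnr = 6, count = 10;		/* driver returns 6..15 */
	int overlap = count - sub_buf(batch_start, bufnr);

	if (overlap > batch_count)
		overlap = batch_count;
	if (overlap > 0) {
		batch_start = add_buf(batch_start, overlap);
		batch_count -= overlap;
	}
	/* prints: batch now starts at 16, 2 left */
	printf("batch now starts at %d, %d left\n", batch_start, batch_count);
	return 0;
}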
@@ -1539,8 +1433,9 @@
  * @count: how many buffers are filled
  */
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-			   int bufnr, int count)
+			   unsigned int bufnr, unsigned int count)
 {
+	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
 	int used, rc = 0;
 
@@ -1561,12 +1456,10 @@
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
 		unsigned long phys_aob = 0;
 
-		/* One SIGA-W per buffer required for unicast HSI */
-		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+		if (q->u.out.use_cq && count == 1)
+			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
 
-		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
-
-		rc = qdio_kick_outbound_q(q, phys_aob);
+		rc = qdio_kick_outbound_q(q, count, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
 	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1575,11 +1468,15 @@
 		/* The previous buffer is not processed yet, tack on. */
 		qperf_inc(q, fast_requeue);
 	} else {
-		rc = qdio_kick_outbound_q(q, 0);
+		rc = qdio_kick_outbound_q(q, count, 0);
 	}
 
+	/* Let drivers implement their own completion scanning: */
+	if (!scan_threshold)
+		return rc;
+
 	/* in case of SIGA errors we must process the error immediately */
-	if (used >= q->u.out.scan_threshold || rc)
+	if (used >= scan_threshold || rc)
 		qdio_tasklet_schedule(q);
 	else
 		/* free the SBALs in case of no further traffic */
@@ -1600,12 +1497,11 @@
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 	    int q_nr, unsigned int bufnr, unsigned int count)
 {
-	struct qdio_irq *irq_ptr;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
@@ -1627,26 +1523,26 @@
 EXPORT_SYMBOL_GPL(do_QDIO);
 
 /**
- * qdio_start_irq - process input buffers
+ * qdio_start_irq - enable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
  *
  * Return codes
  *   0 - success
  *   1 - irqs not started since new data is available
  */
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
 {
 	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	unsigned int i;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
-	clear_nonshared_ind(irq_ptr);
-	qdio_stop_polling(q);
-	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+	for_each_input_queue(irq_ptr, q, i)
+		qdio_stop_polling(q);
+
+	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
 
 	/*
 	 * We need to check again to not lose initiative after
@@ -1654,19 +1550,60 @@
 	 */
 	if (test_nonshared_ind(irq_ptr))
 		goto rescan;
-	if (!qdio_inbound_q_done(q))
-		goto rescan;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		if (!qdio_inbound_q_done(q, q->first_to_check))
+			goto rescan;
+	}
+
 	return 0;
 
 rescan:
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
 
 }
 EXPORT_SYMBOL(qdio_start_irq);
+
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+				unsigned int *error)
+{
+	unsigned int start = q->first_to_check;
+	int count;
+
+	count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+				qdio_outbound_q_moved(q, start);
+	if (count == 0)
+		return 0;
+
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_check = add_buf(start, count);
+	q->qdio_error = 0;
+
+	return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+		       unsigned int *bufnr, unsigned int *error)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+
+	return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
 
 /**
  * qdio_get_next_buffers - process input buffers
@@ -1684,7 +1621,6 @@
 			  int *error)
 {
 	struct qdio_q *q;
-	int start, end;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (!irq_ptr)
@@ -1698,141 +1634,37 @@
 	if (need_siga_sync(q))
 		qdio_sync_queues(q);
 
-	/* check the PCI capable outbound queues. */
-	qdio_check_outbound_after_thinint(q);
-
-	if (!qdio_inbound_q_moved(q))
-		return 0;
+	qdio_check_outbound_pci_queues(irq_ptr);
 
 	/* Note: upper-layer MUST stop processing immediately here ... */
 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return -EIO;
 
-	start = q->first_to_kick;
-	end = q->first_to_check;
-	*bufnr = start;
-	*error = q->qdio_error;
-
-	/* for the next time */
-	q->first_to_kick = end;
-	q->qdio_error = 0;
-	return sub_buf(end, start);
+	return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL(qdio_get_next_buffers);
 
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
  *
  * Return codes
  *   0 - interrupts were already disabled
  *   1 - interrupts successfully disabled
 */
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
 {
-	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
 }
 EXPORT_SYMBOL(qdio_stop_irq);
-
-/**
- * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
- * @schid: Subchannel ID.
- * @cnc: Boolean Change-Notification Control
- * @response: Response code will be stored at this address
- * @cb: Callback function will be executed for each element
- *	of the address list
- * @priv: Pointer to pass to the callback function.
- *
- * Performs "Store-network-bridging-information list" operation and calls
- * the callback function for every entry in the list. If "change-
- * notification-control" is set, further changes in the address list
- * will be reported via the IPA command.
- */
-int qdio_pnso_brinfo(struct subchannel_id schid,
-		int cnc, u16 *response,
-		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
-				void *entry),
-		void *priv)
-{
-	struct chsc_pnso_area *rr;
-	int rc;
-	u32 prev_instance = 0;
-	int isfirstblock = 1;
-	int i, size, elems;
-
-	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
-	if (rr == NULL)
-		return -ENOMEM;
-	do {
-		/* on the first iteration, naihdr.resume_token will be zero */
-		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
-		if (rc != 0 && rc != -EBUSY)
-			goto out;
-		if (rr->response.code != 1) {
-			rc = -EIO;
-			continue;
-		} else
-			rc = 0;
-
-		if (cb == NULL)
-			continue;
-
-		size = rr->naihdr.naids;
-		elems = (rr->response.length -
-			 sizeof(struct chsc_header) -
-			 sizeof(struct chsc_brinfo_naihdr)) /
-			 size;
-
-		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
-			/* Inform the caller that they need to scrap */
-			/* the data that was already reported via cb */
-			rc = -EAGAIN;
-			break;
-		}
-		isfirstblock = 0;
-		prev_instance = rr->naihdr.instance;
-		for (i = 0; i < elems; i++)
-			switch (size) {
-			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
-				(*cb)(priv, l3_ipv6_addr,
-				      &rr->entries.l3_ipv6[i]);
-				break;
-			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
-				(*cb)(priv, l3_ipv4_addr,
-				      &rr->entries.l3_ipv4[i]);
-				break;
-			case sizeof(struct qdio_brinfo_entry_l2):
-				(*cb)(priv, l2_addr_lnid,
-				      &rr->entries.l2[i]);
-				break;
-			default:
-				WARN_ON_ONCE(1);
-				rc = -EIO;
-				goto out;
-			}
-	} while (rr->response.code == 0x0107 ||	/* channel busy */
-		  (rr->response.code == 1 &&	/* list stored */
-		   /* resume token is non-zero => list incomplete */
-		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
-	(*response) = rr->response.code;
-
-out:
-	free_page((unsigned long)rr);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
 
 static int __init init_QDIO(void)
 {
@@ -1844,16 +1676,11 @@
 	rc = qdio_setup_init();
 	if (rc)
 		goto out_debug;
-	rc = tiqdio_allocate_memory();
+	rc = qdio_thinint_init();
 	if (rc)
 		goto out_cache;
-	rc = tiqdio_register_thinints();
-	if (rc)
-		goto out_ti;
 	return 0;
 
-out_ti:
-	tiqdio_free_memory();
 out_cache:
 	qdio_setup_exit();
 out_debug:
@@ -1863,8 +1690,7 @@
 
 static void __exit exit_QDIO(void)
 {
-	tiqdio_unregister_thinints();
-	tiqdio_free_memory();
+	qdio_thinint_exit();
 	qdio_setup_exit();
 	qdio_debug_exit();
 }