
hc
2024-02-20 ea08eeccae9297f7aabd2ef7f0c2517ac4549acc
kernel/drivers/s390/cio/qdio_main.c
@@ -31,38 +31,41 @@
 MODULE_LICENSE("GPL");
 
 static inline int do_siga_sync(unsigned long schid,
-			       unsigned int out_mask, unsigned int in_mask,
+			       unsigned long out_mask, unsigned long in_mask,
 			       unsigned int fc)
 {
-	register unsigned long __fc asm ("0") = fc;
-	register unsigned long __schid asm ("1") = schid;
-	register unsigned long out asm ("2") = out_mask;
-	register unsigned long in asm ("3") = in_mask;
 	int cc;
 
 	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[schid]\n"
+		"	lgr	2,%[out]\n"
+		"	lgr	3,%[in]\n"
 		"	siga	0\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc)
-		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+		"	ipm	%[cc]\n"
+		"	srl	%[cc],28\n"
+		: [cc] "=&d" (cc)
+		: [fc] "d" (fc), [schid] "d" (schid),
+		  [out] "d" (out_mask), [in] "d" (in_mask)
+		: "cc", "0", "1", "2", "3");
 	return cc;
 }
 
-static inline int do_siga_input(unsigned long schid, unsigned int mask,
-				unsigned int fc)
+static inline int do_siga_input(unsigned long schid, unsigned long mask,
+				unsigned long fc)
 {
-	register unsigned long __fc asm ("0") = fc;
-	register unsigned long __schid asm ("1") = schid;
-	register unsigned long __mask asm ("2") = mask;
 	int cc;
 
 	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[schid]\n"
+		"	lgr	2,%[mask]\n"
 		"	siga	0\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc)
-		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
+		"	ipm	%[cc]\n"
+		"	srl	%[cc],28\n"
+		: [cc] "=&d" (cc)
+		: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
+		: "cc", "0", "1", "2");
 	return cc;
 }
 
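Note on the hunk above: the old code pinned values to hardware registers with `register unsigned long __fc asm ("0") = fc;`, a GCC extension that is fragile under compiler instrumentation. The new form takes ordinary operands, copies them into the fixed registers SIGA expects via explicit `lgr`, and declares those registers as clobbers. A minimal stand-alone sketch of the idiom (illustrative only; it omits the privileged `siga` itself and merely extracts the condition code):

    /* Sketch of the named-operand + clobber idiom, not real qdio code. */
    static inline int fixed_reg_cc(unsigned long arg)
    {
    	int cc;

    	asm volatile(
    		"	lgr	2,%[arg]\n"	/* copy input into the fixed GR 2 */
    		"	ipm	%[cc]\n"	/* insert program mask: cc lands in bits 28-31 */
    		"	srl	%[cc],28\n"	/* shift the condition code down to bits 0-1 */
    		: [cc] "=&d" (cc)		/* early-clobber output */
    		: [arg] "d" (arg)
    		: "cc", "2");			/* GR 2 is modified behind the compiler's back */
    	return cc;
    }

The clobber list is what lets the compiler keep inputs out of the fixed registers, which the old `register asm` bindings achieved implicitly.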
@@ -78,23 +81,24 @@
  * Note: For IQDC unicast queues only the highest priority queue is processed.
  */
 static inline int do_siga_output(unsigned long schid, unsigned long mask,
-				 unsigned int *bb, unsigned int fc,
+				 unsigned int *bb, unsigned long fc,
 				 unsigned long aob)
 {
-	register unsigned long __fc asm("0") = fc;
-	register unsigned long __schid asm("1") = schid;
-	register unsigned long __mask asm("2") = mask;
-	register unsigned long __aob asm("3") = aob;
 	int cc;
 
 	asm volatile(
+		"	lgr	0,%[fc]\n"
+		"	lgr	1,%[schid]\n"
+		"	lgr	2,%[mask]\n"
+		"	lgr	3,%[aob]\n"
 		"	siga	0\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc), "+d" (__fc), "+d" (__aob)
-		: "d" (__schid), "d" (__mask)
-		: "cc");
-	*bb = __fc >> 31;
+		"	lgr	%[fc],0\n"
+		"	ipm	%[cc]\n"
+		"	srl	%[cc],28\n"
+		: [cc] "=&d" (cc), [fc] "+&d" (fc)
+		: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
+		: "cc", "0", "1", "2", "3");
+	*bb = fc >> 31;
 	return cc;
 }
 
@@ -131,7 +135,7 @@
 	case 96:
 		/* not all buffers processed */
 		qperf_inc(q, eqbs_partial);
-		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
 		return count - tmp_count;
 	case 97:
@@ -143,7 +147,7 @@
 	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
 	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
-		   q->first_to_kick, count, q->irq_ptr->int_parm);
+		   q->first_to_check, count, q->irq_ptr->int_parm);
 	return 0;
 	}
 }
@@ -191,7 +195,7 @@
 	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
 	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
-		   q->first_to_kick, count, q->irq_ptr->int_parm);
+		   q->first_to_check, count, q->irq_ptr->int_parm);
 	return 0;
 	}
 }
@@ -205,17 +209,22 @@
 			  int auto_ack, int merge_pending)
 {
 	unsigned char __state = 0;
-	int i;
+	int i = 1;
 
 	if (is_qebsm(q))
 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
 	/* get initial state: */
 	__state = q->slsb.val[bufnr];
+
+	/* Bail out early if there is no work on the queue: */
+	if (__state & SLSB_OWNER_CU)
+		goto out;
+
 	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
 		__state = SLSB_P_OUTPUT_EMPTY;
 
-	for (i = 1; i < count; i++) {
+	for (; i < count; i++) {
 		bufnr = next_buf(bufnr);
 
 		/* merge PENDING into EMPTY: */
@@ -228,6 +237,8 @@
 		if (q->slsb.val[bufnr] != __state)
 			break;
 	}
+
+out:
 	*state = __state;
 	return i;
 }
@@ -247,10 +258,17 @@
 	if (is_qebsm(q))
 		return qdio_do_sqbs(q, state, bufnr, count);
 
+	/* Ensure that all preceding changes to the SBALs are visible: */
+	mb();
+
 	for (i = 0; i < count; i++) {
-		xchg(&q->slsb.val[bufnr], state);
+		WRITE_ONCE(q->slsb.val[bufnr], state);
 		bufnr = next_buf(bufnr);
 	}
+
+	/* Make our SLSB changes visible: */
+	mb();
+
 	return count;
 }
 
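Note: the hunk above trades one serializing xchg() per SLSB entry for plain WRITE_ONCE() stores bracketed by two full barriers, so a whole batch pays two mb() calls instead of N atomics. The ordering requirement, as I read the new comments, is: SBAL contents must be globally visible before any ownership flip, and all flips must be visible before completion is signalled to the adapter. A compressed sketch of that pattern, assuming kernel context (WRITE_ONCE from <linux/compiler.h>, mb() from <asm/barrier.h>; the 127 mask mirrors QDIO_MAX_BUFFERS_MASK):

    static void publish_states(unsigned char *slsb, int first, int count,
    			       unsigned char state)
    {
    	int i;

    	mb();	/* SBAL payload writes happen-before the state flips */
    	for (i = 0; i < count; i++)
    		WRITE_ONCE(slsb[(first + i) & 127], state);
    	mb();	/* all state flips are visible before completion is reported */
    }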
@@ -303,20 +321,19 @@
 	return qdio_siga_sync(q, q->mask, 0);
 }
 
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
-			    unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+			    unsigned int *busy_bit, unsigned long aob)
 {
 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 	unsigned int fc = QDIO_SIGA_WRITE;
 	u64 start_time = 0;
 	int retries = 0, cc;
-	unsigned long laob = 0;
 
-	WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
-			     !q->u.out.use_cq));
-	if (q->u.out.use_cq && aob != 0) {
-		fc = QDIO_SIGA_WRITEQ;
-		laob = aob;
+	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+		if (count > 1)
+			fc = QDIO_SIGA_WRITEM;
+		else if (aob)
+			fc = QDIO_SIGA_WRITEQ;
 	}
 
 	if (is_qebsm(q)) {
@@ -324,7 +341,7 @@
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
 
 	/* hipersocket busy condition */
 	if (unlikely(*busy_bit)) {
@@ -371,7 +388,7 @@
 static inline void qdio_sync_queues(struct qdio_q *q)
 {
 	/* PCI capable outbound queues will also be scanned so sync them too */
-	if (pci_out_supported(q))
+	if (pci_out_supported(q->irq_ptr))
 		qdio_siga_sync_all(q);
 	else
 		qdio_siga_sync_q(q);
@@ -382,186 +399,125 @@
 {
 	if (need_siga_sync(q))
 		qdio_siga_sync_q(q);
-	return get_buf_states(q, bufnr, state, 1, 0, 0);
+	return get_buf_state(q, bufnr, state, 0);
 }
 
 static inline void qdio_stop_polling(struct qdio_q *q)
 {
-	if (!q->u.in.polling)
+	if (!q->u.in.batch_count)
 		return;
 
-	q->u.in.polling = 0;
 	qperf_inc(q, stop_polling);
 
 	/* show the card that we are not polling anymore */
-	if (is_qebsm(q)) {
-		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-			       q->u.in.ack_count);
-		q->u.in.ack_count = 0;
-	} else
-		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
+		       q->u.in.batch_count);
+	q->u.in.batch_count = 0;
 }
 
 static inline void account_sbals(struct qdio_q *q, unsigned int count)
 {
-	int pos;
-
 	q->q_stats.nr_sbal_total += count;
-	if (count == QDIO_MAX_BUFFERS_MASK) {
-		q->q_stats.nr_sbals[7]++;
-		return;
-	}
-	pos = ilog2(count);
-	q->q_stats.nr_sbals[pos]++;
+	q->q_stats.nr_sbals[ilog2(count)]++;
 }
 
-static void process_buffer_error(struct qdio_q *q, int count)
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+				 int count)
 {
-	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
-					SLSB_P_OUTPUT_NOT_INIT;
-
 	q->qdio_error = QDIO_ERROR_SLSB_STATE;
 
 	/* special handling for no target buffer empty */
 	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
-	    q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+	    q->sbal[start]->element[15].sflags == 0x10) {
 		qperf_inc(q, target_full);
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
-			      q->first_to_check);
-		goto set;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
+		return;
 	}
 
 	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
 	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
-	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+	DBF_ERROR("FTC:%3d C:%3d", start, count);
 	DBF_ERROR("F14:%2x F15:%2x",
-		  q->sbal[q->first_to_check]->element[14].sflags,
-		  q->sbal[q->first_to_check]->element[15].sflags);
-
-set:
-	/*
-	 * Interrupts may be avoided as long as the error is present
-	 * so change the buffer state immediately to avoid starvation.
-	 */
-	set_buf_states(q, q->first_to_check, state, count);
+		  q->sbal[start]->element[14].sflags,
+		  q->sbal[start]->element[15].sflags);
 }
 
-static inline void inbound_primed(struct qdio_q *q, int count)
+static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
+				       int count, bool auto_ack)
 {
-	int new;
+	/* ACK the newest SBAL: */
+	if (!auto_ack)
+		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
 
-	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
-
-	/* for QEBSM the ACK was already set by EQBS */
-	if (is_qebsm(q)) {
-		if (!q->u.in.polling) {
-			q->u.in.polling = 1;
-			q->u.in.ack_count = count;
-			q->u.in.ack_start = q->first_to_check;
-			return;
-		}
-
-		/* delete the previous ACK's */
-		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-			       q->u.in.ack_count);
-		q->u.in.ack_count = count;
-		q->u.in.ack_start = q->first_to_check;
-		return;
-	}
-
-	/*
-	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
-	 * or by the next inbound run.
-	 */
-	new = add_buf(q->first_to_check, count - 1);
-	if (q->u.in.polling) {
-		/* reset the previous ACK but first set the new one */
-		set_buf_state(q, new, SLSB_P_INPUT_ACK);
-		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
-	} else {
-		q->u.in.polling = 1;
-		set_buf_state(q, new, SLSB_P_INPUT_ACK);
-	}
-
-	q->u.in.ack_start = new;
-	count--;
-	if (!count)
-		return;
-	/* need to change ALL buffers to get more interrupts */
-	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+	if (!q->u.in.batch_count)
+		q->u.in.batch_start = start;
+	q->u.in.batch_count += count;
 }
 
-static int get_inbound_buffer_frontier(struct qdio_q *q)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
 {
 	unsigned char state = 0;
 	int count;
 
 	q->timestamp = get_tod_clock_fast();
 
-	/*
-	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
-	 * would return 0.
-	 */
-	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	count = atomic_read(&q->nr_buf_used);
 	if (!count)
-		goto out;
+		return 0;
 
 	/*
 	 * No siga sync here, as a PCI or we after a thin interrupt
 	 * already sync'ed the queues.
 	 */
-	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+	count = get_buf_states(q, start, &state, count, 1, 0);
 	if (!count)
-		goto out;
+		return 0;
 
 	switch (state) {
 	case SLSB_P_INPUT_PRIMED:
-		inbound_primed(q, count);
-		q->first_to_check = add_buf(q->first_to_check, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
+			      count);
+
+		inbound_handle_work(q, start, count, is_qebsm(q));
 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
-		break;
+		return count;
 	case SLSB_P_INPUT_ERROR:
-		process_buffer_error(q, count);
-		q->first_to_check = add_buf(q->first_to_check, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
+			      count);
+
+		process_buffer_error(q, start, count);
+		inbound_handle_work(q, start, count, false);
 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals_error(q, count);
-		break;
+		return count;
 	case SLSB_CU_INPUT_EMPTY:
-	case SLSB_P_INPUT_NOT_INIT:
-	case SLSB_P_INPUT_ACK:
 		if (q->irq_ptr->perf_stat_enabled)
 			q->q_stats.nr_sbal_nop++;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
-			      q->nr, q->first_to_check);
-		break;
-	default:
-		WARN_ON_ONCE(1);
-	}
-out:
-	return q->first_to_check;
-}
-
-static int qdio_inbound_q_moved(struct qdio_q *q)
-{
-	int bufnr;
-
-	bufnr = get_inbound_buffer_frontier(q);
-
-	if (bufnr != q->last_move) {
-		q->last_move = bufnr;
-		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-			q->u.in.timestamp = get_tod_clock();
-		return 1;
-	} else
+			      q->nr, start);
 		return 0;
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_ACK:
+		/* We should never see this state, throw a WARN: */
+	default:
+		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+			      "found state %#x at index %u on queue %u\n",
+			      state, start, q->nr);
+		return 0;
+	}
 }
 
-static inline int qdio_inbound_q_done(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
+{
+	return get_inbound_buffer_frontier(q, start);
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
 {
 	unsigned char state = 0;
 
570526
571527 if (need_siga_sync(q))
572528 qdio_siga_sync_q(q);
573
- get_buf_state(q, q->first_to_check, &state, 0);
529
+ get_buf_state(q, start, &state, 0);
574530
575531 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
576532 /* more work coming */
577533 return 0;
578534
579
- if (is_thinint_irq(q->irq_ptr))
580
- return 1;
581
-
582
- /* don't poll under z/VM */
583
- if (MACHINE_IS_VM)
584
- return 1;
585
-
586
- /*
587
- * At this point we know, that inbound first_to_check
588
- * has (probably) not moved (see qdio_inbound_processing).
589
- */
590
- if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
591
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
592
- q->first_to_check);
593
- return 1;
594
- } else
595
- return 0;
596
-}
597
-
598
-static inline int contains_aobs(struct qdio_q *q)
599
-{
600
- return !q->is_input_q && q->u.out.use_cq;
601
-}
602
-
603
-static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
604
-{
605
- unsigned char state = 0;
606
- int j, b = start;
607
-
608
- if (!contains_aobs(q))
609
- return;
610
-
611
- for (j = 0; j < count; ++j) {
612
- get_buf_state(q, b, &state, 0);
613
- if (state == SLSB_P_OUTPUT_PENDING) {
614
- struct qaob *aob = q->u.out.aobs[b];
615
- if (aob == NULL)
616
- continue;
617
-
618
- q->u.out.sbal_state[b].flags |=
619
- QDIO_OUTBUF_STATE_FLAG_PENDING;
620
- q->u.out.aobs[b] = NULL;
621
- } else if (state == SLSB_P_OUTPUT_EMPTY) {
622
- q->u.out.sbal_state[b].aob = NULL;
623
- }
624
- b = next_buf(b);
625
- }
535
+ return 1;
626536 }
627537
628538 static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
....@@ -630,15 +540,11 @@
630540 {
631541 unsigned long phys_aob = 0;
632542
633
- if (!q->use_cq)
634
- return 0;
635
-
636543 if (!q->aobs[bufnr]) {
637544 struct qaob *aob = qdio_allocate_aob();
638545 q->aobs[bufnr] = aob;
639546 }
640547 if (q->aobs[bufnr]) {
641
- q->sbal_state[bufnr].aob = q->aobs[bufnr];
642548 q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
643549 phys_aob = virt_to_phys(q->aobs[bufnr]);
644550 WARN_ON_ONCE(phys_aob & 0xFF);
....@@ -648,16 +554,11 @@
648554 return phys_aob;
649555 }
650556
651
-static void qdio_kick_handler(struct qdio_q *q)
557
+static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
558
+ unsigned int count)
652559 {
653
- int start = q->first_to_kick;
654
- int end = q->first_to_check;
655
- int count;
656
-
657560 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
658561 return;
659
-
660
- count = sub_buf(end, start);
661562
662563 if (q->is_input_q) {
663564 qperf_inc(q, inbound_handler);
....@@ -668,13 +569,10 @@
668569 start, count);
669570 }
670571
671
- qdio_handle_aobs(q, start, count);
672
-
673572 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
674573 q->irq_ptr->int_parm);
675574
676575 /* for the next time */
677
- q->first_to_kick = end;
678576 q->qdio_error = 0;
679577 }
680578
....@@ -689,14 +587,20 @@
689587
690588 static void __qdio_inbound_processing(struct qdio_q *q)
691589 {
590
+ unsigned int start = q->first_to_check;
591
+ int count;
592
+
692593 qperf_inc(q, tasklet_inbound);
693594
694
- if (!qdio_inbound_q_moved(q))
595
+ count = qdio_inbound_q_moved(q, start);
596
+ if (count == 0)
695597 return;
696598
697
- qdio_kick_handler(q);
599
+ qdio_kick_handler(q, start, count);
600
+ start = add_buf(start, count);
601
+ q->first_to_check = start;
698602
699
- if (!qdio_inbound_q_done(q)) {
603
+ if (!qdio_inbound_q_done(q, start)) {
700604 /* means poll time is not yet over */
701605 qperf_inc(q, tasklet_inbound_resched);
702606 if (!qdio_tasklet_schedule(q))
....@@ -708,7 +612,7 @@
708612 * We need to check again to not lose initiative after
709613 * resetting the ACK state.
710614 */
711
- if (!qdio_inbound_q_done(q)) {
615
+ if (!qdio_inbound_q_done(q, start)) {
712616 qperf_inc(q, tasklet_inbound_resched2);
713617 qdio_tasklet_schedule(q);
714618 }
....@@ -720,7 +624,20 @@
720624 __qdio_inbound_processing(q);
721625 }
722626
723
-static int get_outbound_buffer_frontier(struct qdio_q *q)
627
+static void qdio_check_pending(struct qdio_q *q, unsigned int index)
628
+{
629
+ unsigned char state;
630
+
631
+ if (get_buf_state(q, index, &state, 0) > 0 &&
632
+ state == SLSB_P_OUTPUT_PENDING &&
633
+ q->u.out.aobs[index]) {
634
+ q->u.out.sbal_state[index].flags |=
635
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
636
+ q->u.out.aobs[index] = NULL;
637
+ }
638
+}
639
+
640
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
724641 {
725642 unsigned char state = 0;
726643 int count;
....@@ -729,23 +646,18 @@
729646
730647 if (need_siga_sync(q))
731648 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
732
- !pci_out_supported(q)) ||
649
+ !pci_out_supported(q->irq_ptr)) ||
733650 (queue_type(q) == QDIO_IQDIO_QFMT &&
734651 multicast_outbound(q)))
735652 qdio_siga_sync_q(q);
736653
737
- /*
738
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
739
- * would return 0.
740
- */
741
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
654
+ count = atomic_read(&q->nr_buf_used);
742655 if (!count)
743
- goto out;
656
+ return 0;
744657
745
- count = get_buf_states(q, q->first_to_check, &state, count, 0,
746
- q->u.out.use_cq);
658
+ count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
747659 if (!count)
748
- goto out;
660
+ return 0;
749661
750662 switch (state) {
751663 case SLSB_P_OUTPUT_EMPTY:
....@@ -755,34 +667,32 @@
755667 "out empty:%1d %02x", q->nr, count);
756668
757669 atomic_sub(count, &q->nr_buf_used);
758
- q->first_to_check = add_buf(q->first_to_check, count);
759670 if (q->irq_ptr->perf_stat_enabled)
760671 account_sbals(q, count);
761
-
762
- break;
672
+ return count;
763673 case SLSB_P_OUTPUT_ERROR:
764
- process_buffer_error(q, count);
765
- q->first_to_check = add_buf(q->first_to_check, count);
674
+ process_buffer_error(q, start, count);
766675 atomic_sub(count, &q->nr_buf_used);
767676 if (q->irq_ptr->perf_stat_enabled)
768677 account_sbals_error(q, count);
769
- break;
678
+ return count;
770679 case SLSB_CU_OUTPUT_PRIMED:
771680 /* the adapter has not fetched the output yet */
772681 if (q->irq_ptr->perf_stat_enabled)
773682 q->q_stats.nr_sbal_nop++;
774683 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
775684 q->nr);
776
- break;
777
- case SLSB_P_OUTPUT_NOT_INIT:
685
+ return 0;
778686 case SLSB_P_OUTPUT_HALTED:
779
- break;
687
+ return 0;
688
+ case SLSB_P_OUTPUT_NOT_INIT:
689
+ /* We should never see this state, throw a WARN: */
780690 default:
781
- WARN_ON_ONCE(1);
691
+ dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
692
+ "found state %#x at index %u on queue %u\n",
693
+ state, start, q->nr);
694
+ return 0;
782695 }
783
-
784
-out:
785
- return q->first_to_check;
786696 }
787697
788698 /* all buffers processed? */
....@@ -791,21 +701,28 @@
791701 return atomic_read(&q->nr_buf_used) == 0;
792702 }
793703
794
-static inline int qdio_outbound_q_moved(struct qdio_q *q)
704
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
795705 {
796
- int bufnr;
706
+ int count;
797707
798
- bufnr = get_outbound_buffer_frontier(q);
708
+ count = get_outbound_buffer_frontier(q, start);
799709
800
- if (bufnr != q->last_move) {
801
- q->last_move = bufnr;
710
+ if (count) {
802711 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
803
- return 1;
804
- } else
805
- return 0;
712
+
713
+ if (q->u.out.use_cq) {
714
+ unsigned int i;
715
+
716
+ for (i = 0; i < count; i++)
717
+ qdio_check_pending(q, QDIO_BUFNR(start + i));
718
+ }
719
+ }
720
+
721
+ return count;
806722 }
807723
808
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
724
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
725
+ unsigned long aob)
809726 {
810727 int retries = 0, cc;
811728 unsigned int busy_bit;
....@@ -817,7 +734,7 @@
817734 retry:
818735 qperf_inc(q, siga_write);
819736
820
- cc = qdio_siga_output(q, &busy_bit, aob);
737
+ cc = qdio_siga_output(q, count, &busy_bit, aob);
821738 switch (cc) {
822739 case 0:
823740 break;
....@@ -849,15 +766,21 @@
849766
850767 static void __qdio_outbound_processing(struct qdio_q *q)
851768 {
769
+ unsigned int start = q->first_to_check;
770
+ int count;
771
+
852772 qperf_inc(q, tasklet_outbound);
853773 WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
854774
855
- if (qdio_outbound_q_moved(q))
856
- qdio_kick_handler(q);
775
+ count = qdio_outbound_q_moved(q, start);
776
+ if (count) {
777
+ q->first_to_check = add_buf(start, count);
778
+ qdio_kick_handler(q, start, count);
779
+ }
857780
858
- if (queue_type(q) == QDIO_ZFCP_QFMT)
859
- if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
860
- goto sched;
781
+ if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
782
+ !qdio_outbound_q_done(q))
783
+ goto sched;
861784
862785 if (q->u.out.pci_out_enabled)
863786 return;
@@ -893,57 +816,30 @@
 	qdio_tasklet_schedule(q);
 }
 
-static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
 {
 	struct qdio_q *out;
 	int i;
 
-	if (!pci_out_supported(q))
+	if (!pci_out_supported(irq) || !irq->scan_threshold)
 		return;
 
-	for_each_output_queue(q->irq_ptr, out, i)
+	for_each_output_queue(irq, out, i)
 		if (!qdio_outbound_q_done(out))
 			qdio_tasklet_schedule(out);
-}
-
-static void __tiqdio_inbound_processing(struct qdio_q *q)
-{
-	qperf_inc(q, tasklet_inbound);
-	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
-		qdio_sync_queues(q);
-
-	/*
-	 * The interrupt could be caused by a PCI request. Check the
-	 * PCI capable outbound queues.
-	 */
-	qdio_check_outbound_after_thinint(q);
-
-	if (!qdio_inbound_q_moved(q))
-		return;
-
-	qdio_kick_handler(q);
-
-	if (!qdio_inbound_q_done(q)) {
-		qperf_inc(q, tasklet_inbound_resched);
-		if (!qdio_tasklet_schedule(q))
-			return;
-	}
-
-	qdio_stop_polling(q);
-	/*
-	 * We need to check again to not lose initiative after
-	 * resetting the ACK state.
-	 */
-	if (!qdio_inbound_q_done(q)) {
-		qperf_inc(q, tasklet_inbound_resched2);
-		qdio_tasklet_schedule(q);
-	}
 }
 
 void tiqdio_inbound_processing(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
-	__tiqdio_inbound_processing(q);
+
+	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+		qdio_sync_queues(q);
+
+	/* The interrupt could be caused by a PCI request: */
+	qdio_check_outbound_pci_queues(q->irq_ptr);
+
+	__qdio_inbound_processing(q);
 }
 
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
@@ -973,22 +869,17 @@
 	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
-	for_each_input_queue(irq_ptr, q, i) {
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-					     &q->u.in.queue_irq_state)) {
-				qperf_inc(q, int_discarded);
-				continue;
-			}
-			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-						 q->irq_ptr->int_parm);
-		} else {
+	if (irq_ptr->irq_poll) {
+		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+			irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+		else
+			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+	} else {
+		for_each_input_queue(irq_ptr, q, i)
 			tasklet_schedule(&q->tasklet);
-		}
 	}
 
-	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
 		return;
 
 	for_each_output_queue(irq_ptr, q, i) {
@@ -1000,12 +891,11 @@
 	}
 }
 
-static void qdio_handle_activate_check(struct ccw_device *cdev,
-				unsigned long intparm, int cstat, int dstat)
+static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+				       unsigned long intparm, int cstat,
+				       int dstat)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct qdio_q *q;
-	int count;
 
 	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
 	DBF_ERROR("intp :%lx", intparm);
@@ -1020,9 +910,8 @@
 		goto no_handler;
 	}
 
-	count = sub_buf(q->first_to_check, q->first_to_kick);
 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
-		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
 no_handler:
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 	/*
@@ -1032,11 +921,9 @@
 	lgr_info_log();
 }
 
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
 				      int dstat)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
 
 	if (cstat)
@@ -1083,7 +970,7 @@
 
 	switch (irq_ptr->state) {
 	case QDIO_IRQ_STATE_INACTIVE:
-		qdio_establish_handle_irq(cdev, cstat, dstat);
+		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
 		break;
 	case QDIO_IRQ_STATE_CLEANUP:
 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -1095,7 +982,7 @@
 			return;
 		}
 		if (cstat || dstat)
-			qdio_handle_activate_check(cdev, intparm, cstat,
+			qdio_handle_activate_check(irq_ptr, intparm, cstat,
 						   dstat);
 		break;
 	case QDIO_IRQ_STATE_STOPPED:
@@ -1128,9 +1015,8 @@
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct ccw_device *cdev)
+static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct qdio_q *q;
 	int i;
 
@@ -1141,6 +1027,33 @@
 		del_timer_sync(&q->u.out.timer);
 		tasklet_kill(&q->tasklet);
 	}
+}
+
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
+{
+	struct ccw_device *cdev = irq->cdev;
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+	else
+		/* default behaviour is halt */
+		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+	if (rc) {
+		DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+		DBF_ERROR("rc:%4d", rc);
+		return rc;
+	}
+
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+					 irq->state == QDIO_IRQ_STATE_INACTIVE ||
+					 irq->state == QDIO_IRQ_STATE_ERR,
+					 10 * HZ);
+
+	return 0;
 }
 
 /**
@@ -1177,41 +1090,13 @@
 	 */
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-	tiqdio_remove_input_queues(irq_ptr);
-	qdio_shutdown_queues(cdev);
+	tiqdio_remove_device(irq_ptr);
+	qdio_shutdown_queues(irq_ptr);
 	qdio_shutdown_debug_entries(irq_ptr);
 
-	/* cleanup subchannel */
-	spin_lock_irq(get_ccwdev_lock(cdev));
-
-	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
-		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
-	else
-		/* default behaviour is halt */
-		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
-	if (rc) {
-		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
-		DBF_ERROR("rc:%4d", rc);
-		goto no_cleanup;
-	}
-
-	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-	spin_unlock_irq(get_ccwdev_lock(cdev));
-	wait_event_interruptible_timeout(cdev->private->wait_q,
-		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
-		irq_ptr->state == QDIO_IRQ_STATE_ERR,
-		10 * HZ);
-	spin_lock_irq(get_ccwdev_lock(cdev));
-
-no_cleanup:
+	rc = qdio_cancel_ccw(irq_ptr, how);
 	qdio_shutdown_thinint(irq_ptr);
-
-	/* restore interrupt handler */
-	if ((void *)cdev->handler == (void *)qdio_int_handler) {
-		cdev->handler = irq_ptr->orig_handler;
-		cdev->private->intparm = 0;
-	}
-	spin_unlock_irq(get_ccwdev_lock(cdev));
+	qdio_shutdown_irq(irq_ptr);
 
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	mutex_unlock(&irq_ptr->setup_mutex);
@@ -1242,43 +1127,47 @@
 	cdev->private->qdio_data = NULL;
 	mutex_unlock(&irq_ptr->setup_mutex);
 
-	qdio_release_memory(irq_ptr);
+	qdio_free_async_data(irq_ptr);
+	qdio_free_queues(irq_ptr);
+	free_page((unsigned long) irq_ptr->qdr);
+	free_page(irq_ptr->chsc_page);
+	free_page((unsigned long) irq_ptr);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qdio_free);
 
 /**
  * qdio_allocate - allocate qdio queues and associated data
- * @init_data: initialization data
+ * @cdev: associated ccw device
+ * @no_input_qs: allocate this number of Input Queues
+ * @no_output_qs: allocate this number of Output Queues
  */
-int qdio_allocate(struct qdio_initialize *init_data)
+int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+		  unsigned int no_output_qs)
 {
 	struct subchannel_id schid;
 	struct qdio_irq *irq_ptr;
+	int rc = -ENOMEM;
 
-	ccw_device_get_schid(init_data->cdev, &schid);
+	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qallocate:%4x", schid.sch_no);
 
-	if ((init_data->no_input_qs && !init_data->input_handler) ||
-	    (init_data->no_output_qs && !init_data->output_handler))
-		return -EINVAL;
-
-	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
-	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
-		return -EINVAL;
-
-	if ((!init_data->input_sbal_addr_array) ||
-	    (!init_data->output_sbal_addr_array))
+	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
 		return -EINVAL;
 
 	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
 	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!irq_ptr)
-		goto out_err;
+		return -ENOMEM;
 
+	irq_ptr->cdev = cdev;
 	mutex_init(&irq_ptr->setup_mutex);
-	if (qdio_allocate_dbf(init_data, irq_ptr))
-		goto out_rel;
+	if (qdio_allocate_dbf(irq_ptr))
+		goto err_dbf;
+
+	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
+		      no_output_qs);
 
 	/*
	 * Allocate a page for the chsc calls in qdio_establish.
@@ -1288,24 +1177,30 @@
 	 */
 	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
 	if (!irq_ptr->chsc_page)
-		goto out_rel;
+		goto err_chsc;
 
 	/* qdr is used in ccw1.cda which is u32 */
 	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!irq_ptr->qdr)
-		goto out_rel;
+		goto err_qdr;
 
-	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
-			     init_data->no_output_qs))
-		goto out_rel;
+	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
+	if (rc)
+		goto err_queues;
 
-	init_data->cdev->private->qdio_data = irq_ptr;
+	INIT_LIST_HEAD(&irq_ptr->entry);
+	cdev->private->qdio_data = irq_ptr;
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	return 0;
-out_rel:
-	qdio_release_memory(irq_ptr);
-out_err:
-	return -ENOMEM;
+
+err_queues:
+	free_page((unsigned long) irq_ptr->qdr);
+err_qdr:
+	free_page(irq_ptr->chsc_page);
+err_chsc:
+err_dbf:
+	free_page((unsigned long) irq_ptr);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);
 
@@ -1319,6 +1214,8 @@
 
 	for_each_output_queue(irq_ptr, q, i) {
 		if (use_cq) {
+			if (multicast_outbound(q))
+				continue;
 			if (qdio_enable_async_operation(&q->u.out) < 0) {
 				use_cq = 0;
 				continue;
@@ -1329,33 +1226,62 @@
 	DBF_EVENT("use_cq:%d", use_cq);
 }
 
+static void qdio_trace_init_data(struct qdio_irq *irq,
+				 struct qdio_initialize *data)
+{
+	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
+	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
+	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
+	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
+		      data->no_output_qs);
+	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
+}
+
 /**
  * qdio_establish - establish queues on a qdio subchannel
+ * @cdev: associated ccw device
  * @init_data: initialization data
  */
-int qdio_establish(struct qdio_initialize *init_data)
+int qdio_establish(struct ccw_device *cdev,
+		   struct qdio_initialize *init_data)
 {
-	struct ccw_device *cdev = init_data->cdev;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct subchannel_id schid;
-	struct qdio_irq *irq_ptr;
+	long timeout;
 	int rc;
 
 	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qestablish:%4x", schid.sch_no);
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
+	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
+	    init_data->no_output_qs > irq_ptr->max_output_qs)
+		return -EINVAL;
+
+	if ((init_data->no_input_qs && !init_data->input_handler) ||
+	    (init_data->no_output_qs && !init_data->output_handler))
+		return -EINVAL;
+
+	if (!init_data->input_sbal_addr_array ||
+	    !init_data->output_sbal_addr_array)
+		return -EINVAL;
+
 	mutex_lock(&irq_ptr->setup_mutex);
-	qdio_setup_irq(init_data);
+	qdio_trace_init_data(irq_ptr, init_data);
+	qdio_setup_irq(irq_ptr, init_data);
 
 	rc = qdio_establish_thinint(irq_ptr);
-	if (rc) {
-		mutex_unlock(&irq_ptr->setup_mutex);
-		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-		return rc;
-	}
+	if (rc)
+		goto err_thinint;
 
 	/* establish q */
 	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
@@ -1371,14 +1297,16 @@
 	if (rc) {
 		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
 		DBF_ERROR("rc:%4x", rc);
-		mutex_unlock(&irq_ptr->setup_mutex);
-		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-		return rc;
+		goto err_ccw_start;
 	}
 
-	wait_event_interruptible_timeout(cdev->private->wait_q,
-		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
-		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+						   irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
						   irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+	if (timeout <= 0) {
+		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+		goto err_ccw_timeout;
+	}
 
 	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
 		mutex_unlock(&irq_ptr->setup_mutex);
@@ -1394,9 +1322,19 @@
 	qdio_init_buf_states(irq_ptr);
 
 	mutex_unlock(&irq_ptr->setup_mutex);
-	qdio_print_subchannel_info(irq_ptr, cdev);
-	qdio_setup_debug_entries(irq_ptr, cdev);
+	qdio_print_subchannel_info(irq_ptr);
+	qdio_setup_debug_entries(irq_ptr);
 	return 0;
+
+err_ccw_timeout:
+	qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_start:
+	qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+	qdio_shutdown_irq(irq_ptr);
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	mutex_unlock(&irq_ptr->setup_mutex);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_establish);
 
@@ -1406,14 +1344,13 @@
 */
 int qdio_activate(struct ccw_device *cdev)
 {
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct subchannel_id schid;
-	struct qdio_irq *irq_ptr;
 	int rc;
 
 	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qactivate:%4x", schid.sch_no);
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
@@ -1441,7 +1378,7 @@
 	}
 
 	if (is_thinint_irq(irq_ptr))
-		tiqdio_add_input_queues(irq_ptr);
+		tiqdio_add_device(irq_ptr);
 
 	/* wait for subchannel to become active */
 	msleep(5);
@@ -1461,25 +1398,6 @@
 }
 EXPORT_SYMBOL_GPL(qdio_activate);
 
-static inline int buf_in_between(int bufnr, int start, int count)
-{
-	int end = add_buf(start, count);
-
-	if (end > start) {
-		if (bufnr >= start && bufnr < end)
-			return 1;
-		else
-			return 0;
-	}
-
-	/* wrap-around case */
-	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
-	    (bufnr < end))
-		return 1;
-	else
-		return 0;
-}
-
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
@@ -1490,38 +1408,18 @@
 static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 			  int bufnr, int count)
 {
-	int diff;
+	int overlap;
 
 	qperf_inc(q, inbound_call);
 
-	if (!q->u.in.polling)
-		goto set;
-
-	/* protect against stop polling setting an ACK for an emptied slsb */
-	if (count == QDIO_MAX_BUFFERS_PER_Q) {
-		/* overwriting everything, just delete polling status */
-		q->u.in.polling = 0;
-		q->u.in.ack_count = 0;
-		goto set;
-	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
-		if (is_qebsm(q)) {
-			/* partial overwrite, just update ack_start */
-			diff = add_buf(bufnr, count);
-			diff = sub_buf(diff, q->u.in.ack_start);
-			q->u.in.ack_count -= diff;
-			if (q->u.in.ack_count <= 0) {
-				q->u.in.polling = 0;
-				q->u.in.ack_count = 0;
-				goto set;
-			}
-			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
-		}
-		else
-			/* the only ACK will be deleted, so stop polling */
-			q->u.in.polling = 0;
+	/* If any processed SBALs are returned to HW, adjust our tracking: */
+	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
+			q->u.in.batch_count);
+	if (overlap > 0) {
+		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
+		q->u.in.batch_count -= overlap;
 	}
 
-set:
 	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
 	atomic_add(count, &q->nr_buf_used);
 
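Note, worked example for the overlap arithmetic above (numbers purely illustrative): returning count = 16 buffers starting at bufnr = 120 touches SBALs 120..7 of the 128-entry ring. If the tracked batch is batch_start = 124 and batch_count = 8, then sub_buf(124, 120) = 4 and count - 4 = 12 of the returned buffers lie inside the batch; min_t() caps that at batch_count = 8, so the whole batch is handed back, batch_start advances to 4 and batch_count drops to 0. The same logic, lifted out as a stand-alone sketch:

    /* Illustrative-only extraction of the batch-overlap adjustment;
     * sub_buf()/add_buf() are the modulo-128 ring helpers, as in qdio.
     */
    #define RING_SIZE 128

    static int sub_buf(int a, int b) { return (a - b) & (RING_SIZE - 1); }
    static int add_buf(int a, int b) { return (a + b) & (RING_SIZE - 1); }

    static void adjust_batch(int bufnr, int count, int *batch_start,
    			     int *batch_count)
    {
    	/* how many of the returned SBALs overlap the tracked batch */
    	int overlap = count - sub_buf(*batch_start, bufnr);

    	if (overlap > *batch_count)
    		overlap = *batch_count;
    	if (overlap > 0) {
    		*batch_start = add_buf(*batch_start, overlap);
    		*batch_count -= overlap;
    	}
    }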
@@ -1539,8 +1437,9 @@
  * @count: how many buffers are filled
  */
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-			   int bufnr, int count)
+			   unsigned int bufnr, unsigned int count)
 {
+	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
 	int used, rc = 0;
 
@@ -1561,12 +1460,10 @@
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
 		unsigned long phys_aob = 0;
 
-		/* One SIGA-W per buffer required for unicast HSI */
-		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+		if (q->u.out.use_cq && count == 1)
+			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
 
-		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
-
-		rc = qdio_kick_outbound_q(q, phys_aob);
+		rc = qdio_kick_outbound_q(q, count, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
 	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1575,11 +1472,15 @@
 		/* The previous buffer is not processed yet, tack on. */
 		qperf_inc(q, fast_requeue);
 	} else {
-		rc = qdio_kick_outbound_q(q, 0);
+		rc = qdio_kick_outbound_q(q, count, 0);
 	}
 
+	/* Let drivers implement their own completion scanning: */
+	if (!scan_threshold)
+		return rc;
+
 	/* in case of SIGA errors we must process the error immediately */
-	if (used >= q->u.out.scan_threshold || rc)
+	if (used >= scan_threshold || rc)
 		qdio_tasklet_schedule(q);
 	else
 		/* free the SBALs in case of no further traffic */
@@ -1600,12 +1501,11 @@
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 	    int q_nr, unsigned int bufnr, unsigned int count)
 {
-	struct qdio_irq *irq_ptr;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
@@ -1627,26 +1527,26 @@
 EXPORT_SYMBOL_GPL(do_QDIO);
 
 /**
- * qdio_start_irq - process input buffers
+ * qdio_start_irq - enable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
 {
 	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	unsigned int i;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
-	clear_nonshared_ind(irq_ptr);
-	qdio_stop_polling(q);
-	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+	for_each_input_queue(irq_ptr, q, i)
+		qdio_stop_polling(q);
+
+	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
 
 	/*
	 * We need to check again to not lose initiative after
@@ -1654,19 +1554,60 @@
	 */
 	if (test_nonshared_ind(irq_ptr))
 		goto rescan;
-	if (!qdio_inbound_q_done(q))
-		goto rescan;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		if (!qdio_inbound_q_done(q, q->first_to_check))
+			goto rescan;
+	}
+
 	return 0;
 
rescan:
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
 
 }
 EXPORT_SYMBOL(qdio_start_irq);
+
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+				unsigned int *error)
+{
+	unsigned int start = q->first_to_check;
+	int count;
+
+	count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+				qdio_outbound_q_moved(q, start);
+	if (count == 0)
+		return 0;
+
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_check = add_buf(start, count);
+	q->qdio_error = 0;
+
+	return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+		       unsigned int *bufnr, unsigned int *error)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+
+	return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
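Note: together with the reworked qdio_start_irq()/qdio_stop_irq(), the qdio_inspect_queue() added above gives upper-layer drivers a NAPI-style completion path (QDIO's own tasklet scanning is skipped when irq_ptr->scan_threshold is 0). A hedged sketch of a polling caller; my_drv_process() and the enclosing driver are hypothetical:

    static void my_drv_poll(struct ccw_device *cdev, unsigned int queue_nr)
    {
    	unsigned int bufnr, error;
    	int count;

    	do {
    		do {
    			/* scan one input queue for processed SBALs */
    			count = qdio_inspect_queue(cdev, queue_nr, true,
    						   &bufnr, &error);
    			if (count > 0)
    				my_drv_process(bufnr, count, error); /* hypothetical */
    		} while (count > 0);

    		/* re-arm interrupts; rc 1 means new data raced in: rescan */
    	} while (qdio_start_irq(cdev) == 1);
    }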
 
 /**
  * qdio_get_next_buffers - process input buffers
@@ -1684,7 +1625,6 @@
 		int *error)
 {
 	struct qdio_q *q;
-	int start, end;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (!irq_ptr)
@@ -1698,141 +1638,37 @@
 	if (need_siga_sync(q))
 		qdio_sync_queues(q);
 
-	/* check the PCI capable outbound queues. */
-	qdio_check_outbound_after_thinint(q);
-
-	if (!qdio_inbound_q_moved(q))
-		return 0;
+	qdio_check_outbound_pci_queues(irq_ptr);
 
 	/* Note: upper-layer MUST stop processing immediately here ... */
 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return -EIO;
 
-	start = q->first_to_kick;
-	end = q->first_to_check;
-	*bufnr = start;
-	*error = q->qdio_error;
-
-	/* for the next time */
-	q->first_to_kick = end;
-	q->qdio_error = 0;
-	return sub_buf(end, start);
+	return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL(qdio_get_next_buffers);
 
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
 {
-	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
 }
 EXPORT_SYMBOL(qdio_stop_irq);
-
-/**
- * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
- * @schid: Subchannel ID.
- * @cnc: Boolean Change-Notification Control
- * @response: Response code will be stored at this address
- * @cb: Callback function will be executed for each element
- *	of the address list
- * @priv: Pointer to pass to the callback function.
- *
- * Performs "Store-network-bridging-information list" operation and calls
- * the callback function for every entry in the list. If "change-
- * notification-control" is set, further changes in the address list
- * will be reported via the IPA command.
- */
-int qdio_pnso_brinfo(struct subchannel_id schid,
-		int cnc, u16 *response,
-		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
-				void *entry),
-		void *priv)
-{
-	struct chsc_pnso_area *rr;
-	int rc;
-	u32 prev_instance = 0;
-	int isfirstblock = 1;
-	int i, size, elems;
-
-	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
-	if (rr == NULL)
-		return -ENOMEM;
-	do {
-		/* on the first iteration, naihdr.resume_token will be zero */
-		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
-		if (rc != 0 && rc != -EBUSY)
-			goto out;
-		if (rr->response.code != 1) {
-			rc = -EIO;
-			continue;
-		} else
-			rc = 0;
-
-		if (cb == NULL)
-			continue;
-
-		size = rr->naihdr.naids;
-		elems = (rr->response.length -
-			 sizeof(struct chsc_header) -
-			 sizeof(struct chsc_brinfo_naihdr)) /
-			 size;
-
-		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
-			/* Inform the caller that they need to scrap */
-			/* the data that was already reported via cb */
-			rc = -EAGAIN;
-			break;
-		}
-		isfirstblock = 0;
-		prev_instance = rr->naihdr.instance;
-		for (i = 0; i < elems; i++)
-			switch (size) {
-			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
-				(*cb)(priv, l3_ipv6_addr,
-				      &rr->entries.l3_ipv6[i]);
-				break;
-			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
-				(*cb)(priv, l3_ipv4_addr,
-				      &rr->entries.l3_ipv4[i]);
-				break;
-			case sizeof(struct qdio_brinfo_entry_l2):
-				(*cb)(priv, l2_addr_lnid,
-				      &rr->entries.l2[i]);
-				break;
-			default:
-				WARN_ON_ONCE(1);
-				rc = -EIO;
-				goto out;
-			}
-	} while (rr->response.code == 0x0107 ||	/* channel busy */
-		  (rr->response.code == 1 && /* list stored */
-		   /* resume token is non-zero => list incomplete */
-		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
-	(*response) = rr->response.code;
-
-out:
-	free_page((unsigned long)rr);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
 
 static int __init init_QDIO(void)
@@ -1844,16 +1680,11 @@
 	rc = qdio_setup_init();
 	if (rc)
 		goto out_debug;
-	rc = tiqdio_allocate_memory();
+	rc = qdio_thinint_init();
 	if (rc)
 		goto out_cache;
-	rc = tiqdio_register_thinints();
-	if (rc)
-		goto out_ti;
 	return 0;
 
-out_ti:
-	tiqdio_free_memory();
 out_cache:
 	qdio_setup_exit();
 out_debug:
@@ -1863,8 +1694,7 @@
 
 static void __exit exit_QDIO(void)
 {
-	tiqdio_unregister_thinints();
-	tiqdio_free_memory();
+	qdio_thinint_exit();
 	qdio_setup_exit();
 	qdio_debug_exit();
 }