From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB
---
kernel/drivers/s390/cio/qdio_main.c | 1016 ++++++++++++++++++++++++----------------------------------
1 file changed, 423 insertions(+), 593 deletions(-)
diff --git a/kernel/drivers/s390/cio/qdio_main.c b/kernel/drivers/s390/cio/qdio_main.c
index 4b7cc8d..e3c55fc 100644
--- a/kernel/drivers/s390/cio/qdio_main.c
+++ b/kernel/drivers/s390/cio/qdio_main.c
@@ -31,38 +31,41 @@
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
- unsigned int out_mask, unsigned int in_mask,
+ unsigned long out_mask, unsigned long in_mask,
unsigned int fc)
{
- register unsigned long __fc asm ("0") = fc;
- register unsigned long __schid asm ("1") = schid;
- register unsigned long out asm ("2") = out_mask;
- register unsigned long in asm ("3") = in_mask;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[out]\n"
+ " lgr 3,%[in]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid),
+ [out] "d" (out_mask), [in] "d" (in_mask)
+ : "cc", "0", "1", "2", "3");
return cc;
}
-static inline int do_siga_input(unsigned long schid, unsigned int mask,
- unsigned int fc)
+static inline int do_siga_input(unsigned long schid, unsigned long mask,
+ unsigned long fc)
{
- register unsigned long __fc asm ("0") = fc;
- register unsigned long __schid asm ("1") = schid;
- register unsigned long __mask asm ("2") = mask;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
+ : "cc", "0", "1", "2");
return cc;
}
@@ -78,23 +81,24 @@
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
- unsigned int *bb, unsigned int fc,
+ unsigned int *bb, unsigned long fc,
unsigned long aob)
{
- register unsigned long __fc asm("0") = fc;
- register unsigned long __schid asm("1") = schid;
- register unsigned long __mask asm("2") = mask;
- register unsigned long __aob asm("3") = aob;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
+ " lgr 3,%[aob]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc), "+d" (__fc), "+d" (__aob)
- : "d" (__schid), "d" (__mask)
- : "cc");
- *bb = __fc >> 31;
+ " lgr %[fc],0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [fc] "+&d" (fc)
+ : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
+ : "cc", "0", "1", "2", "3");
+ *bb = fc >> 31;
return cc;
}
@@ -131,7 +135,7 @@
case 96:
/* not all buffers processed */
qperf_inc(q, eqbs_partial);
- DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
tmp_count);
return count - tmp_count;
case 97:
@@ -143,7 +147,7 @@
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
- q->first_to_kick, count, q->irq_ptr->int_parm);
+ q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
@@ -191,7 +195,7 @@
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
- q->first_to_kick, count, q->irq_ptr->int_parm);
+ q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
@@ -205,17 +209,22 @@
int auto_ack, int merge_pending)
{
unsigned char __state = 0;
- int i;
+ int i = 1;
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
/* get initial state: */
__state = q->slsb.val[bufnr];
+
+ /* Bail out early if there is no work on the queue: */
+ if (__state & SLSB_OWNER_CU)
+ goto out;
+
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
- for (i = 1; i < count; i++) {
+ for (; i < count; i++) {
bufnr = next_buf(bufnr);
/* merge PENDING into EMPTY: */
@@ -228,6 +237,8 @@
if (q->slsb.val[bufnr] != __state)
break;
}
+
+out:
*state = __state;
return i;
}
@@ -247,10 +258,17 @@
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
+ /* Ensure that all preceding changes to the SBALs are visible: */
+ mb();
+
for (i = 0; i < count; i++) {
- xchg(&q->slsb.val[bufnr], state);
+ WRITE_ONCE(q->slsb.val[bufnr], state);
bufnr = next_buf(bufnr);
}
+
+ /* Make our SLSB changes visible: */
+ mb();
+
return count;
}
@@ -303,20 +321,19 @@
return qdio_siga_sync(q, q->mask, 0);
}
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
- unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+ unsigned int *busy_bit, unsigned long aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
int retries = 0, cc;
- unsigned long laob = 0;
- WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
- !q->u.out.use_cq));
- if (q->u.out.use_cq && aob != 0) {
- fc = QDIO_SIGA_WRITEQ;
- laob = aob;
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+ if (count > 1)
+ fc = QDIO_SIGA_WRITEM;
+ else if (aob)
+ fc = QDIO_SIGA_WRITEQ;
}
if (is_qebsm(q)) {
@@ -324,7 +341,7 @@
fc |= QDIO_SIGA_QEBSM_FLAG;
}
again:
- cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
@@ -371,7 +388,7 @@
static inline void qdio_sync_queues(struct qdio_q *q)
{
/* PCI capable outbound queues will also be scanned so sync them too */
- if (pci_out_supported(q))
+ if (pci_out_supported(q->irq_ptr))
qdio_siga_sync_all(q);
else
qdio_siga_sync_q(q);
@@ -382,186 +399,125 @@
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0, 0);
+ return get_buf_state(q, bufnr, state, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{
- if (!q->u.in.polling)
+ if (!q->u.in.batch_count)
return;
- q->u.in.polling = 0;
qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
- if (is_qebsm(q)) {
- set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
- q->u.in.ack_count);
- q->u.in.ack_count = 0;
- } else
- set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+ set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.batch_count);
+ q->u.in.batch_count = 0;
}
static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
- int pos;
-
q->q_stats.nr_sbal_total += count;
- if (count == QDIO_MAX_BUFFERS_MASK) {
- q->q_stats.nr_sbals[7]++;
- return;
- }
- pos = ilog2(count);
- q->q_stats.nr_sbals[pos]++;
+ q->q_stats.nr_sbals[ilog2(count)]++;
}
-static void process_buffer_error(struct qdio_q *q, int count)
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+ int count)
{
- unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
- SLSB_P_OUTPUT_NOT_INIT;
-
q->qdio_error = QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
- q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+ q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
- q->first_to_check);
- goto set;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
+ return;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
- DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+ DBF_ERROR("FTC:%3d C:%3d", start, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].sflags,
- q->sbal[q->first_to_check]->element[15].sflags);
-
-set:
- /*
- * Interrupts may be avoided as long as the error is present
- * so change the buffer state immediately to avoid starvation.
- */
- set_buf_states(q, q->first_to_check, state, count);
+ q->sbal[start]->element[14].sflags,
+ q->sbal[start]->element[15].sflags);
}
-static inline void inbound_primed(struct qdio_q *q, int count)
+static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
+ int count, bool auto_ack)
{
- int new;
+ /* ACK the newest SBAL: */
+ if (!auto_ack)
+ set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
-
- /* for QEBSM the ACK was already set by EQBS */
- if (is_qebsm(q)) {
- if (!q->u.in.polling) {
- q->u.in.polling = 1;
- q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
- return;
- }
-
- /* delete the previous ACK's */
- set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
- q->u.in.ack_count);
- q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
- return;
- }
-
- /*
- * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
- * or by the next inbound run.
- */
- new = add_buf(q->first_to_check, count - 1);
- if (q->u.in.polling) {
- /* reset the previous ACK but first set the new one */
- set_buf_state(q, new, SLSB_P_INPUT_ACK);
- set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
- } else {
- q->u.in.polling = 1;
- set_buf_state(q, new, SLSB_P_INPUT_ACK);
- }
-
- q->u.in.ack_start = new;
- count--;
- if (!count)
- return;
- /* need to change ALL buffers to get more interrupts */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+ if (!q->u.in.batch_count)
+ q->u.in.batch_start = start;
+ q->u.in.batch_count += count;
}
-static int get_inbound_buffer_frontier(struct qdio_q *q)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
q->timestamp = get_tod_clock_fast();
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
- goto out;
+ return 0;
/*
* No siga sync here, as a PCI or we after a thin interrupt
* already sync'ed the queues.
*/
- count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+ count = get_buf_states(q, start, &state, count, 1, 0);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_INPUT_PRIMED:
- inbound_primed(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
+ count);
+
+ inbound_handle_work(q, start, count, is_qebsm(q));
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
- break;
+ return count;
case SLSB_P_INPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
+ count);
+
+ process_buffer_error(q, start, count);
+ inbound_handle_work(q, start, count, false);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_INPUT_EMPTY:
- case SLSB_P_INPUT_NOT_INIT:
- case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
- q->nr, q->first_to_check);
- break;
- default:
- WARN_ON_ONCE(1);
- }
-out:
- return q->first_to_check;
-}
-
-static int qdio_inbound_q_moved(struct qdio_q *q)
-{
- int bufnr;
-
- bufnr = get_inbound_buffer_frontier(q);
-
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_tod_clock();
- return 1;
- } else
+ q->nr, start);
return 0;
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_INPUT_ACK:
+ /* We should never see this state, throw a WARN: */
+ default:
+ dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+ "found state %#x at index %u on queue %u\n",
+ state, start, q->nr);
+ return 0;
+ }
}
-static inline int qdio_inbound_q_done(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
+{
+ return get_inbound_buffer_frontier(q, start);
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
@@ -570,59 +526,13 @@
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
+ get_buf_state(q, start, &state, 0);
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
return 0;
- if (is_thinint_irq(q->irq_ptr))
- return 1;
-
- /* don't poll under z/VM */
- if (MACHINE_IS_VM)
- return 1;
-
- /*
- * At this point we know, that inbound first_to_check
- * has (probably) not moved (see qdio_inbound_processing).
- */
- if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
- q->first_to_check);
- return 1;
- } else
- return 0;
-}
-
-static inline int contains_aobs(struct qdio_q *q)
-{
- return !q->is_input_q && q->u.out.use_cq;
-}
-
-static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
-{
- unsigned char state = 0;
- int j, b = start;
-
- if (!contains_aobs(q))
- return;
-
- for (j = 0; j < count; ++j) {
- get_buf_state(q, b, &state, 0);
- if (state == SLSB_P_OUTPUT_PENDING) {
- struct qaob *aob = q->u.out.aobs[b];
- if (aob == NULL)
- continue;
-
- q->u.out.sbal_state[b].flags |=
- QDIO_OUTBUF_STATE_FLAG_PENDING;
- q->u.out.aobs[b] = NULL;
- } else if (state == SLSB_P_OUTPUT_EMPTY) {
- q->u.out.sbal_state[b].aob = NULL;
- }
- b = next_buf(b);
- }
+ return 1;
}
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
@@ -630,15 +540,11 @@
{
unsigned long phys_aob = 0;
- if (!q->use_cq)
- return 0;
-
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
@@ -648,16 +554,11 @@
return phys_aob;
}
-static void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
+ unsigned int count)
{
- int start = q->first_to_kick;
- int end = q->first_to_check;
- int count;
-
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
-
- count = sub_buf(end, start);
if (q->is_input_q) {
qperf_inc(q, inbound_handler);
@@ -668,13 +569,10 @@
start, count);
}
- qdio_handle_aobs(q, start, count);
-
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
/* for the next time */
- q->first_to_kick = end;
q->qdio_error = 0;
}
@@ -689,14 +587,20 @@
static void __qdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
- qdio_kick_handler(q);
+ qdio_kick_handler(q, start, count);
+ start = add_buf(start, count);
+ q->first_to_check = start;
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
@@ -708,7 +612,7 @@
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -720,7 +624,20 @@
__qdio_inbound_processing(q);
}
-static int get_outbound_buffer_frontier(struct qdio_q *q)
+static void qdio_check_pending(struct qdio_q *q, unsigned int index)
+{
+ unsigned char state;
+
+ if (get_buf_state(q, index, &state, 0) > 0 &&
+ state == SLSB_P_OUTPUT_PENDING &&
+ q->u.out.aobs[index]) {
+ q->u.out.sbal_state[index].flags |=
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
+ q->u.out.aobs[index] = NULL;
+ }
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -729,23 +646,18 @@
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
- !pci_out_supported(q)) ||
+ !pci_out_supported(q->irq_ptr)) ||
(queue_type(q) == QDIO_IQDIO_QFMT &&
multicast_outbound(q)))
qdio_siga_sync_q(q);
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
- goto out;
+ return 0;
- count = get_buf_states(q, q->first_to_check, &state, count, 0,
- q->u.out.use_cq);
+ count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
@@ -755,34 +667,32 @@
"out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
- q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
-
- break;
+ return count;
case SLSB_P_OUTPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
- break;
- case SLSB_P_OUTPUT_NOT_INIT:
+ return 0;
case SLSB_P_OUTPUT_HALTED:
- break;
+ return 0;
+ case SLSB_P_OUTPUT_NOT_INIT:
+ /* We should never see this state, throw a WARN: */
default:
- WARN_ON_ONCE(1);
+ dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+ "found state %#x at index %u on queue %u\n",
+ state, start, q->nr);
+ return 0;
}
-
-out:
- return q->first_to_check;
}
/* all buffers processed? */
@@ -791,21 +701,28 @@
return atomic_read(&q->nr_buf_used) == 0;
}
-static inline int qdio_outbound_q_moved(struct qdio_q *q)
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
- bufnr = get_outbound_buffer_frontier(q);
+ count = get_outbound_buffer_frontier(q, start);
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
+ if (count) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
- return 1;
- } else
- return 0;
+
+ if (q->u.out.use_cq) {
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ qdio_check_pending(q, QDIO_BUFNR(start + i));
+ }
+ }
+
+ return count;
}
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+ unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -817,7 +734,7 @@
retry:
qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit, aob);
+ cc = qdio_siga_output(q, count, &busy_bit, aob);
switch (cc) {
case 0:
break;
@@ -849,15 +766,21 @@
static void __qdio_outbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_outbound);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
- if (qdio_outbound_q_moved(q))
- qdio_kick_handler(q);
+ count = qdio_outbound_q_moved(q, start);
+ if (count) {
+ q->first_to_check = add_buf(start, count);
+ qdio_kick_handler(q, start, count);
+ }
- if (queue_type(q) == QDIO_ZFCP_QFMT)
- if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
- goto sched;
+ if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
+ !qdio_outbound_q_done(q))
+ goto sched;
if (q->u.out.pci_out_enabled)
return;
@@ -893,57 +816,30 @@
qdio_tasklet_schedule(q);
}
-static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
struct qdio_q *out;
int i;
- if (!pci_out_supported(q))
+ if (!pci_out_supported(irq) || !irq->scan_threshold)
return;
- for_each_output_queue(q->irq_ptr, out, i)
+ for_each_output_queue(irq, out, i)
if (!qdio_outbound_q_done(out))
qdio_tasklet_schedule(out);
-}
-
-static void __tiqdio_inbound_processing(struct qdio_q *q)
-{
- qperf_inc(q, tasklet_inbound);
- if (need_siga_sync(q) && need_siga_sync_after_ai(q))
- qdio_sync_queues(q);
-
- /*
- * The interrupt could be caused by a PCI request. Check the
- * PCI capable outbound queues.
- */
- qdio_check_outbound_after_thinint(q);
-
- if (!qdio_inbound_q_moved(q))
- return;
-
- qdio_kick_handler(q);
-
- if (!qdio_inbound_q_done(q)) {
- qperf_inc(q, tasklet_inbound_resched);
- if (!qdio_tasklet_schedule(q))
- return;
- }
-
- qdio_stop_polling(q);
- /*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
- */
- if (!qdio_inbound_q_done(q)) {
- qperf_inc(q, tasklet_inbound_resched2);
- qdio_tasklet_schedule(q);
- }
}
void tiqdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
- __tiqdio_inbound_processing(q);
+
+ if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+ qdio_sync_queues(q);
+
+ /* The interrupt could be caused by a PCI request: */
+ qdio_check_outbound_pci_queues(q->irq_ptr);
+
+ __qdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
@@ -973,22 +869,17 @@
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- for_each_input_queue(irq_ptr, q, i) {
- if (q->u.in.queue_start_poll) {
- /* skip if polling is enabled or already in work */
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state)) {
- qperf_inc(q, int_discarded);
- continue;
- }
- q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
- q->irq_ptr->int_parm);
- } else {
+ if (irq_ptr->irq_poll) {
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+ irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+ } else {
+ for_each_input_queue(irq_ptr, q, i)
tasklet_schedule(&q->tasklet);
- }
}
- if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
return;
for_each_output_queue(irq_ptr, q, i) {
@@ -1000,12 +891,11 @@
}
}
-static void qdio_handle_activate_check(struct ccw_device *cdev,
- unsigned long intparm, int cstat, int dstat)
+static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+ unsigned long intparm, int cstat,
+ int dstat)
{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
- int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
@@ -1020,9 +910,8 @@
goto no_handler;
}
- count = sub_buf(q->first_to_check, q->first_to_kick);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
- q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+ q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
/*
@@ -1032,11 +921,9 @@
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
int dstat)
{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
if (cstat)
@@ -1083,7 +970,7 @@
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(cdev, cstat, dstat);
+ qdio_establish_handle_irq(irq_ptr, cstat, dstat);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -1095,7 +982,7 @@
return;
}
if (cstat || dstat)
- qdio_handle_activate_check(cdev, intparm, cstat,
+ qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
break;
case QDIO_IRQ_STATE_STOPPED:
@@ -1128,9 +1015,8 @@
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-static void qdio_shutdown_queues(struct ccw_device *cdev)
+static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
int i;
@@ -1141,6 +1027,33 @@
del_timer_sync(&q->u.out.timer);
tasklet_kill(&q->tasklet);
}
+}
+
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
+{
+ struct ccw_device *cdev = irq->cdev;
+ int rc;
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+ else
+ /* default behaviour is halt */
+ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ if (rc) {
+ DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+ DBF_ERROR("rc:%4d", rc);
+ return rc;
+ }
+
+ wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq->state == QDIO_IRQ_STATE_INACTIVE ||
+ irq->state == QDIO_IRQ_STATE_ERR,
+ 10 * HZ);
+
+ return 0;
}
/**
@@ -1177,41 +1090,13 @@
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
- tiqdio_remove_input_queues(irq_ptr);
- qdio_shutdown_queues(cdev);
+ tiqdio_remove_device(irq_ptr);
+ qdio_shutdown_queues(irq_ptr);
qdio_shutdown_debug_entries(irq_ptr);
- /* cleanup subchannel */
- spin_lock_irq(get_ccwdev_lock(cdev));
-
- if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
- rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
- else
- /* default behaviour is halt */
- rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
- if (rc) {
- DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
- DBF_ERROR("rc:%4d", rc);
- goto no_cleanup;
- }
-
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
- spin_unlock_irq(get_ccwdev_lock(cdev));
- wait_event_interruptible_timeout(cdev->private->wait_q,
- irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
- irq_ptr->state == QDIO_IRQ_STATE_ERR,
- 10 * HZ);
- spin_lock_irq(get_ccwdev_lock(cdev));
-
-no_cleanup:
+ rc = qdio_cancel_ccw(irq_ptr, how);
qdio_shutdown_thinint(irq_ptr);
-
- /* restore interrupt handler */
- if ((void *)cdev->handler == (void *)qdio_int_handler) {
- cdev->handler = irq_ptr->orig_handler;
- cdev->private->intparm = 0;
- }
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ qdio_shutdown_irq(irq_ptr);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1242,43 +1127,47 @@
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_release_memory(irq_ptr);
+ qdio_free_async_data(irq_ptr);
+ qdio_free_queues(irq_ptr);
+ free_page((unsigned long) irq_ptr->qdr);
+ free_page(irq_ptr->chsc_page);
+ free_page((unsigned long) irq_ptr);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
* qdio_allocate - allocate qdio queues and associated data
- * @init_data: initialization data
+ * @cdev: associated ccw device
+ * @no_input_qs: allocate this number of Input Queues
+ * @no_output_qs: allocate this number of Output Queues
*/
-int qdio_allocate(struct qdio_initialize *init_data)
+int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+ unsigned int no_output_qs)
{
struct subchannel_id schid;
struct qdio_irq *irq_ptr;
+ int rc = -ENOMEM;
- ccw_device_get_schid(init_data->cdev, &schid);
+ ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qallocate:%4x", schid.sch_no);
- if ((init_data->no_input_qs && !init_data->input_handler) ||
- (init_data->no_output_qs && !init_data->output_handler))
- return -EINVAL;
-
- if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
- (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
- return -EINVAL;
-
- if ((!init_data->input_sbal_addr_array) ||
- (!init_data->output_sbal_addr_array))
+ if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+ no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
return -EINVAL;
/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr)
- goto out_err;
+ return -ENOMEM;
+ irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
- if (qdio_allocate_dbf(init_data, irq_ptr))
- goto out_rel;
+ if (qdio_allocate_dbf(irq_ptr))
+ goto err_dbf;
+
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
+ no_output_qs);
/*
* Allocate a page for the chsc calls in qdio_establish.
@@ -1288,24 +1177,30 @@
*/
irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
if (!irq_ptr->chsc_page)
- goto out_rel;
+ goto err_chsc;
/* qdr is used in ccw1.cda which is u32 */
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
- goto out_rel;
+ goto err_qdr;
- if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
- init_data->no_output_qs))
- goto out_rel;
+ rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
+ if (rc)
+ goto err_queues;
- init_data->cdev->private->qdio_data = irq_ptr;
+ INIT_LIST_HEAD(&irq_ptr->entry);
+ cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
-out_rel:
- qdio_release_memory(irq_ptr);
-out_err:
- return -ENOMEM;
+
+err_queues:
+ free_page((unsigned long) irq_ptr->qdr);
+err_qdr:
+ free_page(irq_ptr->chsc_page);
+err_chsc:
+err_dbf:
+ free_page((unsigned long) irq_ptr);
+ return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
@@ -1319,6 +1214,8 @@
for_each_output_queue(irq_ptr, q, i) {
if (use_cq) {
+ if (multicast_outbound(q))
+ continue;
if (qdio_enable_async_operation(&q->u.out) < 0) {
use_cq = 0;
continue;
@@ -1329,33 +1226,62 @@
DBF_EVENT("use_cq:%d", use_cq);
}
+static void qdio_trace_init_data(struct qdio_irq *irq,
+ struct qdio_initialize *data)
+{
+ DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
+ DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
+ DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
+ data->no_output_qs);
+ DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
+ DBF_ERR);
+}
+
/**
* qdio_establish - establish queues on a qdio subchannel
+ * @cdev: associated ccw device
* @init_data: initialization data
*/
-int qdio_establish(struct qdio_initialize *init_data)
+int qdio_establish(struct ccw_device *cdev,
+ struct qdio_initialize *init_data)
{
- struct ccw_device *cdev = init_data->cdev;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- struct qdio_irq *irq_ptr;
+ long timeout;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qestablish:%4x", schid.sch_no);
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
+ if (init_data->no_input_qs > irq_ptr->max_input_qs ||
+ init_data->no_output_qs > irq_ptr->max_output_qs)
+ return -EINVAL;
+
+ if ((init_data->no_input_qs && !init_data->input_handler) ||
+ (init_data->no_output_qs && !init_data->output_handler))
+ return -EINVAL;
+
+ if (!init_data->input_sbal_addr_array ||
+ !init_data->output_sbal_addr_array)
+ return -EINVAL;
+
mutex_lock(&irq_ptr->setup_mutex);
- qdio_setup_irq(init_data);
+ qdio_trace_init_data(irq_ptr, init_data);
+ qdio_setup_irq(irq_ptr, init_data);
rc = qdio_establish_thinint(irq_ptr);
- if (rc) {
- mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- return rc;
- }
+ if (rc)
+ goto err_thinint;
/* establish q */
irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
@@ -1371,14 +1297,16 @@
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
- mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- return rc;
+ goto err_ccw_start;
}
- wait_event_interruptible_timeout(cdev->private->wait_q,
- irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
- irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+ timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+ if (timeout <= 0) {
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+ goto err_ccw_timeout;
+ }
if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1394,9 +1322,19 @@
qdio_init_buf_states(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_print_subchannel_info(irq_ptr, cdev);
- qdio_setup_debug_entries(irq_ptr, cdev);
+ qdio_print_subchannel_info(irq_ptr);
+ qdio_setup_debug_entries(irq_ptr);
return 0;
+
+err_ccw_timeout:
+ qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_start:
+ qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+ qdio_shutdown_irq(irq_ptr);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return rc;
}
EXPORT_SYMBOL_GPL(qdio_establish);
@@ -1406,14 +1344,13 @@
*/
int qdio_activate(struct ccw_device *cdev)
{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- struct qdio_irq *irq_ptr;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qactivate:%4x", schid.sch_no);
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
@@ -1441,7 +1378,7 @@
}
if (is_thinint_irq(irq_ptr))
- tiqdio_add_input_queues(irq_ptr);
+ tiqdio_add_device(irq_ptr);
/* wait for subchannel to become active */
msleep(5);
@@ -1461,25 +1398,6 @@
}
EXPORT_SYMBOL_GPL(qdio_activate);
-static inline int buf_in_between(int bufnr, int start, int count)
-{
- int end = add_buf(start, count);
-
- if (end > start) {
- if (bufnr >= start && bufnr < end)
- return 1;
- else
- return 0;
- }
-
- /* wrap-around case */
- if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
- (bufnr < end))
- return 1;
- else
- return 0;
-}
-
/**
* handle_inbound - reset processed input buffers
* @q: queue containing the buffers
@@ -1490,38 +1408,18 @@
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- int diff;
+ int overlap;
qperf_inc(q, inbound_call);
- if (!q->u.in.polling)
- goto set;
-
- /* protect against stop polling setting an ACK for an emptied slsb */
- if (count == QDIO_MAX_BUFFERS_PER_Q) {
- /* overwriting everything, just delete polling status */
- q->u.in.polling = 0;
- q->u.in.ack_count = 0;
- goto set;
- } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
- if (is_qebsm(q)) {
- /* partial overwrite, just update ack_start */
- diff = add_buf(bufnr, count);
- diff = sub_buf(diff, q->u.in.ack_start);
- q->u.in.ack_count -= diff;
- if (q->u.in.ack_count <= 0) {
- q->u.in.polling = 0;
- q->u.in.ack_count = 0;
- goto set;
- }
- q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
- }
- else
- /* the only ACK will be deleted, so stop polling */
- q->u.in.polling = 0;
+ /* If any processed SBALs are returned to HW, adjust our tracking: */
+ overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
+ q->u.in.batch_count);
+ if (overlap > 0) {
+ q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
+ q->u.in.batch_count -= overlap;
}
-set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
atomic_add(count, &q->nr_buf_used);
@@ -1539,8 +1437,9 @@
* @count: how many buffers are filled
*/
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
- int bufnr, int count)
+ unsigned int bufnr, unsigned int count)
{
+ const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
unsigned char state = 0;
int used, rc = 0;
@@ -1561,12 +1460,10 @@
if (queue_type(q) == QDIO_IQDIO_QFMT) {
unsigned long phys_aob = 0;
- /* One SIGA-W per buffer required for unicast HSI */
- WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+ if (q->u.out.use_cq && count == 1)
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
- phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
-
- rc = qdio_kick_outbound_q(q, phys_aob);
+ rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1575,11 +1472,15 @@
/* The previous buffer is not processed yet, tack on. */
qperf_inc(q, fast_requeue);
} else {
- rc = qdio_kick_outbound_q(q, 0);
+ rc = qdio_kick_outbound_q(q, count, 0);
}
+ /* Let drivers implement their own completion scanning: */
+ if (!scan_threshold)
+ return rc;
+
/* in case of SIGA errors we must process the error immediately */
- if (used >= q->u.out.scan_threshold || rc)
+ if (used >= scan_threshold || rc)
qdio_tasklet_schedule(q);
else
/* free the SBALs in case of no further traffic */
@@ -1600,12 +1501,11 @@
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, unsigned int bufnr, unsigned int count)
{
- struct qdio_irq *irq_ptr;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
@@ -1627,26 +1527,26 @@
EXPORT_SYMBOL_GPL(do_QDIO);
/**
- * qdio_start_irq - process input buffers
+ * qdio_start_irq - enable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
*
* Return codes
* 0 - success
* 1 - irqs not started since new data is available
*/
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
{
struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ unsigned int i;
if (!irq_ptr)
return -ENODEV;
- q = irq_ptr->input_qs[nr];
- clear_nonshared_ind(irq_ptr);
- qdio_stop_polling(q);
- clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+ for_each_input_queue(irq_ptr, q, i)
+ qdio_stop_polling(q);
+
+ clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
/*
* We need to check again to not lose initiative after
@@ -1654,19 +1554,60 @@
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
- if (!qdio_inbound_q_done(q))
- goto rescan;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ if (!qdio_inbound_q_done(q, q->first_to_check))
+ goto rescan;
+ }
+
return 0;
rescan:
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state))
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
+
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+ unsigned int *error)
+{
+ unsigned int start = q->first_to_check;
+ int count;
+
+ count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+ qdio_outbound_q_moved(q, start);
+ if (count == 0)
+ return 0;
+
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_check = add_buf(start, count);
+ q->qdio_error = 0;
+
+ return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+ unsigned int *bufnr, unsigned int *error)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+
+ return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
/**
* qdio_get_next_buffers - process input buffers
@@ -1684,7 +1625,6 @@
int *error)
{
struct qdio_q *q;
- int start, end;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
@@ -1698,141 +1638,37 @@
if (need_siga_sync(q))
qdio_sync_queues(q);
- /* check the PCI capable outbound queues. */
- qdio_check_outbound_after_thinint(q);
-
- if (!qdio_inbound_q_moved(q))
- return 0;
+ qdio_check_outbound_pci_queues(irq_ptr);
/* Note: upper-layer MUST stop processing immediately here ... */
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return -EIO;
- start = q->first_to_kick;
- end = q->first_to_check;
- *bufnr = start;
- *error = q->qdio_error;
-
- /* for the next time */
- q->first_to_kick = end;
- q->qdio_error = 0;
- return sub_buf(end, start);
+ return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
/**
* qdio_stop_irq - disable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
*
* Return codes
* 0 - interrupts were already disabled
* 1 - interrupts successfully disabled
*/
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
{
- struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- q = irq_ptr->input_qs[nr];
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state))
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
-
-/**
- * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
- * @schid: Subchannel ID.
- * @cnc: Boolean Change-Notification Control
- * @response: Response code will be stored at this address
- * @cb: Callback function will be executed for each element
- * of the address list
- * @priv: Pointer to pass to the callback function.
- *
- * Performs "Store-network-bridging-information list" operation and calls
- * the callback function for every entry in the list. If "change-
- * notification-control" is set, further changes in the address list
- * will be reported via the IPA command.
- */
-int qdio_pnso_brinfo(struct subchannel_id schid,
- int cnc, u16 *response,
- void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
- void *entry),
- void *priv)
-{
- struct chsc_pnso_area *rr;
- int rc;
- u32 prev_instance = 0;
- int isfirstblock = 1;
- int i, size, elems;
-
- rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
- if (rr == NULL)
- return -ENOMEM;
- do {
- /* on the first iteration, naihdr.resume_token will be zero */
- rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
- if (rc != 0 && rc != -EBUSY)
- goto out;
- if (rr->response.code != 1) {
- rc = -EIO;
- continue;
- } else
- rc = 0;
-
- if (cb == NULL)
- continue;
-
- size = rr->naihdr.naids;
- elems = (rr->response.length -
- sizeof(struct chsc_header) -
- sizeof(struct chsc_brinfo_naihdr)) /
- size;
-
- if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
- /* Inform the caller that they need to scrap */
- /* the data that was already reported via cb */
- rc = -EAGAIN;
- break;
- }
- isfirstblock = 0;
- prev_instance = rr->naihdr.instance;
- for (i = 0; i < elems; i++)
- switch (size) {
- case sizeof(struct qdio_brinfo_entry_l3_ipv6):
- (*cb)(priv, l3_ipv6_addr,
- &rr->entries.l3_ipv6[i]);
- break;
- case sizeof(struct qdio_brinfo_entry_l3_ipv4):
- (*cb)(priv, l3_ipv4_addr,
- &rr->entries.l3_ipv4[i]);
- break;
- case sizeof(struct qdio_brinfo_entry_l2):
- (*cb)(priv, l2_addr_lnid,
- &rr->entries.l2[i]);
- break;
- default:
- WARN_ON_ONCE(1);
- rc = -EIO;
- goto out;
- }
- } while (rr->response.code == 0x0107 || /* channel busy */
- (rr->response.code == 1 && /* list stored */
- /* resume token is non-zero => list incomplete */
- (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
- (*response) = rr->response.code;
-
-out:
- free_page((unsigned long)rr);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
static int __init init_QDIO(void)
{
@@ -1844,16 +1680,11 @@
rc = qdio_setup_init();
if (rc)
goto out_debug;
- rc = tiqdio_allocate_memory();
+ rc = qdio_thinint_init();
if (rc)
goto out_cache;
- rc = tiqdio_register_thinints();
- if (rc)
- goto out_ti;
return 0;
-out_ti:
- tiqdio_free_memory();
out_cache:
qdio_setup_exit();
out_debug:
@@ -1863,8 +1694,7 @@
static void __exit exit_QDIO(void)
{
- tiqdio_unregister_thinints();
- tiqdio_free_memory();
+ qdio_thinint_exit();
qdio_setup_exit();
qdio_debug_exit();
}
--
Gitblit v1.6.2