2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/s390/net/qeth_core_main.c
@@ -16,23 +16,27 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/io.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/mii.h>
+#include <linux/mm.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
+#include <linux/rcutree.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>

 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
+#include <net/sock.h>

 #include <asm/ebcdic.h>
 #include <asm/chpid.h>
-#include <asm/io.h>
 #include <asm/sysinfo.h>
 #include <asm/diag.h>
 #include <asm/cio.h>
@@ -53,37 +57,22 @@
 };
 EXPORT_SYMBOL_GPL(qeth_dbf);

-struct qeth_card_list_struct qeth_core_card_list;
-EXPORT_SYMBOL_GPL(qeth_core_card_list);
 struct kmem_cache *qeth_core_header_cache;
 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
 static struct kmem_cache *qeth_qdio_outbuf_cache;

 static struct device *qeth_core_root_dev;
+static struct dentry *qeth_debugfs_root;
 static struct lock_class_key qdio_out_skb_queue_key;
-static struct mutex qeth_mod_mutex;

-static void qeth_send_control_data_cb(struct qeth_channel *,
- struct qeth_cmd_buffer *);
-static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
-static void qeth_free_buffer_pool(struct qeth_card *);
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length);
 static int qeth_qdio_establish(struct qeth_card *);
-static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 struct qeth_qdio_out_buffer *buf,
 enum iucv_tx_notify notification);
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
-
-struct workqueue_struct *qeth_wq;
-EXPORT_SYMBOL_GPL(qeth_wq);
-
-int qeth_card_hw_is_reachable(struct qeth_card *card)
-{
- return (card->state == CARD_STATE_SOFTSETUP) ||
- (card->state == CARD_STATE_UP);
-}
-EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);

 static void qeth_close_dev_handler(struct work_struct *work)
 {
@@ -91,22 +80,12 @@

 card = container_of(work, struct qeth_card, close_dev_work);
 QETH_CARD_TEXT(card, 2, "cldevhdl");
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
 ccwgroup_set_offline(card->gdev);
 }

-void qeth_close_dev(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 2, "cldevsubm");
- queue_work(qeth_wq, &card->close_dev_work);
-}
-EXPORT_SYMBOL_GPL(qeth_close_dev);
-
 static const char *qeth_get_cardname(struct qeth_card *card)
 {
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
 switch (card->info.type) {
 case QETH_CARD_TYPE_OSD:
 return " Virtual NIC QDIO";
@@ -141,7 +120,7 @@
 /* max length to be returned: 14 */
 const char *qeth_get_cardname_short(struct qeth_card *card)
 {
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
 switch (card->info.type) {
 case QETH_CARD_TYPE_OSD:
 return "Virt.NIC QDIO";
@@ -166,6 +145,8 @@
 return "OSD_1000";
 case QETH_LINK_TYPE_10GBIT_ETH:
 return "OSD_10GIG";
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ return "OSD_25GIG";
 case QETH_LINK_TYPE_LANE_ETH100:
 return "OSD_FE_LANE";
 case QETH_LINK_TYPE_LANE_TR:
@@ -190,23 +171,6 @@
 }
 }
 return "n/a";
-}
-
-void qeth_set_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = current;
-}
-EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
-
-void qeth_clear_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
-
-static bool qeth_is_recovery_task(const struct qeth_card *card)
-{
- return card->recovery_task == current;
 }

 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
@@ -235,74 +199,139 @@
 }
 EXPORT_SYMBOL_GPL(qeth_threads_running);

-int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
-{
- if (qeth_is_recovery_task(card))
- return 0;
- return wait_event_interruptible(card->wait_q,
- qeth_threads_running(card, threads) == 0);
-}
-EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
-
-void qeth_clear_working_pool_list(struct qeth_card *card)
+static void qeth_clear_working_pool_list(struct qeth_card *card)
 {
 struct qeth_buffer_pool_entry *pool_entry, *tmp;
+ struct qeth_qdio_q *queue = card->qdio.in_q;
+ unsigned int i;

 QETH_CARD_TEXT(card, 5, "clwrklst");
 list_for_each_entry_safe(pool_entry, tmp,
- &card->qdio.in_buf_pool.entry_list, list){
- list_del(&pool_entry->list);
+ &card->qdio.in_buf_pool.entry_list, list)
+ list_del(&pool_entry->list);
+
+ if (!queue)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
+ queue->bufs[i].pool_entry = NULL;
+}
+
+static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
+ if (entry->elements[i])
+ __free_page(entry->elements[i]);
+ }
+
+ kfree(entry);
+}
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
+ init_list) {
+ list_del(&entry->init_list);
+ qeth_free_pool_entry(entry);
 }
 }
-EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+
+static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
+{
+ struct qeth_buffer_pool_entry *entry;
+ unsigned int i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ for (i = 0; i < pages; i++) {
+ entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
+
+ if (!entry->elements[i]) {
+ qeth_free_pool_entry(entry);
+ return NULL;
+ }
+ }
+
+ return entry;
+}

 static int qeth_alloc_buffer_pool(struct qeth_card *card)
 {
- struct qeth_buffer_pool_entry *pool_entry;
- void *ptr;
- int i, j;
+ unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ unsigned int i;

 QETH_CARD_TEXT(card, 5, "alocpool");
 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
- pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
- if (!pool_entry) {
+ struct qeth_buffer_pool_entry *entry;
+
+ entry = qeth_alloc_pool_entry(buf_elements);
+ if (!entry) {
 qeth_free_buffer_pool(card);
 return -ENOMEM;
 }
- for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
- ptr = (void *) __get_free_page(GFP_KERNEL);
- if (!ptr) {
- while (j > 0)
- free_page((unsigned long)
- pool_entry->elements[--j]);
- kfree(pool_entry);
- qeth_free_buffer_pool(card);
- return -ENOMEM;
- }
- pool_entry->elements[j] = ptr;
- }
- list_add(&pool_entry->init_list,
- &card->qdio.init_pool.entry_list);
+
+ list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
 }
 return 0;
 }

-int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
 {
+ unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
+ struct qeth_buffer_pool_entry *entry, *tmp;
+ int delta = count - pool->buf_count;
+ LIST_HEAD(entries);
+
 QETH_CARD_TEXT(card, 2, "realcbp");

- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ /* Defer until queue is allocated: */
+ if (!card->qdio.in_q)
+ goto out;

- /* TODO: steel/add buffers from/to a running card's buffer pool (?) */
- qeth_clear_working_pool_list(card);
- qeth_free_buffer_pool(card);
- card->qdio.in_buf_pool.buf_count = bufcnt;
- card->qdio.init_pool.buf_count = bufcnt;
- return qeth_alloc_buffer_pool(card);
+ /* Remove entries from the pool: */
+ while (delta < 0) {
+ entry = list_first_entry(&pool->entry_list,
+ struct qeth_buffer_pool_entry,
+ init_list);
+ list_del(&entry->init_list);
+ qeth_free_pool_entry(entry);
+
+ delta++;
+ }
+
+ /* Allocate additional entries: */
+ while (delta > 0) {
+ entry = qeth_alloc_pool_entry(buf_elements);
+ if (!entry) {
+ list_for_each_entry_safe(entry, tmp, &entries,
+ init_list) {
+ list_del(&entry->init_list);
+ qeth_free_pool_entry(entry);
+ }
+
+ return -ENOMEM;
+ }
+
+ list_add(&entry->init_list, &entries);
+
+ delta--;
+ }
+
+ list_splice(&entries, &pool->entry_list);
+
+out:
+ card->qdio.in_buf_pool.buf_count = count;
+ pool->buf_count = count;
+ return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
 {
@@ -338,7 +367,7 @@
 int rc;

 if (card->options.cq == QETH_CQ_ENABLED) {
- QETH_DBF_TEXT(SETUP, 2, "cqinit");
+ QETH_CARD_TEXT(card, 2, "cqinit");
 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
 QDIO_MAX_BUFFERS_PER_Q);
 card->qdio.c_q->next_buf_to_init = 127;
@@ -346,7 +375,7 @@
 card->qdio.no_in_queues - 1, 0,
 127);
 if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
 goto out;
 }
 }
@@ -363,7 +392,7 @@
 int i;
 struct qdio_outbuf_state *outbuf_states;

- QETH_DBF_TEXT(SETUP, 2, "cqon");
+ QETH_CARD_TEXT(card, 2, "cqon");
 card->qdio.c_q = qeth_alloc_qdio_queue();
 if (!card->qdio.c_q) {
 rc = -1;
@@ -385,11 +414,11 @@
 outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
 }
 } else {
- QETH_DBF_TEXT(SETUP, 2, "nocq");
+ QETH_CARD_TEXT(card, 2, "nocq");
 card->qdio.c_q = NULL;
 card->qdio.no_in_queues = 1;
 }
- QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+ QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
 rc = 0;
 out:
 return rc;
@@ -437,48 +466,14 @@
 return n;
 }

-static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
- int forced_cleanup)
-{
- if (q->card->options.cq != QETH_CQ_ENABLED)
- return;
-
- if (q->bufs[bidx]->next_pending != NULL) {
- struct qeth_qdio_out_buffer *head = q->bufs[bidx];
- struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
-
- while (c) {
- if (forced_cleanup ||
- atomic_read(&c->state) ==
- QETH_QDIO_BUF_HANDLED_DELAYED) {
- struct qeth_qdio_out_buffer *f = c;
- QETH_CARD_TEXT(f->q->card, 5, "fp");
- QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
- /* release here to avoid interleaving between
- outbound tasklet and inbound tasklet
- regarding notifications and lifecycle */
- qeth_release_skbs(c);
-
- c = f->next_pending;
- WARN_ON_ONCE(head->next_pending != f);
- head->next_pending = c;
- kmem_cache_free(qeth_qdio_outbuf_cache, f);
- } else {
- head = c;
- c = c->next_pending;
- }
-
- }
- }
-}
-
-
 static void qeth_qdio_handle_aob(struct qeth_card *card,
 unsigned long phys_aob_addr)
 {
+ enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
 struct qaob *aob;
 struct qeth_qdio_out_buffer *buffer;
 enum iucv_tx_notify notification;
+ struct qeth_qdio_out_q *queue;
 unsigned int i;

 aob = (struct qaob *) phys_to_virt(phys_aob_addr);
@@ -487,82 +482,87 @@
 buffer = (struct qeth_qdio_out_buffer *) aob->user1;
 QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

- if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
- QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
- notification = TX_NOTIFY_OK;
- } else {
- WARN_ON_ONCE(atomic_read(&buffer->state) !=
- QETH_QDIO_BUF_PENDING);
- atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
- notification = TX_NOTIFY_DELAYED_OK;
- }
-
- if (aob->aorc != 0) {
+ if (aob->aorc) {
 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
- notification = qeth_compute_cq_notification(aob->aorc, 1);
+ new_state = QETH_QDIO_BUF_QAOB_ERROR;
 }
- qeth_notify_skbs(buffer->q, buffer, notification);

- /* Free dangling allocations. The attached skbs are handled by
- * qeth_cleanup_handled_pending().
- */
- for (i = 0;
- i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
- i++) {
- if (aob->sba[i] && buffer->is_header[i])
- kmem_cache_free(qeth_core_header_cache,
- (void *) aob->sba[i]);
+ switch (atomic_xchg(&buffer->state, new_state)) {
+ case QETH_QDIO_BUF_PRIMED:
+ /* Faster than TX completion code, let it handle the async
+ * completion for us.
+ */
+ break;
+ case QETH_QDIO_BUF_PENDING:
+ /* TX completion code is active and will handle the async
+ * completion for us.
+ */
+ break;
+ case QETH_QDIO_BUF_NEED_QAOB:
+ /* TX completion code is already finished. */
+ notification = qeth_compute_cq_notification(aob->aorc, 1);
+ qeth_notify_skbs(buffer->q, buffer, notification);
+
+ /* Free dangling allocations. The attached skbs are handled by
+ * qeth_tx_complete_pending_bufs().
+ */
+ for (i = 0;
+ i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+ i++) {
+ void *data = phys_to_virt(aob->sba[i]);
+
+ if (data && buffer->is_header[i])
+ kmem_cache_free(qeth_core_header_cache, data);
+ }
+
+ queue = buffer->q;
+ atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
+ napi_schedule(&queue->napi);
+ break;
+ default:
+ WARN_ON_ONCE(1);
 }
- atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

 qdio_release_aob(aob);
 }

-static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
-{
- return card->options.cq == QETH_CQ_ENABLED &&
- card->qdio.c_q != NULL &&
- queue != 0 &&
- queue == card->qdio.no_in_queues - 1;
-}
-
-static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
+static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
+ void *data)
 {
 ccw->cmd_code = cmd_code;
- ccw->flags = CCW_FLAG_SLI;
+ ccw->flags = flags | CCW_FLAG_SLI;
 ccw->count = len;
 ccw->cda = (__u32) __pa(data);
 }

 static int __qeth_issue_next_read(struct qeth_card *card)
 {
- struct qeth_channel *channel = &card->read;
- struct qeth_cmd_buffer *iob;
+ struct qeth_cmd_buffer *iob = card->read_cmd;
+ struct qeth_channel *channel = iob->channel;
+ struct ccw1 *ccw = __ccw_from_cmd(iob);
 int rc;

 QETH_CARD_TEXT(card, 5, "issnxrd");
 if (channel->state != CH_STATE_UP)
 return -EIO;
- iob = qeth_get_buffer(channel);
- if (!iob) {
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
- "available\n", dev_name(&card->gdev->dev));
- return -ENOMEM;
- }
- qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+
+ memset(iob->data, 0, iob->length);
+ qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
+ iob->callback = qeth_issue_next_read_cb;
+ /* keep the cmd alive after completion: */
+ qeth_get_cmd(iob);
+
 QETH_CARD_TEXT(card, 6, "noirqpnd");
- rc = ccw_device_start(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0);
- if (rc) {
- QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
- "rc=%i\n", dev_name(&card->gdev->dev), rc);
- atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
+ rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
+ if (!rc) {
+ channel->active_cmd = iob;
+ } else {
+ QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
+ rc, CARD_DEVID(card));
+ qeth_unlock_channel(card, channel);
+ qeth_put_cmd(iob);
 card->read_or_write_problem = 1;
 qeth_schedule_recovery(card);
- wake_up(&card->wait_q);
 }
 return rc;
 }
@@ -578,340 +578,550 @@
 return ret;
 }

-static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+static void qeth_enqueue_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
 {
- struct qeth_reply *reply;
+ spin_lock_irq(&card->lock);
+ list_add_tail(&iob->list, &card->cmd_waiter_list);
+ spin_unlock_irq(&card->lock);
+}

- reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
- if (reply) {
- refcount_set(&reply->refcnt, 1);
- atomic_set(&reply->received, 0);
- reply->card = card;
+static void qeth_dequeue_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ spin_lock_irq(&card->lock);
+ list_del(&iob->list);
+ spin_unlock_irq(&card->lock);
+}
+
+void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
+{
+ iob->rc = reason;
+ complete(&iob->done);
+}
+EXPORT_SYMBOL_GPL(qeth_notify_cmd);
+
+static void qeth_flush_local_addrs4(struct qeth_card *card)
+{
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs4_lock);
+ hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
 }
- return reply;
+ spin_unlock_irq(&card->local_addrs4_lock);
 }

-static void qeth_get_reply(struct qeth_reply *reply)
+static void qeth_flush_local_addrs6(struct qeth_card *card)
 {
- refcount_inc(&reply->refcnt);
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs6_lock);
+ hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
+ }
+ spin_unlock_irq(&card->local_addrs6_lock);
 }

-static void qeth_put_reply(struct qeth_reply *reply)
+static void qeth_flush_local_addrs(struct qeth_card *card)
 {
- if (refcount_dec_and_test(&reply->refcnt))
- kfree(reply);
+ qeth_flush_local_addrs4(card);
+ qeth_flush_local_addrs6(card);
 }
+
+static void qeth_add_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
+ if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
+ hash_add_rcu(card->local_addrs4, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_add_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
+ if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ addr->addr = cmd->addrs[i].addr;
+ hash_add_rcu(card->local_addrs6, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static void qeth_del_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
+ unsigned int key = ipv4_addr_hash(addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == addr->addr) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_del_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
+ u32 key = ipv6_addr_hash(&addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ bool is_local = false;
+ unsigned int key;
+ __be32 next_hop;
+
+ if (hash_empty(card->local_addrs4))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
+ key = ipv4_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == next_hop) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ struct in6_addr *next_hop;
+ bool is_local = false;
+ u32 key;
+
+ if (hash_empty(card->local_addrs6))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
+ key = ipv6_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, next_hop)) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
+{
+ struct qeth_card *card = m->private;
+ struct qeth_local_addr *tmp;
+ unsigned int i;
+
+ rcu_read_lock();
+ hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
+ seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
+ hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
+ seq_printf(m, "%pI6c\n", &tmp->addr);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
 struct qeth_card *card)
 {
 const char *ipa_name;
 int com = cmd->hdr.command;
+
 ipa_name = qeth_get_ipa_cmd_name(com);
+
 if (rc)
- QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
- "x%X \"%s\"\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card), rc,
- qeth_get_ipa_msg(rc));
+ QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
+ ipa_name, com, CARD_DEVID(card), rc,
+ qeth_get_ipa_msg(rc));
 else
- QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
+ ipa_name, com, CARD_DEVID(card));
 }

 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_ipa_cmd *cmd)
 {
- struct qeth_ipa_cmd *cmd = NULL;
-
 QETH_CARD_TEXT(card, 5, "chkipad");
- if (IS_IPA(iob->data)) {
- cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
- if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command != IPA_CMD_SETCCID &&
- cmd->hdr.command != IPA_CMD_DELCCID &&
- cmd->hdr.command != IPA_CMD_MODCCID &&
- cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
- qeth_issue_ipa_msg(cmd,
- cmd->hdr.return_code, card);
- return cmd;
- } else {
- switch (cmd->hdr.command) {
- case IPA_CMD_STOPLAN:
- if (cmd->hdr.return_code ==
- IPA_RC_VEPA_TO_VEB_TRANSITION) {
- dev_err(&card->gdev->dev,
- "Interface %s is down because the "
- "adjacent port is no longer in "
- "reflective relay mode\n",
- QETH_CARD_IFNAME(card));
- qeth_close_dev(card);
- } else {
- dev_warn(&card->gdev->dev,
- "The link for interface %s on CHPID"
- " 0x%X failed\n",
- QETH_CARD_IFNAME(card),
- card->info.chpid);
- qeth_issue_ipa_msg(cmd,
- cmd->hdr.return_code, card);
- }
- card->lan_online = 0;
- netif_carrier_off(card->dev);
- return NULL;
- case IPA_CMD_STARTLAN:
- dev_info(&card->gdev->dev,
- "The link for %s on CHPID 0x%X has"
- " been restored\n",
- QETH_CARD_IFNAME(card),
- card->info.chpid);
- netif_carrier_on(card->dev);
- card->lan_online = 1;
- if (card->info.hwtrap)
- card->info.hwtrap = 2;
- qeth_schedule_recovery(card);
- return NULL;
- case IPA_CMD_SETBRIDGEPORT_IQD:
- case IPA_CMD_SETBRIDGEPORT_OSA:
- case IPA_CMD_ADDRESS_CHANGE_NOTIF:
- if (card->discipline->control_event_handler
- (card, cmd))
- return cmd;
- else
- return NULL;
- case IPA_CMD_MODCCID:
- return cmd;
- case IPA_CMD_REGISTER_LOCAL_ADDR:
- QETH_CARD_TEXT(card, 3, "irla");
- break;
- case IPA_CMD_UNREGISTER_LOCAL_ADDR:
- QETH_CARD_TEXT(card, 3, "urla");
- break;
- default:
- QETH_DBF_MESSAGE(2, "Received data is IPA "
- "but not a reply!\n");
- break;
- }
- }
+
+ if (IS_IPA_REPLY(cmd)) {
+ if (cmd->hdr.command != IPA_CMD_SETCCID &&
+ cmd->hdr.command != IPA_CMD_DELCCID &&
+ cmd->hdr.command != IPA_CMD_MODCCID &&
+ cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+ qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
+ return cmd;
 }
- return cmd;
+
+ /* handle unsolicited event: */
+ switch (cmd->hdr.command) {
+ case IPA_CMD_STOPLAN:
+ if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
+ dev_err(&card->gdev->dev,
+ "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+ netdev_name(card->dev));
+ schedule_work(&card->close_dev_work);
+ } else {
+ dev_warn(&card->gdev->dev,
+ "The link for interface %s on CHPID 0x%X failed\n",
+ netdev_name(card->dev), card->info.chpid);
+ qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
+ netif_carrier_off(card->dev);
+ }
+ return NULL;
+ case IPA_CMD_STARTLAN:
+ dev_info(&card->gdev->dev,
+ "The link for %s on CHPID 0x%X has been restored\n",
+ netdev_name(card->dev), card->info.chpid);
+ if (card->info.hwtrap)
+ card->info.hwtrap = 2;
+ qeth_schedule_recovery(card);
+ return NULL;
+ case IPA_CMD_SETBRIDGEPORT_IQD:
+ case IPA_CMD_SETBRIDGEPORT_OSA:
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ if (card->discipline->control_event_handler(card, cmd))
+ return cmd;
+ return NULL;
+ case IPA_CMD_MODCCID:
+ return cmd;
+ case IPA_CMD_REGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
+
+ QETH_CARD_TEXT(card, 3, "irla");
+ return NULL;
+ case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
+
+ QETH_CARD_TEXT(card, 3, "urla");
+ return NULL;
+ default:
+ QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
+ return cmd;
+ }
 }

-void qeth_clear_ipacmd_list(struct qeth_card *card)
+static void qeth_clear_ipacmd_list(struct qeth_card *card)
 {
- struct qeth_reply *reply, *r;
+ struct qeth_cmd_buffer *iob;
 unsigned long flags;

 QETH_CARD_TEXT(card, 4, "clipalst");

 spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
- qeth_get_reply(reply);
- reply->rc = -EIO;
- atomic_inc(&reply->received);
- list_del_init(&reply->list);
- wake_up(&reply->wait_q);
- qeth_put_reply(reply);
- }
+ list_for_each_entry(iob, &card->cmd_waiter_list, list)
+ qeth_notify_cmd(iob, -ECANCELED);
 spin_unlock_irqrestore(&card->lock, flags);
 }
-EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

 static int qeth_check_idx_response(struct qeth_card *card,
 unsigned char *buffer)
 {
- if (!buffer)
- return 0;
-
 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
- if ((buffer[2] & 0xc0) == 0xc0) {
- QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
+ if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
+ QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
 buffer[4]);
 QETH_CARD_TEXT(card, 2, "ckidxres");
 QETH_CARD_TEXT(card, 2, " idxterm");
- QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
- if (buffer[4] == 0xf6) {
+ QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
+ if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
+ buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
 dev_err(&card->gdev->dev,
- "The qeth device is not configured "
- "for the OSI layer required by z/VM\n");
- return -EPERM;
+ "The device does not support the configured transport mode\n");
+ return -EPROTONOSUPPORT;
 }
 return -EIO;
 }
 return 0;
 }

-static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
+void qeth_put_cmd(struct qeth_cmd_buffer *iob)
 {
- struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
- dev_get_drvdata(&cdev->dev))->dev);
- return card;
+ if (refcount_dec_and_test(&iob->ref_count)) {
+ kfree(iob->data);
+ kfree(iob);
+ }
+}
+EXPORT_SYMBOL_GPL(qeth_put_cmd);
+
+static void qeth_release_buffer_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
+{
+ qeth_put_cmd(iob);
 }

-static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
+static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
 {
- __u8 index;
-
- QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
- index = channel->io_buf_no;
- do {
- if (channel->iob[index].state == BUF_STATE_FREE) {
- channel->iob[index].state = BUF_STATE_LOCKED;
- channel->io_buf_no = (channel->io_buf_no + 1) %
- QETH_CMD_BUFFER_NO;
- memset(channel->iob[index].data, 0, QETH_BUFSIZE);
- return channel->iob + index;
- }
- index = (index + 1) % QETH_CMD_BUFFER_NO;
- } while (index != channel->io_buf_no);
-
- return NULL;
+ qeth_notify_cmd(iob, rc);
+ qeth_put_cmd(iob);
 }

-void qeth_release_buffer(struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length, unsigned int ccws,
+ long timeout)
 {
+ struct qeth_cmd_buffer *iob;
+
+ if (length > QETH_BUFSIZE)
+ return NULL;
+
+ iob = kzalloc(sizeof(*iob), GFP_KERNEL);
+ if (!iob)
+ return NULL;
+
+ iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
+ GFP_KERNEL | GFP_DMA);
+ if (!iob->data) {
+ kfree(iob);
+ return NULL;
+ }
+
+ init_completion(&iob->done);
+ spin_lock_init(&iob->lock);
+ INIT_LIST_HEAD(&iob->list);
+ refcount_set(&iob->ref_count, 1);
+ iob->channel = channel;
+ iob->timeout = timeout;
+ iob->length = length;
+ return iob;
+}
+EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
+
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
+{
+ struct qeth_cmd_buffer *request = NULL;
+ struct qeth_ipa_cmd *cmd = NULL;
+ struct qeth_reply *reply = NULL;
+ struct qeth_cmd_buffer *tmp;
 unsigned long flags;
-
- QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
- spin_lock_irqsave(&channel->iob_lock, flags);
- memset(iob->data, 0, QETH_BUFSIZE);
- iob->state = BUF_STATE_FREE;
- iob->callback = qeth_send_control_data_cb;
- iob->rc = 0;
- spin_unlock_irqrestore(&channel->iob_lock, flags);
- wake_up(&channel->wait_q);
-}
-EXPORT_SYMBOL_GPL(qeth_release_buffer);
-
-static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
-{
- struct qeth_cmd_buffer *buffer = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&channel->iob_lock, flags);
- buffer = __qeth_get_buffer(channel);
- spin_unlock_irqrestore(&channel->iob_lock, flags);
- return buffer;
-}
-
-struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
-{
- struct qeth_cmd_buffer *buffer;
- wait_event(channel->wait_q,
- ((buffer = qeth_get_buffer(channel)) != NULL));
- return buffer;
-}
-EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
-
-void qeth_clear_cmd_buffers(struct qeth_channel *channel)
-{
- int cnt;
-
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
- qeth_release_buffer(channel, &channel->iob[cnt]);
- channel->io_buf_no = 0;
-}
-EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
-
-static void qeth_send_control_data_cb(struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
-{
- struct qeth_card *card;
- struct qeth_reply *reply, *r;
- struct qeth_ipa_cmd *cmd;
- unsigned long flags;
- int keep_reply;
 int rc = 0;

- card = CARD_FROM_CDEV(channel->ccwdev);
 QETH_CARD_TEXT(card, 4, "sndctlcb");
 rc = qeth_check_idx_response(card, iob->data);
 switch (rc) {
 case 0:
 break;
 case -EIO:
- qeth_clear_ipacmd_list(card);
 qeth_schedule_recovery(card);
- /* fall through */
+ fallthrough;
 default:
- goto out;
+ qeth_clear_ipacmd_list(card);
+ goto err_idx;
 }

- cmd = qeth_check_ipa_data(card, iob);
- if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
- goto out;
- /*in case of OSN : check if cmd is set */
- if (card->info.type == QETH_CARD_TYPE_OSN &&
- cmd &&
- cmd->hdr.command != IPA_CMD_STARTLAN &&
- card->osn_info.assist_cb != NULL) {
- card->osn_info.assist_cb(card->dev, cmd);
- goto out;
- }
-
- spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
- if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
- ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
- qeth_get_reply(reply);
- list_del_init(&reply->list);
- spin_unlock_irqrestore(&card->lock, flags);
- keep_reply = 0;
- if (reply->callback != NULL) {
- if (cmd) {
- reply->offset = (__u16)((char *)cmd -
- (char *)iob->data);
- keep_reply = reply->callback(card,
- reply,
- (unsigned long)cmd);
- } else
- keep_reply = reply->callback(card,
- reply,
- (unsigned long)iob);
- }
- if (cmd)
- reply->rc = (u16) cmd->hdr.return_code;
- else if (iob->rc)
- reply->rc = iob->rc;
- if (keep_reply) {
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list,
- &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
- } else {
- atomic_inc(&reply->received);
- wake_up(&reply->wait_q);
- }
- qeth_put_reply(reply);
+ cmd = __ipa_reply(iob);
+ if (cmd) {
+ cmd = qeth_check_ipa_data(card, cmd);
+ if (!cmd)
+ goto out;
+ if (IS_OSN(card) && card->osn_info.assist_cb &&
+ cmd->hdr.command != IPA_CMD_STARTLAN) {
+ card->osn_info.assist_cb(card->dev, cmd);
 goto out;
 }
 }
+
+ /* match against pending cmd requests */
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
+ if (tmp->match && tmp->match(tmp, iob)) {
+ request = tmp;
+ /* take the object outside the lock */
+ qeth_get_cmd(request);
+ break;
+ }
+ }
 spin_unlock_irqrestore(&card->lock, flags);
+
+ if (!request)
+ goto out;
+
+ reply = &request->reply;
+ if (!reply->callback) {
+ rc = 0;
+ goto no_callback;
+ }
+
+ spin_lock_irqsave(&request->lock, flags);
+ if (request->rc)
+ /* Bail out when the requestor has already left: */
+ rc = request->rc;
+ else
+ rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
+ (unsigned long)iob);
+ spin_unlock_irqrestore(&request->lock, flags);
+
+no_callback:
+ if (rc <= 0)
+ qeth_notify_cmd(request, rc);
+ qeth_put_cmd(request);
 out:
 memcpy(&card->seqno.pdu_hdr_ack,
 QETH_PDU_HEADER_SEQ_NO(iob->data),
 QETH_SEQ_NO_LENGTH);
- qeth_release_buffer(channel, iob);
+ __qeth_issue_next_read(card);
+err_idx:
+ qeth_put_cmd(iob);
 }

 static int qeth_set_thread_start_bit(struct qeth_card *card,
 unsigned long thread)
 {
 unsigned long flags;
+ int rc = 0;

 spin_lock_irqsave(&card->thread_mask_lock, flags);
- if (!(card->thread_allowed_mask & thread) ||
- (card->thread_start_mask & thread)) {
- spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return -EPERM;
- }
- card->thread_start_mask |= thread;
+ if (!(card->thread_allowed_mask & thread))
+ rc = -EPERM;
+ else if (card->thread_start_mask & thread)
+ rc = -EBUSY;
+ else
+ card->thread_start_mask |= thread;
 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return 0;
+
+ return rc;
 }

-void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_start_bit(struct qeth_card *card,
+ unsigned long thread)
 {
 unsigned long flags;

@@ -920,9 +1130,9 @@
 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 wake_up(&card->wait_q);
 }
-EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

-void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_running_bit(struct qeth_card *card,
+ unsigned long thread)
 {
 unsigned long flags;

@@ -931,7 +1141,6 @@
 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 wake_up_all(&card->wait_q);
 }
-EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
 {
@@ -952,7 +1161,7 @@
 return rc;
 }

-int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
 {
 int rc = 0;

@@ -960,26 +1169,29 @@
 (rc = __qeth_do_run_thread(card, thread)) >= 0);
 return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_do_run_thread);

-void qeth_schedule_recovery(struct qeth_card *card)
+int qeth_schedule_recovery(struct qeth_card *card)
 {
- QETH_CARD_TEXT(card, 2, "startrec");
- if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
- schedule_work(&card->kernel_thread_starter);
-}
-EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
+ int rc;

-static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
+ QETH_CARD_TEXT(card, 2, "startrec");
+
+ rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
+ if (!rc)
+ schedule_work(&card->kernel_thread_starter);
+
+ return rc;
+}
+
+static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
+ struct irb *irb)
 {
 int dstat, cstat;
 char *sense;
- struct qeth_card *card;

 sense = (char *) irb->ecw;
 cstat = irb->scsw.cmd.cstat;
 dstat = irb->scsw.cmd.dstat;
- card = CARD_FROM_CDEV(cdev);

 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -987,74 +1199,64 @@
 QETH_CARD_TEXT(card, 2, "CGENCHK");
 dev_warn(&cdev->dev, "The qeth device driver "
 "failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
- dev_name(&cdev->dev), dstat, cstat);
+ QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
+ CCW_DEVID(cdev), dstat, cstat);
 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
 16, 1, irb, 64, 1);
- return 1;
+ return -EIO;
 }

 if (dstat & DEV_STAT_UNIT_CHECK) {
 if (sense[SENSE_RESETTING_EVENT_BYTE] &
 SENSE_RESETTING_EVENT_FLAG) {
 QETH_CARD_TEXT(card, 2, "REVIND");
- return 1;
+ return -EIO;
 }
 if (sense[SENSE_COMMAND_REJECT_BYTE] &
 SENSE_COMMAND_REJECT_FLAG) {
 QETH_CARD_TEXT(card, 2, "CMDREJi");
- return 1;
+ return -EIO;
 }
 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
 QETH_CARD_TEXT(card, 2, "AFFE");
- return 1;
+ return -EIO;
 }
 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
 QETH_CARD_TEXT(card, 2, "ZEROSEN");
 return 0;
 }
 QETH_CARD_TEXT(card, 2, "DGENCHK");
- return 1;
+ return -EIO;
 }
 return 0;
 }

-static long __qeth_check_irb_error(struct ccw_device *cdev,
- unsigned long intparm, struct irb *irb)
+static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
+ struct irb *irb)
 {
- struct qeth_card *card;
-
- card = CARD_FROM_CDEV(cdev);
-
- if (!card || !IS_ERR(irb))
+ if (!IS_ERR(irb))
 return 0;

 switch (PTR_ERR(irb)) {
 case -EIO:
- QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
- dev_name(&cdev->dev));
+ QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
+ CCW_DEVID(cdev));
 QETH_CARD_TEXT(card, 2, "ckirberr");
 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
- break;
+ return -EIO;
 case -ETIMEDOUT:
 dev_warn(&cdev->dev, "A hardware operation timed out"
 " on the device\n");
 QETH_CARD_TEXT(card, 2, "ckirberr");
 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
- if (intparm == QETH_RCD_PARM) {
- if (card->data.ccwdev == cdev) {
- card->data.state = CH_STATE_DOWN;
- wake_up(&card->wait_q);
- }
- }
- break;
+ return -ETIMEDOUT;
 default:
- QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
- dev_name(&cdev->dev), PTR_ERR(irb));
+ QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
+ PTR_ERR(irb), CCW_DEVID(cdev));
 QETH_CARD_TEXT(card, 2, "ckirberr");
 QETH_CARD_TEXT(card, 2, " rc???");
+ return PTR_ERR(irb);
 }
- return PTR_ERR(irb);
 }

 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
@@ -1063,12 +1265,13 @@
 int rc;
 int cstat, dstat;
 struct qeth_cmd_buffer *iob = NULL;
+ struct ccwgroup_device *gdev;
 struct qeth_channel *channel;
 struct qeth_card *card;

- card = CARD_FROM_CDEV(cdev);
- if (!card)
- return;
+ /* while we hold the ccwdev lock, this stays valid: */
+ gdev = dev_get_drvdata(&cdev->dev);
+ card = dev_get_drvdata(&gdev->dev);

 QETH_CARD_TEXT(card, 5, "irq");

@@ -1083,40 +1286,45 @@
 QETH_CARD_TEXT(card, 5, "data");
 }

- if (qeth_intparm_is_iob(intparm))
- iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+ if (intparm == 0) {
+ QETH_CARD_TEXT(card, 5, "irqunsol");
+ } else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
+ QETH_CARD_TEXT(card, 5, "irqunexp");

- if (__qeth_check_irb_error(cdev, intparm, irb)) {
+ dev_err(&cdev->dev,
+ "Received IRQ with intparm %lx, expected %px\n",
+ intparm, channel->active_cmd);
+ if (channel->active_cmd)
+ qeth_cancel_cmd(channel->active_cmd, -EIO);
+ } else {
+ iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
+ }
+
+ channel->active_cmd = NULL;
+ qeth_unlock_channel(card, channel);
+
+ rc = qeth_check_irb_error(card, cdev, irb);
+ if (rc) {
 /* IO was terminated, free its resources. */
 if (iob)
- qeth_release_buffer(iob->channel, iob);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
+ qeth_cancel_cmd(iob, rc);
 return;
 }

- atomic_set(&channel->irq_pending, 0);
-
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
 channel->state = CH_STATE_STOPPED;
-
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
- channel->state = CH_STATE_HALTED;
-
- /*let's wake up immediately on data channel*/
- if ((channel == &card->data) && (intparm != 0) &&
- (intparm != QETH_RCD_PARM))
- goto out;
-
- if (intparm == QETH_CLEAR_CHANNEL_PARM) {
- QETH_CARD_TEXT(card, 6, "clrchpar");
- /* we don't have to handle this further */
- intparm = 0;
+ wake_up(&card->wait_q);
 }
- if (intparm == QETH_HALT_CHANNEL_PARM) {
- QETH_CARD_TEXT(card, 6, "hltchpar");
- /* we don't have to handle this further */
- intparm = 0;
+
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+ channel->state = CH_STATE_HALTED;
+ wake_up(&card->wait_q);
+ }
+
+ if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
+ SCSW_FCTL_HALT_FUNC))) {
+ qeth_cancel_cmd(iob, -ECANCELED);
+ iob = NULL;
 }

 cstat = irb->scsw.cmd.cstat;
@@ -1129,45 +1337,36 @@
 dev_warn(&channel->ccwdev->dev,
 "The qeth device driver failed to recover "
 "an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
- "0x%X dstat 0x%X\n",
- dev_name(&channel->ccwdev->dev), cstat, dstat);
+ QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
+ CCW_DEVID(channel->ccwdev), cstat,
+ dstat);
 print_hex_dump(KERN_WARNING, "qeth: irb ",
 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
 print_hex_dump(KERN_WARNING, "qeth: sense data ",
 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
 }
- if (intparm == QETH_RCD_PARM) {
- channel->state = CH_STATE_DOWN;
- goto out;
- }
- rc = qeth_get_problem(cdev, irb);
+
+ rc = qeth_get_problem(card, cdev, irb);
 if (rc) {
 card->read_or_write_problem = 1;
 if (iob)
- qeth_release_buffer(iob->channel, iob);
+ qeth_cancel_cmd(iob, rc);
 qeth_clear_ipacmd_list(card);
 qeth_schedule_recovery(card);
- goto out;
+ return;
 }
 }

- if (intparm == QETH_RCD_PARM) {
- channel->state = CH_STATE_RCD_DONE;
- goto out;
+ if (iob) {
+ /* sanity check: */
+ if (irb->scsw.cmd.count > iob->length) {
+ qeth_cancel_cmd(iob, -EIO);
+ return;
+ }
+ if (iob->callback)
+ iob->callback(card, iob,
+ iob->length - irb->scsw.cmd.count);
 }
- if (channel == &card->data)
- return;
- if (channel == &card->read &&
- channel->state == CH_STATE_UP)
- __qeth_issue_next_read(card);
-
- if (iob && iob->callback)
- iob->callback(iob->channel, iob);
-
-out:
- wake_up(&card->wait_q);
- return;
 }

 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
@@ -1176,58 +1375,57 @@
 {
 struct sk_buff *skb;

- if (skb_queue_empty(&buf->skb_list))
- goto out;
- skb = skb_peek(&buf->skb_list);
- while (skb) {
+ skb_queue_walk(&buf->skb_list, skb) {
 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
- if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
- if (skb->sk) {
- struct iucv_sock *iucv = iucv_sk(skb->sk);
- iucv->sk_txnotify(skb, notification);
- }
- }
- if (skb_queue_is_last(&buf->skb_list, skb))
- skb = NULL;
- else
- skb = skb_queue_next(&buf->skb_list, skb);
+ if (skb->sk && skb->sk->sk_family == PF_IUCV)
+ iucv_sk(skb->sk)->sk_txnotify(skb, notification);
 }
-out:
- return;
 }

-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+ int budget)
 {
+ struct qeth_qdio_out_q *queue = buf->q;
 struct sk_buff *skb;
- struct iucv_sock *iucv;
- int notify_general_error = 0;

- if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
- notify_general_error = 1;
+ /* Empty buffer? */
+ if (buf->next_element_to_fill == 0)
+ return;

- /* release may never happen from within CQ tasklet scope */
- WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+ QETH_TXQ_STAT_INC(queue, bufs);
+ QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+ if (error) {
+ QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
+ } else {
+ QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
+ }

- skb = skb_dequeue(&buf->skb_list);
- while (skb) {
- QETH_CARD_TEXT(buf->q->card, 5, "skbr");
- QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
- if (notify_general_error &&
- be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
- if (skb->sk) {
- iucv = iucv_sk(skb->sk);
- iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+ while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
+ unsigned int bytes = qdisc_pkt_len(skb);
+ bool is_tso = skb_is_gso(skb);
+ unsigned int packets;
+
+ packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
+ if (!error) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
+ if (skb_is_nonlinear(skb))
+ QETH_TXQ_STAT_INC(queue, skbs_sg);
+ if (is_tso) {
+ QETH_TXQ_STAT_INC(queue, skbs_tso);
+ QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
 }
 }
- refcount_dec(&skb->users);
- dev_kfree_skb_any(skb);
- skb = skb_dequeue(&buf->skb_list);
+
+ napi_consume_skb(skb, budget);
 }
 }

 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf)
+ struct qeth_qdio_out_buffer *buf,
+ bool error, int budget)
 {
 int i;

....@@ -1235,30 +1433,56 @@
12351433 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
12361434 atomic_dec(&queue->set_pci_flags_count);
12371435
1238
- qeth_release_skbs(buf);
1436
+ qeth_tx_complete_buf(buf, error, budget);
12391437
1240
- for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
1241
- if (buf->buffer->element[i].addr && buf->is_header[i])
1242
- kmem_cache_free(qeth_core_header_cache,
1243
- buf->buffer->element[i].addr);
1438
+ for (i = 0; i < queue->max_elements; ++i) {
1439
+ void *data = phys_to_virt(buf->buffer->element[i].addr);
1440
+
1441
+ if (data && buf->is_header[i])
1442
+ kmem_cache_free(qeth_core_header_cache, data);
12441443 buf->is_header[i] = 0;
12451444 }
12461445
1247
- qeth_scrub_qdio_buffer(buf->buffer,
1248
- QETH_MAX_BUFFER_ELEMENTS(queue->card));
1446
+ qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
12491447 buf->next_element_to_fill = 0;
1448
+ buf->frames = 0;
1449
+ buf->bytes = 0;
12501450 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
12511451 }
12521452
1253
-static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
1453
+static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
1454
+ struct qeth_qdio_out_q *queue,
1455
+ bool drain)
1456
+{
1457
+ struct qeth_qdio_out_buffer *buf, *tmp;
1458
+
1459
+ list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
1460
+ if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
1461
+ QETH_CARD_TEXT(card, 5, "fp");
1462
+ QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
1463
+
1464
+ if (drain)
1465
+ qeth_notify_skbs(queue, buf,
1466
+ TX_NOTIFY_GENERALERROR);
1467
+ qeth_tx_complete_buf(buf, drain, 0);
1468
+
1469
+ list_del(&buf->list_entry);
1470
+ kmem_cache_free(qeth_qdio_outbuf_cache, buf);
1471
+ }
1472
+ }
1473
+}
1474
+
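qeth_tx_complete_pending_bufs() walks the pending list with list_for_each_entry_safe() because entries are unlinked and freed during the walk. A small sketch of the same iterate-and-remove idea, using a pointer-to-pointer walk over a plain singly linked list instead of the kernel list helpers (all names hypothetical):

/*
 * Hypothetical drain pattern: free completed entries in place, or all
 * entries when draining, without losing the iteration cursor.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_buf {
	struct pending_buf *next;
	bool empty;		/* completion already observed */
	int id;
};

static void drain_pending(struct pending_buf **head, bool drain)
{
	struct pending_buf **link = head;

	while (*link) {
		struct pending_buf *buf = *link;

		if (drain || buf->empty) {
			*link = buf->next;	/* unlink before freeing */
			printf("freeing buf %d\n", buf->id);
			free(buf);
		} else {
			link = &buf->next;
		}
	}
}

int main(void)
{
	struct pending_buf *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct pending_buf *b = calloc(1, sizeof(*b));

		b->id = i;
		b->empty = (i == 1);
		b->next = head;
		head = b;
	}

	drain_pending(&head, false);	/* frees only the completed entry */
	drain_pending(&head, true);	/* drains the rest */
	return 0;
}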
1475
+static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
12541476 {
12551477 int j;
1478
+
1479
+ qeth_tx_complete_pending_bufs(q->card, q, true);
12561480
12571481 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
12581482 if (!q->bufs[j])
12591483 continue;
1260
- qeth_cleanup_handled_pending(q, j, 1);
1261
- qeth_clear_output_buffer(q, q->bufs[j]);
1484
+
1485
+ qeth_clear_output_buffer(q, q->bufs[j], true, 0);
12621486 if (free) {
12631487 kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
12641488 q->bufs[j] = NULL;
....@@ -1266,111 +1490,32 @@
12661490 }
12671491 }
12681492
1269
-void qeth_clear_qdio_buffers(struct qeth_card *card)
1493
+static void qeth_drain_output_queues(struct qeth_card *card)
12701494 {
12711495 int i;
12721496
12731497 QETH_CARD_TEXT(card, 2, "clearqdbf");
12741498 /* clear outbound buffers to free skbs */
12751499 for (i = 0; i < card->qdio.no_out_queues; ++i) {
1276
- if (card->qdio.out_qs[i]) {
1277
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
1278
- }
1279
- }
1280
-}
1281
-EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
1282
-
1283
-static void qeth_free_buffer_pool(struct qeth_card *card)
1284
-{
1285
- struct qeth_buffer_pool_entry *pool_entry, *tmp;
1286
- int i = 0;
1287
- list_for_each_entry_safe(pool_entry, tmp,
1288
- &card->qdio.init_pool.entry_list, init_list){
1289
- for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
1290
- free_page((unsigned long)pool_entry->elements[i]);
1291
- list_del(&pool_entry->init_list);
1292
- kfree(pool_entry);
1500
+ if (card->qdio.out_qs[i])
1501
+ qeth_drain_output_queue(card->qdio.out_qs[i], false);
12931502 }
12941503 }
12951504
1296
-static void qeth_clean_channel(struct qeth_channel *channel)
1505
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
12971506 {
1298
- struct ccw_device *cdev = channel->ccwdev;
1299
- int cnt;
1507
+ unsigned int max = single ? 1 : card->dev->num_tx_queues;
13001508
1301
- QETH_DBF_TEXT(SETUP, 2, "freech");
1509
+ if (card->qdio.no_out_queues == max)
1510
+ return;
13021511
1303
- spin_lock_irq(get_ccwdev_lock(cdev));
1304
- cdev->handler = NULL;
1305
- spin_unlock_irq(get_ccwdev_lock(cdev));
1512
+ if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1513
+ qeth_free_qdio_queues(card);
13061514
1307
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1308
- kfree(channel->iob[cnt].data);
1309
- kfree(channel->ccw);
1310
-}
1311
-
1312
-static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
1313
-{
1314
- struct ccw_device *cdev = channel->ccwdev;
1315
- int cnt;
1316
-
1317
- QETH_DBF_TEXT(SETUP, 2, "setupch");
1318
-
1319
- channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1320
- if (!channel->ccw)
1321
- return -ENOMEM;
1322
- channel->state = CH_STATE_DOWN;
1323
- atomic_set(&channel->irq_pending, 0);
1324
- init_waitqueue_head(&channel->wait_q);
1325
-
1326
- spin_lock_irq(get_ccwdev_lock(cdev));
1327
- cdev->handler = qeth_irq;
1328
- spin_unlock_irq(get_ccwdev_lock(cdev));
1329
-
1330
- if (!alloc_buffers)
1331
- return 0;
1332
-
1333
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
1334
- channel->iob[cnt].data =
1335
- kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
1336
- if (channel->iob[cnt].data == NULL)
1337
- break;
1338
- channel->iob[cnt].state = BUF_STATE_FREE;
1339
- channel->iob[cnt].channel = channel;
1340
- channel->iob[cnt].callback = qeth_send_control_data_cb;
1341
- channel->iob[cnt].rc = 0;
1342
- }
1343
- if (cnt < QETH_CMD_BUFFER_NO) {
1344
- qeth_clean_channel(channel);
1345
- return -ENOMEM;
1346
- }
1347
- channel->io_buf_no = 0;
1348
- spin_lock_init(&channel->iob_lock);
1349
-
1350
- return 0;
1351
-}
1352
-
1353
-static void qeth_set_single_write_queues(struct qeth_card *card)
1354
-{
1355
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1356
- (card->qdio.no_out_queues == 4))
1357
- qeth_free_qdio_buffers(card);
1358
-
1359
- card->qdio.no_out_queues = 1;
1360
- if (card->qdio.default_out_queue != 0)
1515
+ if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
13611516 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
13621517
1363
- card->qdio.default_out_queue = 0;
1364
-}
1365
-
1366
-static void qeth_set_multiple_write_queues(struct qeth_card *card)
1367
-{
1368
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1369
- (card->qdio.no_out_queues == 1)) {
1370
- qeth_free_qdio_buffers(card);
1371
- card->qdio.default_out_queue = 2;
1372
- }
1373
- card->qdio.no_out_queues = 4;
1518
+ card->qdio.no_out_queues = max;
13741519 }
13751520
13761521 static int qeth_update_from_chp_desc(struct qeth_card *card)
....@@ -1378,7 +1523,7 @@
13781523 struct ccw_device *ccwdev;
13791524 struct channel_path_desc_fmt0 *chp_dsc;
13801525
1381
- QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1526
+ QETH_CARD_TEXT(card, 2, "chp_desc");
13821527
13831528 ccwdev = card->data.ccwdev;
13841529 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
....@@ -1386,33 +1531,28 @@
13861531 return -ENOMEM;
13871532
13881533 card->info.func_level = 0x4100 + chp_dsc->desc;
1389
- if (card->info.type == QETH_CARD_TYPE_IQD)
1390
- goto out;
13911534
1392
- /* CHPP field bit 6 == 1 -> single queue */
1393
- if ((chp_dsc->chpp & 0x02) == 0x02)
1394
- qeth_set_single_write_queues(card);
1395
- else
1396
- qeth_set_multiple_write_queues(card);
1397
-out:
1535
+ if (IS_OSD(card) || IS_OSX(card))
1536
+ /* CHPP field bit 6 == 1 -> single queue */
1537
+ qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1538
+
13981539 kfree(chp_dsc);
1399
- QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1400
- QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1540
+ QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
1541
+ QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
14011542 return 0;
14021543 }
14031544
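The chpp & 0x02 test above implements the "bit 6" rule from the comment: channel-subsystem bit numbering starts at the most significant bit, so bit 6 of the CHPP byte is mask 0x02. A small sketch of the resulting queue-count decision (the values and names are invented for illustration):

/*
 * Hypothetical decode of the CHPP single-queue bit: mask 0x02 selects a
 * single TX queue, otherwise the netdev's full TX queue count is used.
 */
#include <stdbool.h>
#include <stdio.h>

#define CHPP_SINGLE_QUEUE	0x02	/* "bit 6" in MSB-first numbering */

static unsigned int tx_queue_count(unsigned char chpp,
				   unsigned int num_tx_queues)
{
	bool single = chpp & CHPP_SINGLE_QUEUE;

	return single ? 1 : num_tx_queues;
}

int main(void)
{
	printf("chpp=0x00 -> %u queues\n", tx_queue_count(0x00, 4));
	printf("chpp=0x02 -> %u queues\n", tx_queue_count(0x02, 4));
	return 0;
}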
14041545 static void qeth_init_qdio_info(struct qeth_card *card)
14051546 {
1406
- QETH_DBF_TEXT(SETUP, 4, "intqdinf");
1547
+ QETH_CARD_TEXT(card, 4, "intqdinf");
14071548 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
14081549 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
14091550 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1410
- card->qdio.no_out_queues = QETH_MAX_QUEUES;
14111551
14121552 /* inbound */
14131553 card->qdio.no_in_queues = 1;
14141554 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1415
- if (card->info.type == QETH_CARD_TYPE_IQD)
1555
+ if (IS_IQD(card))
14161556 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
14171557 else
14181558 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
....@@ -1425,9 +1565,9 @@
14251565 {
14261566 card->options.route4.type = NO_ROUTER;
14271567 card->options.route6.type = NO_ROUTER;
1428
- card->options.rx_sg_cb = QETH_RX_SG_CB;
14291568 card->options.isolation = ISOLATION_MODE_NONE;
14301569 card->options.cq = QETH_CQ_DISABLED;
1570
+ card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
14311571 }
14321572
14331573 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
....@@ -1445,19 +1585,19 @@
14451585 return rc;
14461586 }
14471587
1588
+static int qeth_do_reset(void *data);
14481589 static void qeth_start_kernel_thread(struct work_struct *work)
14491590 {
14501591 struct task_struct *ts;
14511592 struct qeth_card *card = container_of(work, struct qeth_card,
14521593 kernel_thread_starter);
1453
- QETH_CARD_TEXT(card , 2, "strthrd");
1594
+ QETH_CARD_TEXT(card, 2, "strthrd");
14541595
14551596 if (card->read.state != CH_STATE_UP &&
14561597 card->write.state != CH_STATE_UP)
14571598 return;
14581599 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1459
- ts = kthread_run(card->discipline->recover, (void *)card,
1460
- "qeth_recover");
1600
+ ts = kthread_run(qeth_do_reset, card, "qeth_recover");
14611601 if (IS_ERR(ts)) {
14621602 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
14631603 qeth_clear_thread_running_bit(card,
....@@ -1469,18 +1609,14 @@
14691609 static void qeth_buffer_reclaim_work(struct work_struct *);
14701610 static void qeth_setup_card(struct qeth_card *card)
14711611 {
1472
- QETH_DBF_TEXT(SETUP, 2, "setupcrd");
1473
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1612
+ QETH_CARD_TEXT(card, 2, "setupcrd");
14741613
14751614 card->info.type = CARD_RDEV(card)->id.driver_info;
14761615 card->state = CARD_STATE_DOWN;
1477
- spin_lock_init(&card->mclock);
14781616 spin_lock_init(&card->lock);
1479
- spin_lock_init(&card->ip_lock);
14801617 spin_lock_init(&card->thread_mask_lock);
14811618 mutex_init(&card->conf_mutex);
14821619 mutex_init(&card->discipline_mutex);
1483
- mutex_init(&card->vid_list_mutex);
14841620 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
14851621 INIT_LIST_HEAD(&card->cmd_waiter_list);
14861622 init_waitqueue_head(&card->wait_q);
....@@ -1490,6 +1626,11 @@
14901626 qeth_init_qdio_info(card);
14911627 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
14921628 INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1629
+ hash_init(card->rx_mode_addrs);
1630
+ hash_init(card->local_addrs4);
1631
+ hash_init(card->local_addrs6);
1632
+ spin_lock_init(&card->local_addrs4_lock);
1633
+ spin_lock_init(&card->local_addrs6_lock);
14931634 }
14941635
14951636 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
....@@ -1512,41 +1653,47 @@
15121653 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
15131654
15141655 card->gdev = gdev;
1656
+ dev_set_drvdata(&gdev->dev, card);
15151657 CARD_RDEV(card) = gdev->cdev[0];
15161658 CARD_WDEV(card) = gdev->cdev[1];
15171659 CARD_DDEV(card) = gdev->cdev[2];
1518
- if (qeth_setup_channel(&card->read, true))
1519
- goto out_ip;
1520
- if (qeth_setup_channel(&card->write, true))
1521
- goto out_channel;
1522
- if (qeth_setup_channel(&card->data, false))
1523
- goto out_data;
1524
- card->options.layer2 = -1;
1660
+
1661
+ card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1662
+ dev_name(&gdev->dev));
1663
+ if (!card->event_wq)
1664
+ goto out_wq;
1665
+
1666
+ card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1667
+ if (!card->read_cmd)
1668
+ goto out_read_cmd;
1669
+
1670
+ card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
1671
+ qeth_debugfs_root);
1672
+ debugfs_create_file("local_addrs", 0400, card->debugfs, card,
1673
+ &qeth_debugfs_local_addr_fops);
1674
+
15251675 card->qeth_service_level.seq_print = qeth_core_sl_print;
15261676 register_service_level(&card->qeth_service_level);
15271677 return card;
15281678
1529
-out_data:
1530
- qeth_clean_channel(&card->write);
1531
-out_channel:
1532
- qeth_clean_channel(&card->read);
1533
-out_ip:
1679
+out_read_cmd:
1680
+ destroy_workqueue(card->event_wq);
1681
+out_wq:
1682
+ dev_set_drvdata(&gdev->dev, NULL);
15341683 kfree(card);
15351684 out:
15361685 return NULL;
15371686 }
15381687
1539
-static int qeth_clear_channel(struct qeth_channel *channel)
1688
+static int qeth_clear_channel(struct qeth_card *card,
1689
+ struct qeth_channel *channel)
15401690 {
1541
- unsigned long flags;
1542
- struct qeth_card *card;
15431691 int rc;
15441692
1545
- card = CARD_FROM_CDEV(channel->ccwdev);
15461693 QETH_CARD_TEXT(card, 3, "clearch");
1547
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1548
- rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1549
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1694
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1695
+ rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
1696
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
15501697
15511698 if (rc)
15521699 return rc;
....@@ -1560,17 +1707,15 @@
15601707 return 0;
15611708 }
15621709
1563
-static int qeth_halt_channel(struct qeth_channel *channel)
1710
+static int qeth_halt_channel(struct qeth_card *card,
1711
+ struct qeth_channel *channel)
15641712 {
1565
- unsigned long flags;
1566
- struct qeth_card *card;
15671713 int rc;
15681714
1569
- card = CARD_FROM_CDEV(channel->ccwdev);
15701715 QETH_CARD_TEXT(card, 3, "haltch");
1571
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1572
- rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1573
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1716
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1717
+ rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
1718
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
15741719
15751720 if (rc)
15761721 return rc;
....@@ -1583,14 +1728,58 @@
15831728 return 0;
15841729 }
15851730
1731
+static int qeth_stop_channel(struct qeth_channel *channel)
1732
+{
1733
+ struct ccw_device *cdev = channel->ccwdev;
1734
+ int rc;
1735
+
1736
+ rc = ccw_device_set_offline(cdev);
1737
+
1738
+ spin_lock_irq(get_ccwdev_lock(cdev));
1739
+ if (channel->active_cmd) {
1740
+ dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
1741
+ channel->active_cmd);
1742
+ channel->active_cmd = NULL;
1743
+ }
1744
+ cdev->handler = NULL;
1745
+ spin_unlock_irq(get_ccwdev_lock(cdev));
1746
+
1747
+ return rc;
1748
+}
1749
+
1750
+static int qeth_start_channel(struct qeth_channel *channel)
1751
+{
1752
+ struct ccw_device *cdev = channel->ccwdev;
1753
+ int rc;
1754
+
1755
+ channel->state = CH_STATE_DOWN;
1756
+ atomic_set(&channel->irq_pending, 0);
1757
+
1758
+ spin_lock_irq(get_ccwdev_lock(cdev));
1759
+ cdev->handler = qeth_irq;
1760
+ spin_unlock_irq(get_ccwdev_lock(cdev));
1761
+
1762
+ rc = ccw_device_set_online(cdev);
1763
+ if (rc)
1764
+ goto err;
1765
+
1766
+ return 0;
1767
+
1768
+err:
1769
+ spin_lock_irq(get_ccwdev_lock(cdev));
1770
+ cdev->handler = NULL;
1771
+ spin_unlock_irq(get_ccwdev_lock(cdev));
1772
+ return rc;
1773
+}
1774
+
15861775 static int qeth_halt_channels(struct qeth_card *card)
15871776 {
15881777 int rc1 = 0, rc2 = 0, rc3 = 0;
15891778
15901779 QETH_CARD_TEXT(card, 3, "haltchs");
1591
- rc1 = qeth_halt_channel(&card->read);
1592
- rc2 = qeth_halt_channel(&card->write);
1593
- rc3 = qeth_halt_channel(&card->data);
1780
+ rc1 = qeth_halt_channel(card, &card->read);
1781
+ rc2 = qeth_halt_channel(card, &card->write);
1782
+ rc3 = qeth_halt_channel(card, &card->data);
15941783 if (rc1)
15951784 return rc1;
15961785 if (rc2)
....@@ -1603,9 +1792,9 @@
16031792 int rc1 = 0, rc2 = 0, rc3 = 0;
16041793
16051794 QETH_CARD_TEXT(card, 3, "clearchs");
1606
- rc1 = qeth_clear_channel(&card->read);
1607
- rc2 = qeth_clear_channel(&card->write);
1608
- rc3 = qeth_clear_channel(&card->data);
1795
+ rc1 = qeth_clear_channel(card, &card->read);
1796
+ rc2 = qeth_clear_channel(card, &card->write);
1797
+ rc3 = qeth_clear_channel(card, &card->data);
16091798 if (rc1)
16101799 return rc1;
16111800 if (rc2)
....@@ -1626,7 +1815,7 @@
16261815 return qeth_clear_channels(card);
16271816 }
16281817
1629
-int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1818
+static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
16301819 {
16311820 int rc = 0;
16321821
....@@ -1634,7 +1823,7 @@
16341823 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
16351824 QETH_QDIO_CLEANING)) {
16361825 case QETH_QDIO_ESTABLISHED:
1637
- if (card->info.type == QETH_CARD_TYPE_IQD)
1826
+ if (IS_IQD(card))
16381827 rc = qdio_shutdown(CARD_DDEV(card),
16391828 QDIO_FLAG_CLEANUP_USING_HALT);
16401829 else
....@@ -1652,64 +1841,7 @@
16521841 rc = qeth_clear_halt_card(card, use_halt);
16531842 if (rc)
16541843 QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1655
- card->state = CARD_STATE_DOWN;
16561844 return rc;
1657
-}
1658
-EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1659
-
1660
-static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1661
- int *length)
1662
-{
1663
- struct ciw *ciw;
1664
- char *rcd_buf;
1665
- int ret;
1666
- struct qeth_channel *channel = &card->data;
1667
- unsigned long flags;
1668
-
1669
- /*
1670
- * scan for RCD command in extended SenseID data
1671
- */
1672
- ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1673
- if (!ciw || ciw->cmd == 0)
1674
- return -EOPNOTSUPP;
1675
- rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
1676
- if (!rcd_buf)
1677
- return -ENOMEM;
1678
-
1679
- qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
1680
- channel->state = CH_STATE_RCD;
1681
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1682
- ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1683
- QETH_RCD_PARM, LPM_ANYPATH, 0,
1684
- QETH_RCD_TIMEOUT);
1685
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1686
- if (!ret)
1687
- wait_event(card->wait_q,
1688
- (channel->state == CH_STATE_RCD_DONE ||
1689
- channel->state == CH_STATE_DOWN));
1690
- if (channel->state == CH_STATE_DOWN)
1691
- ret = -EIO;
1692
- else
1693
- channel->state = CH_STATE_DOWN;
1694
- if (ret) {
1695
- kfree(rcd_buf);
1696
- *buffer = NULL;
1697
- *length = 0;
1698
- } else {
1699
- *length = ciw->count;
1700
- *buffer = rcd_buf;
1701
- }
1702
- return ret;
1703
-}
1704
-
1705
-static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
1706
-{
1707
- QETH_DBF_TEXT(SETUP, 2, "cfgunit");
1708
- card->info.chpid = prcd[30];
1709
- card->info.unit_addr2 = prcd[31];
1710
- card->info.cula = prcd[63];
1711
- card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1712
- (prcd[0x11] == _ascebc['M']));
17131845 }
17141846
17151847 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
....@@ -1721,7 +1853,7 @@
17211853 char userid[80];
17221854 int rc = 0;
17231855
1724
- QETH_DBF_TEXT(SETUP, 2, "vmlayer");
1856
+ QETH_CARD_TEXT(card, 2, "vmlayer");
17251857
17261858 cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
17271859 if (rc)
....@@ -1764,7 +1896,7 @@
17641896 kfree(response);
17651897 kfree(request);
17661898 if (rc)
1767
- QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
1899
+ QETH_CARD_TEXT_(card, 2, "err%x", rc);
17681900 return disc;
17691901 }
17701902
....@@ -1773,34 +1905,31 @@
17731905 {
17741906 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
17751907
1776
- if (card->info.type == QETH_CARD_TYPE_OSM ||
1777
- card->info.type == QETH_CARD_TYPE_OSN)
1908
+ if (IS_OSM(card) || IS_OSN(card))
17781909 disc = QETH_DISCIPLINE_LAYER2;
1779
- else if (card->info.guestlan)
1780
- disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
1781
- QETH_DISCIPLINE_LAYER3 :
1782
- qeth_vm_detect_layer(card);
1910
+ else if (IS_VM_NIC(card))
1911
+ disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1912
+ qeth_vm_detect_layer(card);
17831913
17841914 switch (disc) {
17851915 case QETH_DISCIPLINE_LAYER2:
1786
- QETH_DBF_TEXT(SETUP, 3, "force l2");
1916
+ QETH_CARD_TEXT(card, 3, "force l2");
17871917 break;
17881918 case QETH_DISCIPLINE_LAYER3:
1789
- QETH_DBF_TEXT(SETUP, 3, "force l3");
1919
+ QETH_CARD_TEXT(card, 3, "force l3");
17901920 break;
17911921 default:
1792
- QETH_DBF_TEXT(SETUP, 3, "force no");
1922
+ QETH_CARD_TEXT(card, 3, "force no");
17931923 }
17941924
17951925 return disc;
17961926 }
17971927
1798
-static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1928
+static void qeth_set_blkt_defaults(struct qeth_card *card)
17991929 {
1800
- QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1930
+ QETH_CARD_TEXT(card, 2, "cfgblkt");
18011931
1802
- if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
1803
- prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
1932
+ if (card->info.use_v1_blkt) {
18041933 card->info.blkt.time_total = 0;
18051934 card->info.blkt.inter_packet = 0;
18061935 card->info.blkt.inter_packet_jumbo = 0;
....@@ -1811,17 +1940,16 @@
18111940 }
18121941 }
18131942
1814
-static void qeth_init_tokens(struct qeth_card *card)
1943
+static void qeth_idx_init(struct qeth_card *card)
18151944 {
1945
+ memset(&card->seqno, 0, sizeof(card->seqno));
1946
+
18161947 card->token.issuer_rm_w = 0x00010103UL;
18171948 card->token.cm_filter_w = 0x00010108UL;
18181949 card->token.cm_connection_w = 0x0001010aUL;
18191950 card->token.ulp_filter_w = 0x0001010bUL;
18201951 card->token.ulp_connection_w = 0x0001010dUL;
1821
-}
18221952
1823
-static void qeth_init_func_level(struct qeth_card *card)
1824
-{
18251953 switch (card->info.type) {
18261954 case QETH_CARD_TYPE_IQD:
18271955 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
....@@ -1835,124 +1963,13 @@
18351963 }
18361964 }
18371965
1838
-static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1839
- void (*idx_reply_cb)(struct qeth_channel *,
1840
- struct qeth_cmd_buffer *))
1966
+static void qeth_idx_finalize_cmd(struct qeth_card *card,
1967
+ struct qeth_cmd_buffer *iob)
18411968 {
1842
- struct qeth_cmd_buffer *iob;
1843
- unsigned long flags;
1844
- int rc;
1845
- struct qeth_card *card;
1846
-
1847
- QETH_DBF_TEXT(SETUP, 2, "idxanswr");
1848
- card = CARD_FROM_CDEV(channel->ccwdev);
1849
- iob = qeth_get_buffer(channel);
1850
- if (!iob)
1851
- return -ENOMEM;
1852
- iob->callback = idx_reply_cb;
1853
- qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
1854
-
1855
- wait_event(card->wait_q,
1856
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1857
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1858
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1859
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1860
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
1861
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1862
-
1863
- if (rc) {
1864
- QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
1865
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
1866
- atomic_set(&channel->irq_pending, 0);
1867
- qeth_release_buffer(channel, iob);
1868
- wake_up(&card->wait_q);
1869
- return rc;
1870
- }
1871
- rc = wait_event_interruptible_timeout(card->wait_q,
1872
- channel->state == CH_STATE_UP, QETH_TIMEOUT);
1873
- if (rc == -ERESTARTSYS)
1874
- return rc;
1875
- if (channel->state != CH_STATE_UP) {
1876
- rc = -ETIME;
1877
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
1878
- } else
1879
- rc = 0;
1880
- return rc;
1881
-}
1882
-
1883
-static int qeth_idx_activate_channel(struct qeth_channel *channel,
1884
- void (*idx_reply_cb)(struct qeth_channel *,
1885
- struct qeth_cmd_buffer *))
1886
-{
1887
- struct qeth_card *card;
1888
- struct qeth_cmd_buffer *iob;
1889
- unsigned long flags;
1890
- __u16 temp;
1891
- __u8 tmp;
1892
- int rc;
1893
- struct ccw_dev_id temp_devid;
1894
-
1895
- card = CARD_FROM_CDEV(channel->ccwdev);
1896
-
1897
- QETH_DBF_TEXT(SETUP, 2, "idxactch");
1898
-
1899
- iob = qeth_get_buffer(channel);
1900
- if (!iob)
1901
- return -ENOMEM;
1902
- iob->callback = idx_reply_cb;
1903
- qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
1904
- iob->data);
1905
- if (channel == &card->write) {
1906
- memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1907
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1908
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1969
+ memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1970
+ QETH_SEQ_NO_LENGTH);
1971
+ if (iob->channel == &card->write)
19091972 card->seqno.trans_hdr++;
1910
- } else {
1911
- memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1912
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1913
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1914
- }
1915
- tmp = ((u8)card->dev->dev_port) | 0x80;
1916
- memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
1917
- memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1918
- &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
1919
- memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1920
- &card->info.func_level, sizeof(__u16));
1921
- ccw_device_get_id(CARD_DDEV(card), &temp_devid);
1922
- memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
1923
- temp = (card->info.cula << 8) + card->info.unit_addr2;
1924
- memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1925
-
1926
- wait_event(card->wait_q,
1927
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1928
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1929
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1930
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1931
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
1932
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1933
-
1934
- if (rc) {
1935
- QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
1936
- rc);
1937
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
1938
- atomic_set(&channel->irq_pending, 0);
1939
- qeth_release_buffer(channel, iob);
1940
- wake_up(&card->wait_q);
1941
- return rc;
1942
- }
1943
- rc = wait_event_interruptible_timeout(card->wait_q,
1944
- channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1945
- if (rc == -ERESTARTSYS)
1946
- return rc;
1947
- if (channel->state != CH_STATE_ACTIVATING) {
1948
- dev_warn(&channel->ccwdev->dev, "The qeth device driver"
1949
- " failed to recover an error on the device\n");
1950
- QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
1951
- dev_name(&channel->ccwdev->dev));
1952
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
1953
- return -ETIME;
1954
- }
1955
- return qeth_idx_activate_get_answer(channel, idx_reply_cb);
19561973 }
19571974
19581975 static int qeth_peer_func_level(int level)
....@@ -1964,123 +1981,48 @@
19641981 return level;
19651982 }
19661983
1967
-static void qeth_idx_write_cb(struct qeth_channel *channel,
1968
- struct qeth_cmd_buffer *iob)
1984
+static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1985
+ struct qeth_cmd_buffer *iob)
19691986 {
1970
- struct qeth_card *card;
1971
- __u16 temp;
1987
+ qeth_idx_finalize_cmd(card, iob);
19721988
1973
- QETH_DBF_TEXT(SETUP , 2, "idxwrcb");
1974
-
1975
- if (channel->state == CH_STATE_DOWN) {
1976
- channel->state = CH_STATE_ACTIVATING;
1977
- goto out;
1978
- }
1979
- card = CARD_FROM_CDEV(channel->ccwdev);
1980
-
1981
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1982
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
1983
- dev_err(&channel->ccwdev->dev,
1984
- "The adapter is used exclusively by another "
1985
- "host\n");
1986
- else
1987
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
1988
- " negative reply\n",
1989
- dev_name(&channel->ccwdev->dev));
1990
- goto out;
1991
- }
1992
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1993
- if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1994
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
1995
- "function level mismatch (sent: 0x%x, received: "
1996
- "0x%x)\n", dev_name(&channel->ccwdev->dev),
1997
- card->info.func_level, temp);
1998
- goto out;
1999
- }
2000
- channel->state = CH_STATE_UP;
2001
-out:
2002
- qeth_release_buffer(channel, iob);
2003
-}
2004
-
2005
-static void qeth_idx_read_cb(struct qeth_channel *channel,
2006
- struct qeth_cmd_buffer *iob)
2007
-{
2008
- struct qeth_card *card;
2009
- __u16 temp;
2010
-
2011
- QETH_DBF_TEXT(SETUP , 2, "idxrdcb");
2012
- if (channel->state == CH_STATE_DOWN) {
2013
- channel->state = CH_STATE_ACTIVATING;
2014
- goto out;
2015
- }
2016
-
2017
- card = CARD_FROM_CDEV(channel->ccwdev);
2018
- if (qeth_check_idx_response(card, iob->data))
2019
- goto out;
2020
-
2021
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
2022
- switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2023
- case QETH_IDX_ACT_ERR_EXCL:
2024
- dev_err(&channel->ccwdev->dev,
2025
- "The adapter is used exclusively by another "
2026
- "host\n");
2027
- break;
2028
- case QETH_IDX_ACT_ERR_AUTH:
2029
- case QETH_IDX_ACT_ERR_AUTH_USER:
2030
- dev_err(&channel->ccwdev->dev,
2031
- "Setting the device online failed because of "
2032
- "insufficient authorization\n");
2033
- break;
2034
- default:
2035
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
2036
- " negative reply\n",
2037
- dev_name(&channel->ccwdev->dev));
2038
- }
2039
- QETH_CARD_TEXT_(card, 2, "idxread%c",
2040
- QETH_IDX_ACT_CAUSE_CODE(iob->data));
2041
- goto out;
2042
- }
2043
-
2044
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2045
- if (temp != qeth_peer_func_level(card->info.func_level)) {
2046
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
2047
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
2048
- dev_name(&channel->ccwdev->dev),
2049
- card->info.func_level, temp);
2050
- goto out;
2051
- }
2052
- memcpy(&card->token.issuer_rm_r,
2053
- QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2054
- QETH_MPC_TOKEN_LENGTH);
2055
- memcpy(&card->info.mcl_level[0],
2056
- QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2057
- channel->state = CH_STATE_UP;
2058
-out:
2059
- qeth_release_buffer(channel, iob);
2060
-}
2061
-
2062
-void qeth_prepare_control_data(struct qeth_card *card, int len,
2063
- struct qeth_cmd_buffer *iob)
2064
-{
2065
- qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
2066
- iob->callback = qeth_release_buffer;
2067
-
2068
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
2069
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
2070
- card->seqno.trans_hdr++;
20711989 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
20721990 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
20731991 card->seqno.pdu_hdr++;
20741992 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
20751993 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
2076
- QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
1994
+
1995
+ iob->callback = qeth_release_buffer_cb;
20771996 }
2078
-EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
1997
+
1998
+static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
1999
+ struct qeth_cmd_buffer *reply)
2000
+{
2001
+ /* MPC cmds are issued strictly in sequence. */
2002
+ return !IS_IPA(reply->data);
2003
+}
2004
+
2005
+static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
2006
+ const void *data,
2007
+ unsigned int data_length)
2008
+{
2009
+ struct qeth_cmd_buffer *iob;
2010
+
2011
+ iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
2012
+ if (!iob)
2013
+ return NULL;
2014
+
2015
+ memcpy(iob->data, data, data_length);
2016
+ qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
2017
+ iob->data);
2018
+ iob->finalize = qeth_mpc_finalize_cmd;
2019
+ iob->match = qeth_mpc_match_reply;
2020
+ return iob;
2021
+}
20792022
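The one-line qeth_mpc_match_reply() above works because MPC control commands are issued strictly in sequence: at most one MPC command is outstanding at a time, so any control-channel reply that is not an IPA reply must belong to it. A tiny model of that pairing rule, using invented stand-in types rather than the driver's structures:

/*
 * Hypothetical match predicate in the spirit of iob->match(): it ignores
 * the command's own identity and only asks whether the reply is an
 * MPC-style (non-IPA) reply, relying on strict sequencing for correctness.
 */
#include <stdbool.h>
#include <stdio.h>

enum reply_kind { REPLY_IPA, REPLY_MPC };

struct pending_cmd {
	int seqno;
	bool is_mpc;
};

static bool mpc_match_reply(const struct pending_cmd *cmd,
			    enum reply_kind reply)
{
	(void)cmd;
	return reply == REPLY_MPC;
}

int main(void)
{
	struct pending_cmd outstanding = { .seqno = 7, .is_mpc = true };

	printf("MPC reply matches cmd %d: %s\n", outstanding.seqno,
	       mpc_match_reply(&outstanding, REPLY_MPC) ? "yes" : "no");
	printf("IPA reply matches cmd %d: %s\n", outstanding.seqno,
	       mpc_match_reply(&outstanding, REPLY_IPA) ? "yes" : "no");
	return 0;
}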
20802023 /**
20812024 * qeth_send_control_data() - send control command to the card
20822025 * @card: qeth_card structure pointer
2083
- * @len: size of the command buffer
20842026 * @iob: qeth_cmd_buffer pointer
20852027 * @reply_cb: callback function pointer
20862028 * @cb_card: pointer to the qeth_card structure
....@@ -2090,187 +2032,395 @@
20902032 * for the IPA commands.
20912033 * @reply_param: private pointer passed to the callback
20922034 *
2093
- * Returns the value of the `return_code' field of the response
2094
- * block returned from the hardware, or other error indication.
2095
- * Value of zero indicates successful execution of the command.
2096
- *
20972035 * Callback function gets called one or more times, with cb_cmd
20982036 * pointing to the response returned by the hardware. Callback
2099
- * function must return non-zero if more reply blocks are expected,
2100
- * and zero if the last or only reply block is received. Callback
2101
- * function can get the value of the reply_param pointer from the
2037
+ * function must return
2038
+ * > 0 if more reply blocks are expected,
2039
+ * 0 if the last or only reply block is received, and
2040
+ * < 0 on error.
2041
+ * Callback function can get the value of the reply_param pointer from the
21022042 * field 'param' of the structure qeth_reply.
21032043 */
21042044
2105
-int qeth_send_control_data(struct qeth_card *card, int len,
2106
- struct qeth_cmd_buffer *iob,
2107
- int (*reply_cb)(struct qeth_card *cb_card,
2108
- struct qeth_reply *cb_reply,
2109
- unsigned long cb_cmd),
2110
- void *reply_param)
2045
+static int qeth_send_control_data(struct qeth_card *card,
2046
+ struct qeth_cmd_buffer *iob,
2047
+ int (*reply_cb)(struct qeth_card *cb_card,
2048
+ struct qeth_reply *cb_reply,
2049
+ unsigned long cb_cmd),
2050
+ void *reply_param)
21112051 {
21122052 struct qeth_channel *channel = iob->channel;
2053
+ struct qeth_reply *reply = &iob->reply;
2054
+ long timeout = iob->timeout;
21132055 int rc;
2114
- unsigned long flags;
2115
- struct qeth_reply *reply = NULL;
2116
- unsigned long timeout, event_timeout;
2117
- struct qeth_ipa_cmd *cmd = NULL;
21182056
21192057 QETH_CARD_TEXT(card, 2, "sendctl");
21202058
2121
- if (card->read_or_write_problem) {
2122
- qeth_release_buffer(channel, iob);
2123
- return -EIO;
2124
- }
2125
- reply = qeth_alloc_reply(card);
2126
- if (!reply) {
2127
- qeth_release_buffer(channel, iob);
2128
- return -ENOMEM;
2129
- }
21302059 reply->callback = reply_cb;
21312060 reply->param = reply_param;
21322061
2133
- init_waitqueue_head(&reply->wait_q);
2134
-
2135
- while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
2136
-
2137
- if (IS_IPA(iob->data)) {
2138
- cmd = __ipa_cmd(iob);
2139
- cmd->hdr.seqno = card->seqno.ipa++;
2140
- reply->seqno = cmd->hdr.seqno;
2141
- event_timeout = QETH_IPA_TIMEOUT;
2142
- } else {
2143
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
2144
- event_timeout = QETH_TIMEOUT;
2062
+ timeout = wait_event_interruptible_timeout(card->wait_q,
2063
+ qeth_trylock_channel(channel),
2064
+ timeout);
2065
+ if (timeout <= 0) {
2066
+ qeth_put_cmd(iob);
2067
+ return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
21452068 }
2146
- qeth_prepare_control_data(card, len, iob);
21472069
2148
- spin_lock_irqsave(&card->lock, flags);
2149
- list_add_tail(&reply->list, &card->cmd_waiter_list);
2150
- spin_unlock_irqrestore(&card->lock, flags);
2070
+ if (iob->finalize)
2071
+ iob->finalize(card, iob);
2072
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
21512073
2152
- timeout = jiffies + event_timeout;
2074
+ qeth_enqueue_cmd(card, iob);
2075
+
2076
+ /* This pairs with iob->callback, and keeps the iob alive after IO: */
2077
+ qeth_get_cmd(iob);
21532078
21542079 QETH_CARD_TEXT(card, 6, "noirqpnd");
2155
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
2156
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
2157
- (addr_t) iob, 0, 0, event_timeout);
2158
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
2080
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2081
+ rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2082
+ (addr_t) iob, 0, 0, timeout);
2083
+ if (!rc)
2084
+ channel->active_cmd = iob;
2085
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
21592086 if (rc) {
2160
- QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
2161
- "ccw_device_start rc = %i\n",
2162
- dev_name(&channel->ccwdev->dev), rc);
2087
+ QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2088
+ CARD_DEVID(card), rc);
21632089 QETH_CARD_TEXT_(card, 2, " err%d", rc);
2164
- spin_lock_irqsave(&card->lock, flags);
2165
- list_del_init(&reply->list);
2166
- qeth_put_reply(reply);
2167
- spin_unlock_irqrestore(&card->lock, flags);
2168
- qeth_release_buffer(channel, iob);
2169
- atomic_set(&channel->irq_pending, 0);
2170
- wake_up(&card->wait_q);
2171
- return rc;
2090
+ qeth_dequeue_cmd(card, iob);
2091
+ qeth_put_cmd(iob);
2092
+ qeth_unlock_channel(card, channel);
2093
+ goto out;
21722094 }
21732095
2174
- /* we have only one long running ipassist, since we can ensure
2175
- process context of this command we can sleep */
2176
- if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
2177
- cmd->hdr.prot_version == QETH_PROT_IPV4) {
2178
- if (!wait_event_timeout(reply->wait_q,
2179
- atomic_read(&reply->received), event_timeout))
2180
- goto time_err;
2181
- } else {
2182
- while (!atomic_read(&reply->received)) {
2183
- if (time_after(jiffies, timeout))
2184
- goto time_err;
2185
- cpu_relax();
2186
- }
2096
+ timeout = wait_for_completion_interruptible_timeout(&iob->done,
2097
+ timeout);
2098
+ if (timeout <= 0)
2099
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2100
+
2101
+ qeth_dequeue_cmd(card, iob);
2102
+
2103
+ if (reply_cb) {
2104
+ /* Wait until the callback for a late reply has completed: */
2105
+ spin_lock_irq(&iob->lock);
2106
+ if (rc)
2107
+ /* Zap any callback that's still pending: */
2108
+ iob->rc = rc;
2109
+ spin_unlock_irq(&iob->lock);
21872110 }
21882111
2189
- rc = reply->rc;
2190
- qeth_put_reply(reply);
2191
- return rc;
2112
+ if (!rc)
2113
+ rc = iob->rc;
21922114
2193
-time_err:
2194
- reply->rc = -ETIME;
2195
- spin_lock_irqsave(&reply->card->lock, flags);
2196
- list_del_init(&reply->list);
2197
- spin_unlock_irqrestore(&reply->card->lock, flags);
2198
- atomic_inc(&reply->received);
2199
- rc = reply->rc;
2200
- qeth_put_reply(reply);
2115
+out:
2116
+ qeth_put_cmd(iob);
22012117 return rc;
22022118 }
2203
-EXPORT_SYMBOL_GPL(qeth_send_control_data);
2119
+
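The kernel-doc above spells out the reply-callback contract: return a positive value to keep waiting for further reply blocks, zero once the final block has been consumed, and a negative errno to abort. A hedged userspace sketch of a callback that follows this convention, using stand-in types rather than the driver's real structures:

/*
 * Hypothetical reply callback honouring the > 0 / 0 / < 0 convention
 * described in the kernel-doc for qeth_send_control_data().
 */
#include <errno.h>
#include <stdio.h>

struct fake_reply_block {
	int return_code;	/* 0 = success */
	int more_to_come;	/* nonzero if further blocks follow */
};

static int example_reply_cb(void *reply_param,
			    const struct fake_reply_block *blk)
{
	int *collected = reply_param;

	if (blk->return_code)
		return -EIO;		/* abort: hardware reported an error */

	(*collected)++;
	return blk->more_to_come;	/* > 0: wait for more, 0: done */
}

int main(void)
{
	struct fake_reply_block blocks[] = {
		{ .return_code = 0, .more_to_come = 1 },
		{ .return_code = 0, .more_to_come = 0 },
	};
	int collected = 0;
	int rc = 1;

	for (int i = 0; rc > 0 && i < 2; i++)
		rc = example_reply_cb(&collected, &blocks[i]);

	printf("rc=%d, collected %d reply blocks\n", rc, collected);
	return 0;
}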
2120
+struct qeth_node_desc {
2121
+ struct node_descriptor nd1;
2122
+ struct node_descriptor nd2;
2123
+ struct node_descriptor nd3;
2124
+};
2125
+
2126
+static void qeth_read_conf_data_cb(struct qeth_card *card,
2127
+ struct qeth_cmd_buffer *iob,
2128
+ unsigned int data_length)
2129
+{
2130
+ struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2131
+ int rc = 0;
2132
+ u8 *tag;
2133
+
2134
+ QETH_CARD_TEXT(card, 2, "cfgunit");
2135
+
2136
+ if (data_length < sizeof(*nd)) {
2137
+ rc = -EINVAL;
2138
+ goto out;
2139
+ }
2140
+
2141
+ card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2142
+ nd->nd1.plant[1] == _ascebc['M'];
2143
+ tag = (u8 *)&nd->nd1.tag;
2144
+ card->info.chpid = tag[0];
2145
+ card->info.unit_addr2 = tag[1];
2146
+
2147
+ tag = (u8 *)&nd->nd2.tag;
2148
+ card->info.cula = tag[1];
2149
+
2150
+ card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2151
+ nd->nd3.model[1] == 0xF0 &&
2152
+ nd->nd3.model[2] >= 0xF1 &&
2153
+ nd->nd3.model[2] <= 0xF4;
2154
+
2155
+out:
2156
+ qeth_notify_cmd(iob, rc);
2157
+ qeth_put_cmd(iob);
2158
+}
2159
+
2160
+static int qeth_read_conf_data(struct qeth_card *card)
2161
+{
2162
+ struct qeth_channel *channel = &card->data;
2163
+ struct qeth_cmd_buffer *iob;
2164
+ struct ciw *ciw;
2165
+
2166
+ /* scan for RCD command in extended SenseID data */
2167
+ ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2168
+ if (!ciw || ciw->cmd == 0)
2169
+ return -EOPNOTSUPP;
2170
+ if (ciw->count < sizeof(struct qeth_node_desc))
2171
+ return -EINVAL;
2172
+
2173
+ iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2174
+ if (!iob)
2175
+ return -ENOMEM;
2176
+
2177
+ iob->callback = qeth_read_conf_data_cb;
2178
+ qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2179
+ iob->data);
2180
+
2181
+ return qeth_send_control_data(card, iob, NULL, NULL);
2182
+}
2183
+
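qeth_read_conf_data_cb() mines the returned node descriptors for three things: a plant code of "VM" (in EBCDIC) flags a z/VM virtual NIC, the tag bytes supply CHPID and unit address, and a model of F0F0F1..F0F0F4 selects the old block-threshold defaults. A simplified sketch of the first and last checks; the struct layout below is a stand-in (not the real node_descriptor layout) and the EBCDIC byte values 0xE5/0xD4 for 'V'/'M' are stated assumptions:

/*
 * Hypothetical parse of node-descriptor bytes: detect a z/VM NIC by its
 * plant code and select legacy blocking-threshold defaults by model.
 */
#include <stdbool.h>
#include <stdio.h>

#define EBCDIC_V	0xE5	/* assumed EBCDIC code point for 'V' */
#define EBCDIC_M	0xD4	/* assumed EBCDIC code point for 'M' */

struct fake_node_desc {
	unsigned char plant[2];
	unsigned char model[3];
};

static bool is_vm_nic(const struct fake_node_desc *nd)
{
	return nd->plant[0] == EBCDIC_V && nd->plant[1] == EBCDIC_M;
}

static bool use_v1_blkt(const struct fake_node_desc *nd)
{
	return nd->model[0] == 0xF0 && nd->model[1] == 0xF0 &&
	       nd->model[2] >= 0xF1 && nd->model[2] <= 0xF4;
}

int main(void)
{
	struct fake_node_desc nd = {
		.plant = { EBCDIC_V, EBCDIC_M },
		.model = { 0xF0, 0xF0, 0xF2 },
	};

	printf("vm nic: %d, v1 blkt: %d\n", is_vm_nic(&nd), use_v1_blkt(&nd));
	return 0;
}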
2184
+static int qeth_idx_check_activate_response(struct qeth_card *card,
2185
+ struct qeth_channel *channel,
2186
+ struct qeth_cmd_buffer *iob)
2187
+{
2188
+ int rc;
2189
+
2190
+ rc = qeth_check_idx_response(card, iob->data);
2191
+ if (rc)
2192
+ return rc;
2193
+
2194
+ if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2195
+ return 0;
2196
+
2197
+ /* negative reply: */
2198
+ QETH_CARD_TEXT_(card, 2, "idxneg%c",
2199
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
2200
+
2201
+ switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2202
+ case QETH_IDX_ACT_ERR_EXCL:
2203
+ dev_err(&channel->ccwdev->dev,
2204
+ "The adapter is used exclusively by another host\n");
2205
+ return -EBUSY;
2206
+ case QETH_IDX_ACT_ERR_AUTH:
2207
+ case QETH_IDX_ACT_ERR_AUTH_USER:
2208
+ dev_err(&channel->ccwdev->dev,
2209
+ "Setting the device online failed because of insufficient authorization\n");
2210
+ return -EPERM;
2211
+ default:
2212
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2213
+ CCW_DEVID(channel->ccwdev));
2214
+ return -EIO;
2215
+ }
2216
+}
2217
+
2218
+static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2219
+ struct qeth_cmd_buffer *iob,
2220
+ unsigned int data_length)
2221
+{
2222
+ struct qeth_channel *channel = iob->channel;
2223
+ u16 peer_level;
2224
+ int rc;
2225
+
2226
+ QETH_CARD_TEXT(card, 2, "idxrdcb");
2227
+
2228
+ rc = qeth_idx_check_activate_response(card, channel, iob);
2229
+ if (rc)
2230
+ goto out;
2231
+
2232
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2233
+ if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2234
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2235
+ CCW_DEVID(channel->ccwdev),
2236
+ card->info.func_level, peer_level);
2237
+ rc = -EINVAL;
2238
+ goto out;
2239
+ }
2240
+
2241
+ memcpy(&card->token.issuer_rm_r,
2242
+ QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2243
+ QETH_MPC_TOKEN_LENGTH);
2244
+ memcpy(&card->info.mcl_level[0],
2245
+ QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2246
+
2247
+out:
2248
+ qeth_notify_cmd(iob, rc);
2249
+ qeth_put_cmd(iob);
2250
+}
2251
+
2252
+static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2253
+ struct qeth_cmd_buffer *iob,
2254
+ unsigned int data_length)
2255
+{
2256
+ struct qeth_channel *channel = iob->channel;
2257
+ u16 peer_level;
2258
+ int rc;
2259
+
2260
+ QETH_CARD_TEXT(card, 2, "idxwrcb");
2261
+
2262
+ rc = qeth_idx_check_activate_response(card, channel, iob);
2263
+ if (rc)
2264
+ goto out;
2265
+
2266
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2267
+ if ((peer_level & ~0x0100) !=
2268
+ qeth_peer_func_level(card->info.func_level)) {
2269
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2270
+ CCW_DEVID(channel->ccwdev),
2271
+ card->info.func_level, peer_level);
2272
+ rc = -EINVAL;
2273
+ }
2274
+
2275
+out:
2276
+ qeth_notify_cmd(iob, rc);
2277
+ qeth_put_cmd(iob);
2278
+}
2279
+
2280
+static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2281
+ struct qeth_cmd_buffer *iob)
2282
+{
2283
+ u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2284
+ u8 port = ((u8)card->dev->dev_port) | 0x80;
2285
+ struct ccw1 *ccw = __ccw_from_cmd(iob);
2286
+
2287
+ qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2288
+ iob->data);
2289
+ qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2290
+ iob->finalize = qeth_idx_finalize_cmd;
2291
+
2292
+ port |= QETH_IDX_ACT_INVAL_FRAME;
2293
+ memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2294
+ memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2295
+ &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2296
+ memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2297
+ &card->info.func_level, 2);
2298
+ memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2299
+ memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2300
+}
2301
+
2302
+static int qeth_idx_activate_read_channel(struct qeth_card *card)
2303
+{
2304
+ struct qeth_channel *channel = &card->read;
2305
+ struct qeth_cmd_buffer *iob;
2306
+ int rc;
2307
+
2308
+ QETH_CARD_TEXT(card, 2, "idxread");
2309
+
2310
+ iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2311
+ if (!iob)
2312
+ return -ENOMEM;
2313
+
2314
+ memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2315
+ qeth_idx_setup_activate_cmd(card, iob);
2316
+ iob->callback = qeth_idx_activate_read_channel_cb;
2317
+
2318
+ rc = qeth_send_control_data(card, iob, NULL, NULL);
2319
+ if (rc)
2320
+ return rc;
2321
+
2322
+ channel->state = CH_STATE_UP;
2323
+ return 0;
2324
+}
2325
+
2326
+static int qeth_idx_activate_write_channel(struct qeth_card *card)
2327
+{
2328
+ struct qeth_channel *channel = &card->write;
2329
+ struct qeth_cmd_buffer *iob;
2330
+ int rc;
2331
+
2332
+ QETH_CARD_TEXT(card, 2, "idxwrite");
2333
+
2334
+ iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2335
+ if (!iob)
2336
+ return -ENOMEM;
2337
+
2338
+ memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2339
+ qeth_idx_setup_activate_cmd(card, iob);
2340
+ iob->callback = qeth_idx_activate_write_channel_cb;
2341
+
2342
+ rc = qeth_send_control_data(card, iob, NULL, NULL);
2343
+ if (rc)
2344
+ return rc;
2345
+
2346
+ channel->state = CH_STATE_UP;
2347
+ return 0;
2348
+}
22042349
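qeth_idx_setup_activate_cmd() builds a two-element channel program: the WRITE of the activate frame carries CCW_FLAG_CC, so the READ that follows collects the response within the same I/O. The sketch below models only that chaining rule, with commands reduced to function pointers and every identifier invented:

/*
 * Hypothetical two-step "channel program": run the next entry only while
 * the previous one has its chain flag set.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_ccw {
	const char *what;
	bool chain;		/* stand-in for CCW_FLAG_CC */
	void (*run)(const char *what);
};

static void run_cmd(const char *what)
{
	printf("executing %s\n", what);
}

static void run_channel_program(const struct fake_ccw *ccw, unsigned int max)
{
	for (unsigned int i = 0; i < max; i++) {
		ccw[i].run(ccw[i].what);
		if (!ccw[i].chain)
			break;	/* chain ends here */
	}
}

int main(void)
{
	struct fake_ccw prog[2] = {
		{ .what = "WRITE IDX_ACTIVATE", .chain = true,  .run = run_cmd },
		{ .what = "READ response",      .chain = false, .run = run_cmd },
	};

	run_channel_program(prog, 2);
	return 0;
}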
22052350 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
22062351 unsigned long data)
22072352 {
22082353 struct qeth_cmd_buffer *iob;
22092354
2210
- QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
2355
+ QETH_CARD_TEXT(card, 2, "cmenblcb");
22112356
22122357 iob = (struct qeth_cmd_buffer *) data;
22132358 memcpy(&card->token.cm_filter_r,
22142359 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
22152360 QETH_MPC_TOKEN_LENGTH);
2216
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
22172361 return 0;
22182362 }
22192363
22202364 static int qeth_cm_enable(struct qeth_card *card)
22212365 {
2222
- int rc;
22232366 struct qeth_cmd_buffer *iob;
22242367
2225
- QETH_DBF_TEXT(SETUP, 2, "cmenable");
2368
+ QETH_CARD_TEXT(card, 2, "cmenable");
22262369
2227
- iob = qeth_wait_for_buffer(&card->write);
2228
- memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2370
+ iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2371
+ if (!iob)
2372
+ return -ENOMEM;
2373
+
22292374 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
22302375 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
22312376 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
22322377 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
22332378
2234
- rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2235
- qeth_cm_enable_cb, NULL);
2236
- return rc;
2379
+ return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
22372380 }
22382381
22392382 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
22402383 unsigned long data)
22412384 {
2242
-
22432385 struct qeth_cmd_buffer *iob;
22442386
2245
- QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
2387
+ QETH_CARD_TEXT(card, 2, "cmsetpcb");
22462388
22472389 iob = (struct qeth_cmd_buffer *) data;
22482390 memcpy(&card->token.cm_connection_r,
22492391 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
22502392 QETH_MPC_TOKEN_LENGTH);
2251
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
22522393 return 0;
22532394 }
22542395
22552396 static int qeth_cm_setup(struct qeth_card *card)
22562397 {
2257
- int rc;
22582398 struct qeth_cmd_buffer *iob;
22592399
2260
- QETH_DBF_TEXT(SETUP, 2, "cmsetup");
2400
+ QETH_CARD_TEXT(card, 2, "cmsetup");
22612401
2262
- iob = qeth_wait_for_buffer(&card->write);
2263
- memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2402
+ iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2403
+ if (!iob)
2404
+ return -ENOMEM;
2405
+
22642406 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
22652407 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
22662408 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
22672409 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
22682410 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
22692411 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2270
- rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2271
- qeth_cm_setup_cb, NULL);
2272
- return rc;
2412
+ return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2413
+}
22732414
2415
+static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2416
+{
2417
+ if (link_type == QETH_LINK_TYPE_LANE_TR ||
2418
+ link_type == QETH_LINK_TYPE_HSTR) {
2419
+ dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2420
+ return false;
2421
+ }
2422
+
2423
+ return true;
22742424 }
22752425
22762426 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
....@@ -2294,12 +2444,12 @@
22942444 /* adjust RX buffer size to new max MTU: */
22952445 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
22962446 if (dev->max_mtu && dev->max_mtu != max_mtu)
2297
- qeth_free_qdio_buffers(card);
2447
+ qeth_free_qdio_queues(card);
22982448 } else {
22992449 if (dev->mtu)
23002450 new_mtu = dev->mtu;
23012451 /* default MTUs for first setup: */
2302
- else if (card->options.layer2)
2452
+ else if (IS_LAYER2(card))
23032453 new_mtu = ETH_DATA_LEN;
23042454 else
23052455 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
....@@ -2330,19 +2480,18 @@
23302480 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
23312481 unsigned long data)
23322482 {
2333
-
23342483 __u16 mtu, framesize;
23352484 __u16 len;
2336
- __u8 link_type;
23372485 struct qeth_cmd_buffer *iob;
2486
+ u8 link_type = 0;
23382487
2339
- QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
2488
+ QETH_CARD_TEXT(card, 2, "ulpenacb");
23402489
23412490 iob = (struct qeth_cmd_buffer *) data;
23422491 memcpy(&card->token.ulp_filter_r,
23432492 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
23442493 QETH_MPC_TOKEN_LENGTH);
2345
- if (card->info.type == QETH_CARD_TYPE_IQD) {
2494
+ if (IS_IQD(card)) {
23462495 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
23472496 mtu = qeth_get_mtu_outof_framesize(framesize);
23482497 } else {
....@@ -2354,11 +2503,12 @@
23542503 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
23552504 memcpy(&link_type,
23562505 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2357
- card->info.link_type = link_type;
2358
- } else
2359
- card->info.link_type = 0;
2360
- QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
2361
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
2506
+ if (!qeth_is_supported_link_type(card, link_type))
2507
+ return -EPROTONOSUPPORT;
2508
+ }
2509
+
2510
+ card->info.link_type = link_type;
2511
+ QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
23622512 return 0;
23632513 }
23642514
....@@ -2366,7 +2516,7 @@
23662516 {
23672517 if (IS_OSN(card))
23682518 return QETH_PROT_OSN2;
2369
- return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2519
+ return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
23702520 }
23712521
23722522 static int qeth_ulp_enable(struct qeth_card *card)
....@@ -2376,11 +2526,11 @@
23762526 u16 max_mtu;
23772527 int rc;
23782528
2379
- /*FIXME: trace view callbacks*/
2380
- QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
2529
+ QETH_CARD_TEXT(card, 2, "ulpenabl");
23812530
2382
- iob = qeth_wait_for_buffer(&card->write);
2383
- memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2531
+ iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2532
+ if (!iob)
2533
+ return -ENOMEM;
23842534
23852535 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
23862536 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
....@@ -2388,8 +2538,7 @@
23882538 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
23892539 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
23902540 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2391
- rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2392
- qeth_ulp_enable_cb, &max_mtu);
2541
+ rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
23932542 if (rc)
23942543 return rc;
23952544 return qeth_update_max_mtu(card, max_mtu);
....@@ -2400,7 +2549,7 @@
24002549 {
24012550 struct qeth_cmd_buffer *iob;
24022551
2403
- QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
2552
+ QETH_CARD_TEXT(card, 2, "ulpstpcb");
24042553
24052554 iob = (struct qeth_cmd_buffer *) data;
24062555 memcpy(&card->token.ulp_connection_r,
....@@ -2408,26 +2557,24 @@
24082557 QETH_MPC_TOKEN_LENGTH);
24092558 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
24102559 3)) {
2411
- QETH_DBF_TEXT(SETUP, 2, "olmlimit");
2560
+ QETH_CARD_TEXT(card, 2, "olmlimit");
24122561 dev_err(&card->gdev->dev, "A connection could not be "
24132562 "established because of an OLM limit\n");
2414
- iob->rc = -EMLINK;
2563
+ return -EMLINK;
24152564 }
2416
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
24172565 return 0;
24182566 }
24192567
24202568 static int qeth_ulp_setup(struct qeth_card *card)
24212569 {
2422
- int rc;
24232570 __u16 temp;
24242571 struct qeth_cmd_buffer *iob;
2425
- struct ccw_dev_id dev_id;
24262572
2427
- QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
2573
+ QETH_CARD_TEXT(card, 2, "ulpsetup");
24282574
2429
- iob = qeth_wait_for_buffer(&card->write);
2430
- memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2575
+ iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2576
+ if (!iob)
2577
+ return -ENOMEM;
24312578
24322579 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
24332580 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
....@@ -2436,13 +2583,10 @@
24362583 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
24372584 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
24382585
2439
- ccw_device_get_id(CARD_DDEV(card), &dev_id);
2440
- memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2586
+ memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
24412587 temp = (card->info.cula << 8) + card->info.unit_addr2;
24422588 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2443
- rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2444
- qeth_ulp_setup_cb, NULL);
2445
- return rc;
2589
+ return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
24462590 }
24472591
24482592 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
....@@ -2457,7 +2601,6 @@
24572601 skb_queue_head_init(&newbuf->skb_list);
24582602 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
24592603 newbuf->q = q;
2460
- newbuf->next_pending = q->bufs[bidx];
24612604 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
24622605 q->bufs[bidx] = newbuf;
24632606 return 0;
....@@ -2468,36 +2611,57 @@
24682611 if (!q)
24692612 return;
24702613
2471
- qeth_clear_outq_buffers(q, 1);
2614
+ qeth_drain_output_queue(q, true);
24722615 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
24732616 kfree(q);
24742617 }
24752618
2476
-static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
2619
+static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
24772620 {
24782621 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2622
+ unsigned int i;
24792623
24802624 if (!q)
24812625 return NULL;
24822626
2483
- if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2484
- kfree(q);
2485
- return NULL;
2627
+ if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2628
+ goto err_qdio_bufs;
2629
+
2630
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2631
+ if (qeth_init_qdio_out_buf(q, i))
2632
+ goto err_out_bufs;
24862633 }
2634
+
24872635 return q;
2636
+
2637
+err_out_bufs:
2638
+ while (i > 0)
2639
+ kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
2640
+ qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2641
+err_qdio_bufs:
2642
+ kfree(q);
2643
+ return NULL;
24882644 }
24892645
2490
-static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2646
+static void qeth_tx_completion_timer(struct timer_list *timer)
24912647 {
2492
- int i, j;
2648
+ struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
24932649
2494
- QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
2650
+ napi_schedule(&queue->napi);
2651
+ QETH_TXQ_STAT_INC(queue, completion_timer);
2652
+}
2653
+
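The new per-queue timer exists purely as a completion backstop: when it fires it schedules the queue's NAPI instance and bumps the completion_timer statistic, so batched TX completions still get processed even if no further interrupt arrives. A trivial logical model of that callback, with a flag standing in for napi_schedule() and placeholder values for the coalescing fields initialised in qeth_alloc_qdio_queues() below:

/*
 * Hypothetical completion-timer callback: kick the poll routine and
 * account the fallback event.  All values and names are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_txq {
	bool napi_scheduled;		/* stand-in for napi_schedule() */
	unsigned long completion_timer;	/* stand-in for the stat counter */
	unsigned int coalesce_usecs;
	unsigned int max_coalesced_frames;
};

static void tx_completion_timer(struct fake_txq *q)
{
	q->napi_scheduled = true;
	q->completion_timer++;
}

int main(void)
{
	struct fake_txq q = {
		.coalesce_usecs = 25,		/* placeholder value */
		.max_coalesced_frames = 32,	/* placeholder value */
	};

	tx_completion_timer(&q);
	printf("napi scheduled: %d, timer fired %lu time(s)\n",
	       q.napi_scheduled, q.completion_timer);
	return 0;
}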
2654
+static int qeth_alloc_qdio_queues(struct qeth_card *card)
2655
+{
2656
+ unsigned int i;
2657
+
2658
+ QETH_CARD_TEXT(card, 2, "allcqdbf");
24952659
24962660 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
24972661 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
24982662 return 0;
24992663
2500
- QETH_DBF_TEXT(SETUP, 2, "inq");
2664
+ QETH_CARD_TEXT(card, 2, "inq");
25012665 card->qdio.in_q = qeth_alloc_qdio_queue();
25022666 if (!card->qdio.in_q)
25032667 goto out_nomem;
....@@ -2507,25 +2671,23 @@
25072671 goto out_freeinq;
25082672
25092673 /* outbound */
2510
- card->qdio.out_qs =
2511
- kcalloc(card->qdio.no_out_queues,
2512
- sizeof(struct qeth_qdio_out_q *),
2513
- GFP_KERNEL);
2514
- if (!card->qdio.out_qs)
2515
- goto out_freepool;
25162674 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2517
- card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
2518
- if (!card->qdio.out_qs[i])
2675
+ struct qeth_qdio_out_q *queue;
2676
+
2677
+ queue = qeth_alloc_output_queue();
2678
+ if (!queue)
25192679 goto out_freeoutq;
2520
- QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2521
- QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
2522
- card->qdio.out_qs[i]->queue_no = i;
2523
- /* give outbound qeth_qdio_buffers their qdio_buffers */
2524
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2525
- WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
2526
- if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
2527
- goto out_freeoutqbufs;
2528
- }
2680
+ QETH_CARD_TEXT_(card, 2, "outq %i", i);
2681
+ QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2682
+ card->qdio.out_qs[i] = queue;
2683
+ queue->card = card;
2684
+ queue->queue_no = i;
2685
+ INIT_LIST_HEAD(&queue->pending_bufs);
2686
+ spin_lock_init(&queue->lock);
2687
+ timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2688
+ queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2689
+ queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2690
+ queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
25292691 }
25302692
25312693 /* completion */
....@@ -2534,19 +2696,11 @@
25342696
25352697 return 0;
25362698
2537
-out_freeoutqbufs:
2538
- while (j > 0) {
2539
- --j;
2540
- kmem_cache_free(qeth_qdio_outbuf_cache,
2541
- card->qdio.out_qs[i]->bufs[j]);
2542
- card->qdio.out_qs[i]->bufs[j] = NULL;
2543
- }
25442699 out_freeoutq:
2545
- while (i > 0)
2700
+ while (i > 0) {
25462701 qeth_free_output_queue(card->qdio.out_qs[--i]);
2547
- kfree(card->qdio.out_qs);
2548
- card->qdio.out_qs = NULL;
2549
-out_freepool:
2702
+ card->qdio.out_qs[i] = NULL;
2703
+ }
25502704 qeth_free_buffer_pool(card);
25512705 out_freeinq:
25522706 qeth_free_qdio_queue(card->qdio.in_q);
....@@ -2556,7 +2710,7 @@
25562710 return -ENOMEM;
25572711 }
25582712
2559
-static void qeth_free_qdio_buffers(struct qeth_card *card)
2713
+static void qeth_free_qdio_queues(struct qeth_card *card)
25602714 {
25612715 int i, j;
25622716
....@@ -2565,7 +2719,6 @@
25652719 return;
25662720
25672721 qeth_free_cq(card);
2568
- cancel_delayed_work_sync(&card->buffer_reclaim_work);
25692722 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
25702723 if (card->qdio.in_q->bufs[j].rx_skb)
25712724 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
....@@ -2575,125 +2728,132 @@
25752728 /* inbound buffer pool */
25762729 qeth_free_buffer_pool(card);
25772730 /* free outbound qdio_qs */
2578
- if (card->qdio.out_qs) {
2579
- for (i = 0; i < card->qdio.no_out_queues; i++)
2580
- qeth_free_output_queue(card->qdio.out_qs[i]);
2581
- kfree(card->qdio.out_qs);
2582
- card->qdio.out_qs = NULL;
2731
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
2732
+ qeth_free_output_queue(card->qdio.out_qs[i]);
2733
+ card->qdio.out_qs[i] = NULL;
25832734 }
25842735 }
25852736
2586
-static void qeth_create_qib_param_field(struct qeth_card *card,
2587
- char *param_field)
2737
+static void qeth_fill_qib_parms(struct qeth_card *card,
2738
+ struct qeth_qib_parms *parms)
25882739 {
2740
+ struct qeth_qdio_out_q *queue;
2741
+ unsigned int i;
25892742
2590
- param_field[0] = _ascebc['P'];
2591
- param_field[1] = _ascebc['C'];
2592
- param_field[2] = _ascebc['I'];
2593
- param_field[3] = _ascebc['T'];
2594
- *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2595
- *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2596
- *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2597
-}
2743
+ parms->pcit_magic[0] = 'P';
2744
+ parms->pcit_magic[1] = 'C';
2745
+ parms->pcit_magic[2] = 'I';
2746
+ parms->pcit_magic[3] = 'T';
2747
+ ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2748
+ parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2749
+ parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2750
+ parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
25982751
2599
-static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2600
- char *param_field)
2601
-{
2602
- param_field[16] = _ascebc['B'];
2603
- param_field[17] = _ascebc['L'];
2604
- param_field[18] = _ascebc['K'];
2605
- param_field[19] = _ascebc['T'];
2606
- *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2607
- *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2608
- *((unsigned int *) (&param_field[28])) =
2609
- card->info.blkt.inter_packet_jumbo;
2752
+ parms->blkt_magic[0] = 'B';
2753
+ parms->blkt_magic[1] = 'L';
2754
+ parms->blkt_magic[2] = 'K';
2755
+ parms->blkt_magic[3] = 'T';
2756
+ ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2757
+ parms->blkt_total = card->info.blkt.time_total;
2758
+ parms->blkt_inter_packet = card->info.blkt.inter_packet;
2759
+ parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2760
+
2761
+ /* Prio-queueing implicitly uses the default priorities: */
2762
+ if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2763
+ return;
2764
+
2765
+ parms->pque_magic[0] = 'P';
2766
+ parms->pque_magic[1] = 'Q';
2767
+ parms->pque_magic[2] = 'U';
2768
+ parms->pque_magic[3] = 'E';
2769
+ ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2770
+ parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2771
+ parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2772
+
2773
+ qeth_for_each_output_queue(card, queue, i)
2774
+ parms->pque_priority[i] = queue->priority;
26102775 }
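/* A minimal sketch of what the magic tags look like once qeth_fill_qib_parms()
 * has run: ASCEBC() converts them in place, so the adapter sees EBCDIC rather
 * than ASCII. The helper below is illustrative only and assumes pcit_magic is
 * a plain 4-byte array.
 */
static inline bool qeth_qib_pcit_magic_ok(const struct qeth_qib_parms *parms)
{
	static const u8 ebcdic_pcit[4] = { 0xd7, 0xc3, 0xc9, 0xe3 };	/* 'PCIT' */

	return !memcmp(parms->pcit_magic, ebcdic_pcit, sizeof(ebcdic_pcit));
}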
26112776
26122777 static int qeth_qdio_activate(struct qeth_card *card)
26132778 {
2614
- QETH_DBF_TEXT(SETUP, 3, "qdioact");
2779
+ QETH_CARD_TEXT(card, 3, "qdioact");
26152780 return qdio_activate(CARD_DDEV(card));
26162781 }
26172782
26182783 static int qeth_dm_act(struct qeth_card *card)
26192784 {
2620
- int rc;
26212785 struct qeth_cmd_buffer *iob;
26222786
2623
- QETH_DBF_TEXT(SETUP, 2, "dmact");
2787
+ QETH_CARD_TEXT(card, 2, "dmact");
26242788
2625
- iob = qeth_wait_for_buffer(&card->write);
2626
- memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2789
+ iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2790
+ if (!iob)
2791
+ return -ENOMEM;
26272792
26282793 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
26292794 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
26302795 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
26312796 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2632
- rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2633
- return rc;
2797
+ return qeth_send_control_data(card, iob, NULL, NULL);
26342798 }
26352799
26362800 static int qeth_mpc_initialize(struct qeth_card *card)
26372801 {
26382802 int rc;
26392803
2640
- QETH_DBF_TEXT(SETUP, 2, "mpcinit");
2804
+ QETH_CARD_TEXT(card, 2, "mpcinit");
26412805
26422806 rc = qeth_issue_next_read(card);
26432807 if (rc) {
2644
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2808
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
26452809 return rc;
26462810 }
26472811 rc = qeth_cm_enable(card);
26482812 if (rc) {
2649
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2650
- goto out_qdio;
2813
+ QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2814
+ return rc;
26512815 }
26522816 rc = qeth_cm_setup(card);
26532817 if (rc) {
2654
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2655
- goto out_qdio;
2818
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2819
+ return rc;
26562820 }
26572821 rc = qeth_ulp_enable(card);
26582822 if (rc) {
2659
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
2660
- goto out_qdio;
2823
+ QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2824
+ return rc;
26612825 }
26622826 rc = qeth_ulp_setup(card);
26632827 if (rc) {
2664
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2665
- goto out_qdio;
2828
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2829
+ return rc;
26662830 }
2667
- rc = qeth_alloc_qdio_buffers(card);
2831
+ rc = qeth_alloc_qdio_queues(card);
26682832 if (rc) {
2669
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2670
- goto out_qdio;
2833
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2834
+ return rc;
26712835 }
26722836 rc = qeth_qdio_establish(card);
26732837 if (rc) {
2674
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2675
- qeth_free_qdio_buffers(card);
2676
- goto out_qdio;
2838
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2839
+ qeth_free_qdio_queues(card);
2840
+ return rc;
26772841 }
26782842 rc = qeth_qdio_activate(card);
26792843 if (rc) {
2680
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
2681
- goto out_qdio;
2844
+ QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2845
+ return rc;
26822846 }
26832847 rc = qeth_dm_act(card);
26842848 if (rc) {
2685
- QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
2686
- goto out_qdio;
2849
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2850
+ return rc;
26872851 }
26882852
26892853 return 0;
2690
-out_qdio:
2691
- qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2692
- qdio_free(CARD_DDEV(card));
2693
- return rc;
26942854 }
26952855
2696
-void qeth_print_status_message(struct qeth_card *card)
2856
+static void qeth_print_status_message(struct qeth_card *card)
26972857 {
26982858 switch (card->info.type) {
26992859 case QETH_CARD_TYPE_OSD:
....@@ -2709,10 +2869,9 @@
27092869 card->info.mcl_level[3]);
27102870 break;
27112871 }
2712
- /* fallthrough */
2872
+ fallthrough;
27132873 case QETH_CARD_TYPE_IQD:
2714
- if ((card->info.guestlan) ||
2715
- (card->info.mcl_level[0] & 0x80)) {
2874
+ if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
27162875 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
27172876 card->info.mcl_level[0]];
27182877 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
....@@ -2735,7 +2894,6 @@
27352894 (card->info.mcl_level[0]) ? ")" : "",
27362895 qeth_get_cardname_short(card));
27372896 }
2738
-EXPORT_SYMBOL_GPL(qeth_print_status_message);
27392897
27402898 static void qeth_initialize_working_pool_list(struct qeth_card *card)
27412899 {
....@@ -2752,19 +2910,16 @@
27522910 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
27532911 struct qeth_card *card)
27542912 {
2755
- struct list_head *plh;
27562913 struct qeth_buffer_pool_entry *entry;
27572914 int i, free;
2758
- struct page *page;
27592915
27602916 if (list_empty(&card->qdio.in_buf_pool.entry_list))
27612917 return NULL;
27622918
2763
- list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2764
- entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2919
+ list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
27652920 free = 1;
27662921 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2767
- if (page_count(virt_to_page(entry->elements[i])) > 1) {
2922
+ if (page_count(entry->elements[i]) > 1) {
27682923 free = 0;
27692924 break;
27702925 }
....@@ -2776,19 +2931,18 @@
27762931 }
27772932
27782933 /* no free buffer in pool so take first one and swap pages */
2779
- entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2780
- struct qeth_buffer_pool_entry, list);
2934
+ entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2935
+ struct qeth_buffer_pool_entry, list);
27812936 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2782
- if (page_count(virt_to_page(entry->elements[i])) > 1) {
2783
- page = alloc_page(GFP_ATOMIC);
2784
- if (!page) {
2937
+ if (page_count(entry->elements[i]) > 1) {
2938
+ struct page *page = dev_alloc_page();
2939
+
2940
+ if (!page)
27852941 return NULL;
2786
- } else {
2787
- free_page((unsigned long)entry->elements[i]);
2788
- entry->elements[i] = page_address(page);
2789
- if (card->options.performance_stats)
2790
- card->perf_stats.sg_alloc_page_rx++;
2791
- }
2942
+
2943
+ __free_page(entry->elements[i]);
2944
+ entry->elements[i] = page;
2945
+ QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
27922946 }
27932947 }
27942948 list_del_init(&entry->list);
....@@ -2798,19 +2952,24 @@
27982952 static int qeth_init_input_buffer(struct qeth_card *card,
27992953 struct qeth_qdio_buffer *buf)
28002954 {
2801
- struct qeth_buffer_pool_entry *pool_entry;
2955
+ struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
28022956 int i;
28032957
28042958 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
28052959 buf->rx_skb = netdev_alloc_skb(card->dev,
2806
- QETH_RX_PULL_LEN + ETH_HLEN);
2960
+ ETH_HLEN +
2961
+ sizeof(struct ipv6hdr));
28072962 if (!buf->rx_skb)
28082963 return -ENOMEM;
28092964 }
28102965
2811
- pool_entry = qeth_find_free_buffer_pool_entry(card);
2812
- if (!pool_entry)
2813
- return -ENOBUFS;
2966
+ if (!pool_entry) {
2967
+ pool_entry = qeth_find_free_buffer_pool_entry(card);
2968
+ if (!pool_entry)
2969
+ return -ENOBUFS;
2970
+
2971
+ buf->pool_entry = pool_entry;
2972
+ }
28142973
28152974 /*
28162975 * since the buffer is accessed only from the input_tasklet
....@@ -2818,11 +2977,10 @@
28182977	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
28192978 * buffers
28202979 */
2821
-
2822
- buf->pool_entry = pool_entry;
28232980 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
28242981 buf->buffer->element[i].length = PAGE_SIZE;
2825
- buf->buffer->element[i].addr = pool_entry->elements[i];
2982
+ buf->buffer->element[i].addr =
2983
+ page_to_phys(pool_entry->elements[i]);
28262984 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
28272985 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
28282986 else
....@@ -2832,12 +2990,25 @@
28322990 return 0;
28332991 }
28342992
2835
-int qeth_init_qdio_queues(struct qeth_card *card)
2993
+static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2994
+ struct qeth_qdio_out_q *queue)
28362995 {
2837
- int i, j;
2996
+ if (!IS_IQD(card) ||
2997
+ qeth_iqd_is_mcast_queue(card, queue) ||
2998
+ card->options.cq == QETH_CQ_ENABLED ||
2999
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
3000
+ return 1;
3001
+
3002
+ return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
3003
+}
3004
+
3005
+static int qeth_init_qdio_queues(struct qeth_card *card)
3006
+{
3007
+ unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
3008
+ unsigned int i;
28383009 int rc;
28393010
2840
- QETH_DBF_TEXT(SETUP, 2, "initqdqs");
3011
+ QETH_CARD_TEXT(card, 2, "initqdqs");
28413012
28423013 /* inbound queue */
28433014 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
....@@ -2845,18 +3016,16 @@
28453016
28463017 qeth_initialize_working_pool_list(card);
28473018	/* give only as many buffers to hardware as we have buffer pool entries */
2848
- for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
3019
+ for (i = 0; i < rx_bufs; i++) {
28493020 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
28503021 if (rc)
28513022 return rc;
28523023 }
28533024
2854
- card->qdio.in_q->next_buf_to_init =
2855
- card->qdio.in_buf_pool.buf_count - 1;
2856
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2857
- card->qdio.in_buf_pool.buf_count - 1);
3025
+ card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
3026
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
28583027 if (rc) {
2859
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3028
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
28603029 return rc;
28613030 }
28623031
....@@ -2868,83 +3037,101 @@
28683037
28693038 /* outbound queue */
28703039 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2871
- qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
2872
- QDIO_MAX_BUFFERS_PER_Q);
2873
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2874
- qeth_clear_output_buffer(card->qdio.out_qs[i],
2875
- card->qdio.out_qs[i]->bufs[j]);
2876
- }
2877
- card->qdio.out_qs[i]->card = card;
2878
- card->qdio.out_qs[i]->next_buf_to_fill = 0;
2879
- card->qdio.out_qs[i]->do_pack = 0;
2880
- atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
2881
- atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2882
- atomic_set(&card->qdio.out_qs[i]->state,
2883
- QETH_OUT_Q_UNLOCKED);
3040
+ struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3041
+
3042
+ qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3043
+ queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3044
+ queue->next_buf_to_fill = 0;
3045
+ queue->do_pack = 0;
3046
+ queue->prev_hdr = NULL;
3047
+ queue->coalesced_frames = 0;
3048
+ queue->bulk_start = 0;
3049
+ queue->bulk_count = 0;
3050
+ queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3051
+ atomic_set(&queue->used_buffers, 0);
3052
+ atomic_set(&queue->set_pci_flags_count, 0);
3053
+ netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
28843054 }
28853055 return 0;
28863056 }
2887
-EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
28883057
2889
-static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
3058
+static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3059
+ struct qeth_cmd_buffer *iob)
28903060 {
2891
- switch (link_type) {
2892
- case QETH_LINK_TYPE_HSTR:
2893
- return 2;
2894
- default:
2895
- return 1;
2896
- }
3061
+ qeth_mpc_finalize_cmd(card, iob);
3062
+
3063
+ /* override with IPA-specific values: */
3064
+ __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
28973065 }
28983066
2899
-static void qeth_fill_ipacmd_header(struct qeth_card *card,
2900
- struct qeth_ipa_cmd *cmd, __u8 command,
2901
- enum qeth_prot_versions prot)
2902
-{
2903
- memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2904
- cmd->hdr.command = command;
2905
- cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2906
- /* cmd->hdr.seqno is set by qeth_send_control_data() */
2907
- cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2908
- cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
2909
- if (card->options.layer2)
2910
- cmd->hdr.prim_version_no = 2;
2911
- else
2912
- cmd->hdr.prim_version_no = 1;
2913
- cmd->hdr.param_count = 1;
2914
- cmd->hdr.prot_version = prot;
2915
- cmd->hdr.ipa_supported = 0;
2916
- cmd->hdr.ipa_enabled = 0;
2917
-}
2918
-
2919
-struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2920
- enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2921
-{
2922
- struct qeth_cmd_buffer *iob;
2923
-
2924
- iob = qeth_get_buffer(&card->write);
2925
- if (iob) {
2926
- qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
2927
- } else {
2928
- dev_warn(&card->gdev->dev,
2929
- "The qeth driver ran out of channel command buffers\n");
2930
- QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
2931
- dev_name(&card->gdev->dev));
2932
- }
2933
-
2934
- return iob;
2935
-}
2936
-EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2937
-
2938
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
3067
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3068
+ u16 cmd_length,
3069
+ bool (*match)(struct qeth_cmd_buffer *iob,
3070
+ struct qeth_cmd_buffer *reply))
29393071 {
29403072 u8 prot_type = qeth_mpc_select_prot_type(card);
3073
+ u16 total_length = iob->length;
3074
+
3075
+ qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3076
+ iob->data);
3077
+ iob->finalize = qeth_ipa_finalize_cmd;
3078
+ iob->match = match;
29413079
29423080 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3081
+ memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
29433082 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3083
+ memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3084
+ memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
29443085 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
29453086 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3087
+ memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
29463088 }
29473089 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
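/* A worked sketch of the length bookkeeping above, assuming the caller is
 * qeth_ipa_alloc_cmd() below and that qeth_alloc_cmd() sets iob->length to the
 * requested size: the three PDU length fields all carry the IPA command
 * length, while LEN_TOTAL carries the value computed here. Illustrative only.
 */
static inline unsigned int qeth_ipa_total_len(unsigned int payload_length)
{
	unsigned int cmd_length = offsetof(struct qeth_ipa_cmd, data) +
				  payload_length;

	/* cmd_length is what ends up in LEN_PDU1/PDU2/PDU3: */
	return IPA_PDU_HEADER_SIZE + cmd_length;
}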
3090
+
3091
+static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3092
+ struct qeth_cmd_buffer *reply)
3093
+{
3094
+ struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3095
+
3096
+ return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3097
+}
3098
+
3099
+struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3100
+ enum qeth_ipa_cmds cmd_code,
3101
+ enum qeth_prot_versions prot,
3102
+ unsigned int data_length)
3103
+{
3104
+ struct qeth_cmd_buffer *iob;
3105
+ struct qeth_ipacmd_hdr *hdr;
3106
+
3107
+ data_length += offsetof(struct qeth_ipa_cmd, data);
3108
+ iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3109
+ QETH_IPA_TIMEOUT);
3110
+ if (!iob)
3111
+ return NULL;
3112
+
3113
+ qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3114
+
3115
+ hdr = &__ipa_cmd(iob)->hdr;
3116
+ hdr->command = cmd_code;
3117
+ hdr->initiator = IPA_CMD_INITIATOR_HOST;
3118
+ /* hdr->seqno is set by qeth_send_control_data() */
3119
+ hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3120
+ hdr->rel_adapter_no = (u8) card->dev->dev_port;
3121
+ hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3122
+ hdr->param_count = 1;
3123
+ hdr->prot_version = prot;
3124
+ return iob;
3125
+}
3126
+EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3127
+
3128
+static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3129
+ struct qeth_reply *reply, unsigned long data)
3130
+{
3131
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3132
+
3133
+ return (cmd->hdr.return_code) ? -EIO : 0;
3134
+}
29483135
29493136 /**
29503137 * qeth_send_ipa_cmd() - send an IPA command
....@@ -2960,9 +3147,15 @@
29603147 int rc;
29613148
29623149 QETH_CARD_TEXT(card, 4, "sendipa");
2963
- qeth_prepare_ipa_cmd(card, iob);
2964
- rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
2965
- iob, reply_cb, reply_param);
3150
+
3151
+ if (card->read_or_write_problem) {
3152
+ qeth_put_cmd(iob);
3153
+ return -EIO;
3154
+ }
3155
+
3156
+ if (reply_cb == NULL)
3157
+ reply_cb = qeth_send_ipa_cmd_cb;
3158
+ rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
29663159 if (rc == -ETIME) {
29673160 qeth_clear_ipacmd_list(card);
29683161 qeth_schedule_recovery(card);
....@@ -2971,18 +3164,27 @@
29713164 }
29723165 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
29733166
3167
+static int qeth_send_startlan_cb(struct qeth_card *card,
3168
+ struct qeth_reply *reply, unsigned long data)
3169
+{
3170
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3171
+
3172
+ if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3173
+ return -ENETDOWN;
3174
+
3175
+ return (cmd->hdr.return_code) ? -EIO : 0;
3176
+}
3177
+
29743178 static int qeth_send_startlan(struct qeth_card *card)
29753179 {
2976
- int rc;
29773180 struct qeth_cmd_buffer *iob;
29783181
2979
- QETH_DBF_TEXT(SETUP, 2, "strtlan");
3182
+ QETH_CARD_TEXT(card, 2, "strtlan");
29803183
2981
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
3184
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
29823185 if (!iob)
29833186 return -ENOMEM;
2984
- rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2985
- return rc;
3187
+ return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
29863188 }
29873189
29883190 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
....@@ -2997,37 +3199,44 @@
29973199 struct qeth_reply *reply, unsigned long data)
29983200 {
29993201 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3202
+ struct qeth_query_cmds_supp *query_cmd;
30003203
30013204 QETH_CARD_TEXT(card, 3, "quyadpcb");
30023205 if (qeth_setadpparms_inspect_rc(cmd))
3003
- return 0;
3206
+ return -EIO;
30043207
3005
- if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
3006
- card->info.link_type =
3007
- cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
3008
- QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
3208
+ query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3209
+ if (query_cmd->lan_type & 0x7f) {
3210
+ if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3211
+ return -EPROTONOSUPPORT;
3212
+
3213
+ card->info.link_type = query_cmd->lan_type;
3214
+ QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
30093215 }
3010
- card->options.adp.supported_funcs =
3011
- cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
3216
+
3217
+ card->options.adp.supported = query_cmd->supported_cmds;
30123218 return 0;
30133219 }
30143220
30153221 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3016
- __u32 command, __u32 cmdlen)
3222
+ enum qeth_ipa_setadp_cmd adp_cmd,
3223
+ unsigned int data_length)
30173224 {
3225
+ struct qeth_ipacmd_setadpparms_hdr *hdr;
30183226 struct qeth_cmd_buffer *iob;
3019
- struct qeth_ipa_cmd *cmd;
30203227
3021
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
3022
- QETH_PROT_IPV4);
3023
- if (iob) {
3024
- cmd = __ipa_cmd(iob);
3025
- cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
3026
- cmd->data.setadapterparms.hdr.command_code = command;
3027
- cmd->data.setadapterparms.hdr.used_total = 1;
3028
- cmd->data.setadapterparms.hdr.seq_no = 1;
3029
- }
3228
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3229
+ data_length +
3230
+ offsetof(struct qeth_ipacmd_setadpparms,
3231
+ data));
3232
+ if (!iob)
3233
+ return NULL;
30303234
3235
+ hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3236
+ hdr->cmdlength = sizeof(*hdr) + data_length;
3237
+ hdr->command_code = adp_cmd;
3238
+ hdr->used_total = 1;
3239
+ hdr->seq_no = 1;
30313240 return iob;
30323241 }
30333242
....@@ -3038,7 +3247,7 @@
30383247
30393248 QETH_CARD_TEXT(card, 3, "queryadp");
30403249 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3041
- sizeof(struct qeth_ipacmd_setadpparms));
3250
+ SETADP_DATA_SIZEOF(query_cmds_supp));
30423251 if (!iob)
30433252 return -ENOMEM;
30443253 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
....@@ -3050,36 +3259,32 @@
30503259 {
30513260 struct qeth_ipa_cmd *cmd;
30523261
3053
- QETH_DBF_TEXT(SETUP, 2, "qipasscb");
3262
+ QETH_CARD_TEXT(card, 2, "qipasscb");
30543263
30553264 cmd = (struct qeth_ipa_cmd *) data;
30563265
30573266 switch (cmd->hdr.return_code) {
3267
+ case IPA_RC_SUCCESS:
3268
+ break;
30583269 case IPA_RC_NOTSUPP:
30593270 case IPA_RC_L2_UNSUPPORTED_CMD:
3060
- QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
3061
- card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
3062
- card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
3063
- return -0;
3271
+ QETH_CARD_TEXT(card, 2, "ipaunsup");
3272
+ card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3273
+ card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3274
+ return -EOPNOTSUPP;
30643275 default:
3065
- if (cmd->hdr.return_code) {
3066
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
3067
- "rc=%d\n",
3068
- dev_name(&card->gdev->dev),
3069
- cmd->hdr.return_code);
3070
- return 0;
3071
- }
3276
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3277
+ CARD_DEVID(card), cmd->hdr.return_code);
3278
+ return -EIO;
30723279 }
30733280
3074
- if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
3075
- card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
3076
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
3077
- } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
3078
- card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
3079
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
3080
- } else
3081
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
3082
- "\n", dev_name(&card->gdev->dev));
3281
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3282
+ card->options.ipa4 = cmd->hdr.assists;
3283
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3284
+ card->options.ipa6 = cmd->hdr.assists;
3285
+ else
3286
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3287
+ CARD_DEVID(card));
30833288 return 0;
30843289 }
30853290
....@@ -3089,8 +3294,8 @@
30893294 int rc;
30903295 struct qeth_cmd_buffer *iob;
30913296
3092
- QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
3093
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
3297
+ QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3298
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
30943299 if (!iob)
30953300 return -ENOMEM;
30963301 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
....@@ -3106,7 +3311,7 @@
31063311
31073312 QETH_CARD_TEXT(card, 2, "qswiatcb");
31083313 if (qeth_setadpparms_inspect_rc(cmd))
3109
- return 0;
3314
+ return -EIO;
31103315
31113316 sw_info = (struct qeth_switch_info *)reply->param;
31123317 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
....@@ -3127,41 +3332,55 @@
31273332 return -EOPNOTSUPP;
31283333 if (!netif_carrier_ok(card->dev))
31293334 return -ENOMEDIUM;
3130
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
3131
- sizeof(struct qeth_ipacmd_setadpparms_hdr));
3335
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
31323336 if (!iob)
31333337 return -ENOMEM;
31343338 return qeth_send_ipa_cmd(card, iob,
31353339 qeth_query_switch_attributes_cb, sw_info);
31363340 }
31373341
3342
+struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3343
+ enum qeth_diags_cmds sub_cmd,
3344
+ unsigned int data_length)
3345
+{
3346
+ struct qeth_ipacmd_diagass *cmd;
3347
+ struct qeth_cmd_buffer *iob;
3348
+
3349
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3350
+ DIAG_HDR_LEN + data_length);
3351
+ if (!iob)
3352
+ return NULL;
3353
+
3354
+ cmd = &__ipa_cmd(iob)->data.diagass;
3355
+ cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3356
+ cmd->subcmd = sub_cmd;
3357
+ return iob;
3358
+}
3359
+EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3360
+
31383361 static int qeth_query_setdiagass_cb(struct qeth_card *card,
31393362 struct qeth_reply *reply, unsigned long data)
31403363 {
3141
- struct qeth_ipa_cmd *cmd;
3142
- __u16 rc;
3364
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3365
+ u16 rc = cmd->hdr.return_code;
31433366
3144
- cmd = (struct qeth_ipa_cmd *)data;
3145
- rc = cmd->hdr.return_code;
3146
- if (rc)
3367
+ if (rc) {
31473368 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3148
- else
3149
- card->info.diagass_support = cmd->data.diagass.ext;
3369
+ return -EIO;
3370
+ }
3371
+
3372
+ card->info.diagass_support = cmd->data.diagass.ext;
31503373 return 0;
31513374 }
31523375
31533376 static int qeth_query_setdiagass(struct qeth_card *card)
31543377 {
31553378 struct qeth_cmd_buffer *iob;
3156
- struct qeth_ipa_cmd *cmd;
31573379
3158
- QETH_DBF_TEXT(SETUP, 2, "qdiagass");
3159
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3380
+ QETH_CARD_TEXT(card, 2, "qdiagass");
3381
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
31603382 if (!iob)
31613383 return -ENOMEM;
3162
- cmd = __ipa_cmd(iob);
3163
- cmd->data.diagass.subcmd_len = 16;
3164
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
31653384 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
31663385 }
31673386
....@@ -3187,19 +3406,18 @@
31873406 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
31883407 }
31893408 free_page(info);
3190
- return;
31913409 }
31923410
31933411 static int qeth_hw_trap_cb(struct qeth_card *card,
31943412 struct qeth_reply *reply, unsigned long data)
31953413 {
3196
- struct qeth_ipa_cmd *cmd;
3197
- __u16 rc;
3414
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3415
+ u16 rc = cmd->hdr.return_code;
31983416
3199
- cmd = (struct qeth_ipa_cmd *)data;
3200
- rc = cmd->hdr.return_code;
3201
- if (rc)
3417
+ if (rc) {
32023418 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3419
+ return -EIO;
3420
+ }
32033421 return 0;
32043422 }
32053423
....@@ -3208,13 +3426,11 @@
32083426 struct qeth_cmd_buffer *iob;
32093427 struct qeth_ipa_cmd *cmd;
32103428
3211
- QETH_DBF_TEXT(SETUP, 2, "diagtrap");
3212
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3429
+ QETH_CARD_TEXT(card, 2, "diagtrap");
3430
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
32133431 if (!iob)
32143432 return -ENOMEM;
32153433 cmd = __ipa_cmd(iob);
3216
- cmd->data.diagass.subcmd_len = 80;
3217
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
32183434 cmd->data.diagass.type = 1;
32193435 cmd->data.diagass.action = action;
32203436 switch (action) {
....@@ -3233,7 +3449,6 @@
32333449 }
32343450 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
32353451 }
3236
-EXPORT_SYMBOL_GPL(qeth_hw_trap);
32373452
32383453 static int qeth_check_qdio_errors(struct qeth_card *card,
32393454 struct qdio_buffer *buf,
....@@ -3248,7 +3463,7 @@
32483463 buf->element[14].sflags);
32493464 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
32503465 if ((buf->element[15].sflags) == 0x12) {
3251
- card->stats.rx_dropped++;
3466
+ QETH_CARD_STAT_INC(card, rx_fifo_errors);
32523467 return 0;
32533468 } else
32543469 return 1;
....@@ -3256,26 +3471,21 @@
32563471 return 0;
32573472 }
32583473
3259
-static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3474
+static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3475
+ unsigned int count)
32603476 {
32613477 struct qeth_qdio_q *queue = card->qdio.in_q;
32623478 struct list_head *lh;
3263
- int count;
32643479 int i;
32653480 int rc;
32663481 int newcount = 0;
32673482
3268
- count = (index < queue->next_buf_to_init)?
3269
- card->qdio.in_buf_pool.buf_count -
3270
- (queue->next_buf_to_init - index) :
3271
- card->qdio.in_buf_pool.buf_count -
3272
- (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
32733483 /* only requeue at a certain threshold to avoid SIGAs */
32743484 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
32753485 for (i = queue->next_buf_to_init;
32763486 i < queue->next_buf_to_init + count; ++i) {
32773487 if (qeth_init_input_buffer(card,
3278
- &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
3488
+ &queue->bufs[QDIO_BUFNR(i)])) {
32793489 break;
32803490 } else {
32813491 newcount++;
....@@ -3297,47 +3507,36 @@
32973507 i++;
32983508 if (i == card->qdio.in_buf_pool.buf_count) {
32993509 QETH_CARD_TEXT(card, 2, "qsarbw");
3300
- card->reclaim_index = index;
33013510 schedule_delayed_work(
33023511 &card->buffer_reclaim_work,
33033512 QETH_RECLAIM_WORK_TIME);
33043513 }
3305
- return;
3514
+ return 0;
33063515 }
33073516
3308
- /*
3309
- * according to old code it should be avoided to requeue all
3310
- * 128 buffers in order to benefit from PCI avoidance.
3311
- * this function keeps at least one buffer (the buffer at
3312
- * 'index') un-requeued -> this buffer is the first buffer that
3313
- * will be requeued the next time
3314
- */
3315
- if (card->options.performance_stats) {
3316
- card->perf_stats.inbound_do_qdio_cnt++;
3317
- card->perf_stats.inbound_do_qdio_start_time =
3318
- qeth_get_micros();
3319
- }
33203517 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
33213518 queue->next_buf_to_init, count);
3322
- if (card->options.performance_stats)
3323
- card->perf_stats.inbound_do_qdio_time +=
3324
- qeth_get_micros() -
3325
- card->perf_stats.inbound_do_qdio_start_time;
33263519 if (rc) {
33273520 QETH_CARD_TEXT(card, 2, "qinberr");
33283521 }
3329
- queue->next_buf_to_init = (queue->next_buf_to_init + count) %
3330
- QDIO_MAX_BUFFERS_PER_Q;
3522
+ queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3523
+ count);
3524
+ return count;
33313525 }
3526
+
3527
+ return 0;
33323528 }
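/* The refill path wraps all ring indices with QDIO_BUFNR(); a short sketch of
 * the arithmetic, assuming QDIO_BUFNR() masks with QDIO_MAX_BUFFERS_PER_Q - 1:
 * refilling 16 buffers from index 120 touches slots 120..127 and 0..7, and
 * leaves next_buf_to_init at QDIO_BUFNR(120 + 16) == 8.
 */
static inline unsigned int qeth_rx_next_init(unsigned int next_buf_to_init,
					     unsigned int count)
{
	return QDIO_BUFNR(next_buf_to_init + count);	/* illustrative only */
}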
33333529
33343530 static void qeth_buffer_reclaim_work(struct work_struct *work)
33353531 {
3336
- struct qeth_card *card = container_of(work, struct qeth_card,
3337
- buffer_reclaim_work.work);
3532
+ struct qeth_card *card = container_of(to_delayed_work(work),
3533
+ struct qeth_card,
3534
+ buffer_reclaim_work);
33383535
3339
- QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3340
- qeth_queue_input_buffer(card, card->reclaim_index);
3536
+ local_bh_disable();
3537
+ napi_schedule(&card->napi);
3538
+ /* kick-start the NAPI softirq: */
3539
+ local_bh_enable();
33413540 }
33423541
33433542 static void qeth_handle_send_error(struct qeth_card *card,
....@@ -3346,13 +3545,6 @@
33463545 int sbalf15 = buffer->buffer->element[15].sflags;
33473546
33483547 QETH_CARD_TEXT(card, 6, "hdsnderr");
3349
- if (card->info.type == QETH_CARD_TYPE_IQD) {
3350
- if (sbalf15 == 0) {
3351
- qdio_err = 0;
3352
- } else {
3353
- qdio_err = 1;
3354
- }
3355
- }
33563548 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
33573549
33583550 if (!qdio_err)
....@@ -3382,7 +3574,7 @@
33823574 /* it's a packing buffer */
33833575 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
33843576 queue->next_buf_to_fill =
3385
- (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3577
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
33863578 return 1;
33873579 }
33883580 return 0;
....@@ -3399,8 +3591,7 @@
33993591 >= QETH_HIGH_WATERMARK_PACK){
34003592 /* switch non-PACKING -> PACKING */
34013593 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3402
- if (queue->card->options.performance_stats)
3403
- queue->card->perf_stats.sc_dp_p++;
3594
+ QETH_TXQ_STAT_INC(queue, packing_mode_switch);
34043595 queue->do_pack = 1;
34053596 }
34063597 }
....@@ -3419,8 +3610,7 @@
34193610 <= QETH_LOW_WATERMARK_PACK) {
34203611 /* switch PACKING -> non-PACKING */
34213612 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3422
- if (queue->card->options.performance_stats)
3423
- queue->card->perf_stats.sc_p_dp++;
3613
+ QETH_TXQ_STAT_INC(queue, packing_mode_switch);
34243614 queue->do_pack = 0;
34253615 return qeth_prep_flush_pack_buffer(queue);
34263616 }
....@@ -3431,23 +3621,31 @@
34313621 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
34323622 int count)
34333623 {
3434
- struct qeth_qdio_out_buffer *buf;
3624
+ struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3625
+ unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3626
+ struct qeth_card *card = queue->card;
34353627 int rc;
34363628 int i;
3437
- unsigned int qdio_flags;
34383629
34393630 for (i = index; i < index + count; ++i) {
3440
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3631
+ unsigned int bidx = QDIO_BUFNR(i);
3632
+ struct sk_buff *skb;
3633
+
34413634 buf = queue->bufs[bidx];
34423635 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
34433636 SBAL_EFLAGS_LAST_ENTRY;
3637
+ queue->coalesced_frames += buf->frames;
34443638
34453639 if (queue->bufstates)
34463640 queue->bufstates[bidx].user = buf;
34473641
3448
- if (queue->card->info.type == QETH_CARD_TYPE_IQD)
3449
- continue;
3642
+ if (IS_IQD(card)) {
3643
+ skb_queue_walk(&buf->skb_list, skb)
3644
+ skb_tx_timestamp(skb);
3645
+ }
3646
+ }
34503647
3648
+ if (!IS_IQD(card)) {
34513649 if (!queue->do_pack) {
34523650 if ((atomic_read(&queue->used_buffers) >=
34533651 (QETH_HIGH_WATERMARK_PACK -
....@@ -3472,27 +3670,30 @@
34723670 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
34733671 }
34743672 }
3673
+
3674
+ if (atomic_read(&queue->set_pci_flags_count))
3675
+ qdio_flags |= QDIO_FLAG_PCI_OUT;
34753676 }
34763677
3477
- netif_trans_update(queue->card->dev);
3478
- if (queue->card->options.performance_stats) {
3479
- queue->card->perf_stats.outbound_do_qdio_cnt++;
3480
- queue->card->perf_stats.outbound_do_qdio_start_time =
3481
- qeth_get_micros();
3482
- }
3483
- qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3484
- if (atomic_read(&queue->set_pci_flags_count))
3485
- qdio_flags |= QDIO_FLAG_PCI_OUT;
3486
- atomic_add(count, &queue->used_buffers);
3487
-
3678
+ QETH_TXQ_STAT_INC(queue, doorbell);
34883679 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
34893680 queue->queue_no, index, count);
3490
- if (queue->card->options.performance_stats)
3491
- queue->card->perf_stats.outbound_do_qdio_time +=
3492
- qeth_get_micros() -
3493
- queue->card->perf_stats.outbound_do_qdio_start_time;
3681
+
3682
+ /* Fake the TX completion interrupt: */
3683
+ if (IS_IQD(card)) {
3684
+ unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
3685
+ unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
3686
+
3687
+ if (frames && queue->coalesced_frames >= frames) {
3688
+ napi_schedule(&queue->napi);
3689
+ queue->coalesced_frames = 0;
3690
+ QETH_TXQ_STAT_INC(queue, coal_frames);
3691
+ } else if (usecs) {
3692
+ qeth_tx_arm_timer(queue, usecs);
3693
+ }
3694
+ }
3695
+
34943696 if (rc) {
3495
- queue->card->stats.tx_errors += count;
34963697 /* ignore temporary SIGA errors without busy condition */
34973698 if (rc == -ENOBUFS)
34983699 return;
....@@ -3507,56 +3708,52 @@
35073708 qeth_schedule_recovery(queue->card);
35083709 return;
35093710 }
3510
- if (queue->card->options.performance_stats)
3511
- queue->card->perf_stats.bufs_sent += count;
3711
+}
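/* A hedged illustration of the IQD completion strategy above; the numbers are
 * examples, not the driver's defaults. With max_coalesced_frames at 32 and
 * coalesce_usecs at 25, NAPI is kicked once 32 frames have been queued since
 * the last completion pass, while smaller bursts merely arm the 25 usec timer.
 */
static inline bool qeth_tx_wants_napi(unsigned int coalesced_frames)
{
	unsigned int example_max_frames = 32;	/* illustrative threshold */

	return coalesced_frames >= example_max_frames;
}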
3712
+
3713
+static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3714
+{
3715
+ qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3716
+
3717
+ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3718
+ queue->prev_hdr = NULL;
3719
+ queue->bulk_count = 0;
35123720 }
35133721
35143722 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
35153723 {
3516
- int index;
3517
- int flush_cnt = 0;
3518
- int q_was_packing = 0;
3519
-
35203724 /*
35213725	 * check if we have to switch to non-packing mode or if
35223726 * we have to get a pci flag out on the queue
35233727 */
35243728 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
35253729 !atomic_read(&queue->set_pci_flags_count)) {
3526
- if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3527
- QETH_OUT_Q_UNLOCKED) {
3528
- /*
3529
- * If we get in here, there was no action in
3530
- * do_send_packet. So, we check if there is a
3531
- * packing buffer to be flushed here.
3532
- */
3533
- netif_stop_queue(queue->card->dev);
3534
- index = queue->next_buf_to_fill;
3535
- q_was_packing = queue->do_pack;
3536
- /* queue->do_pack may change */
3537
- barrier();
3538
- flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3539
- if (!flush_cnt &&
3540
- !atomic_read(&queue->set_pci_flags_count))
3541
- flush_cnt += qeth_prep_flush_pack_buffer(queue);
3542
- if (queue->card->options.performance_stats &&
3543
- q_was_packing)
3544
- queue->card->perf_stats.bufs_sent_pack +=
3545
- flush_cnt;
3546
- if (flush_cnt)
3547
- qeth_flush_buffers(queue, index, flush_cnt);
3548
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3730
+ unsigned int index, flush_cnt;
3731
+ bool q_was_packing;
3732
+
3733
+ spin_lock(&queue->lock);
3734
+
3735
+ index = queue->next_buf_to_fill;
3736
+ q_was_packing = queue->do_pack;
3737
+
3738
+ flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3739
+ if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3740
+ flush_cnt = qeth_prep_flush_pack_buffer(queue);
3741
+
3742
+ if (flush_cnt) {
3743
+ qeth_flush_buffers(queue, index, flush_cnt);
3744
+ if (q_was_packing)
3745
+ QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
35493746 }
3747
+
3748
+ spin_unlock(&queue->lock);
35503749 }
35513750 }
35523751
3553
-static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3554
- unsigned long card_ptr)
3752
+static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
35553753 {
35563754 struct qeth_card *card = (struct qeth_card *)card_ptr;
35573755
3558
- if (card->dev->flags & IFF_UP)
3559
- napi_schedule(&card->napi);
3756
+ napi_schedule_irqoff(&card->napi);
35603757 }
35613758
35623759 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
....@@ -3572,13 +3769,7 @@
35723769 goto out;
35733770 }
35743771
3575
- if (card->state != CARD_STATE_DOWN &&
3576
- card->state != CARD_STATE_RECOVER) {
3577
- rc = -1;
3578
- goto out;
3579
- }
3580
-
3581
- qeth_free_qdio_buffers(card);
3772
+ qeth_free_qdio_queues(card);
35823773 card->options.cq = cq;
35833774 rc = 0;
35843775 }
....@@ -3596,34 +3787,24 @@
35963787 int i;
35973788 int rc;
35983789
3599
- if (!qeth_is_cq(card, queue))
3600
- goto out;
3601
-
36023790 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
36033791 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
36043792 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
36053793
36063794 if (qdio_err) {
3607
- netif_stop_queue(card->dev);
3795
+ netif_tx_stop_all_queues(card->dev);
36083796 qeth_schedule_recovery(card);
3609
- goto out;
3610
- }
3611
-
3612
- if (card->options.performance_stats) {
3613
- card->perf_stats.cq_cnt++;
3614
- card->perf_stats.cq_start_time = qeth_get_micros();
3797
+ return;
36153798 }
36163799
36173800 for (i = first_element; i < first_element + count; ++i) {
3618
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3619
- struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3801
+ struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
36203802 int e = 0;
36213803
36223804 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
36233805 buffer->element[e].addr) {
3624
- unsigned long phys_aob_addr;
3806
+ unsigned long phys_aob_addr = buffer->element[e].addr;
36253807
3626
- phys_aob_addr = (unsigned long) buffer->element[e].addr;
36273808 qeth_qdio_handle_aob(card, phys_aob_addr);
36283809 ++e;
36293810 }
....@@ -3637,18 +3818,8 @@
36373818 "QDIO reported an error, rc=%i\n", rc);
36383819 QETH_CARD_TEXT(card, 2, "qcqherr");
36393820 }
3640
- card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3641
- + count) % QDIO_MAX_BUFFERS_PER_Q;
36423821
3643
- netif_wake_queue(card->dev);
3644
-
3645
- if (card->options.performance_stats) {
3646
- int delta_t = qeth_get_micros();
3647
- delta_t -= card->perf_stats.cq_start_time;
3648
- card->perf_stats.cq_time += delta_t;
3649
- }
3650
-out:
3651
- return;
3822
+ cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
36523823 }
36533824
36543825 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
....@@ -3661,9 +3832,7 @@
36613832 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
36623833 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
36633834
3664
- if (qeth_is_cq(card, queue))
3665
- qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3666
- else if (qdio_err)
3835
+ if (qdio_err)
36673836 qeth_schedule_recovery(card);
36683837 }
36693838
....@@ -3674,92 +3843,49 @@
36743843 {
36753844 struct qeth_card *card = (struct qeth_card *) card_ptr;
36763845 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3677
- struct qeth_qdio_out_buffer *buffer;
3846
+ struct net_device *dev = card->dev;
3847
+ struct netdev_queue *txq;
36783848 int i;
36793849
36803850 QETH_CARD_TEXT(card, 6, "qdouhdl");
36813851 if (qdio_error & QDIO_ERROR_FATAL) {
36823852 QETH_CARD_TEXT(card, 2, "achkcond");
3683
- netif_stop_queue(card->dev);
3853
+ netif_tx_stop_all_queues(dev);
36843854 qeth_schedule_recovery(card);
36853855 return;
36863856 }
3687
- if (card->options.performance_stats) {
3688
- card->perf_stats.outbound_handler_cnt++;
3689
- card->perf_stats.outbound_handler_start_time =
3690
- qeth_get_micros();
3691
- }
3857
+
36923858 for (i = first_element; i < (first_element + count); ++i) {
3693
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3694
- buffer = queue->bufs[bidx];
3695
- qeth_handle_send_error(card, buffer, qdio_error);
3859
+ struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
36963860
3697
- if (queue->bufstates &&
3698
- (queue->bufstates[bidx].flags &
3699
- QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
3700
- WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
3701
-
3702
- if (atomic_cmpxchg(&buffer->state,
3703
- QETH_QDIO_BUF_PRIMED,
3704
- QETH_QDIO_BUF_PENDING) ==
3705
- QETH_QDIO_BUF_PRIMED) {
3706
- qeth_notify_skbs(queue, buffer,
3707
- TX_NOTIFY_PENDING);
3708
- }
3709
- QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
3710
-
3711
- /* prepare the queue slot for re-use: */
3712
- qeth_scrub_qdio_buffer(buffer->buffer,
3713
- QETH_MAX_BUFFER_ELEMENTS(card));
3714
- if (qeth_init_qdio_out_buf(queue, bidx)) {
3715
- QETH_CARD_TEXT(card, 2, "outofbuf");
3716
- qeth_schedule_recovery(card);
3717
- }
3718
- } else {
3719
- if (card->options.cq == QETH_CQ_ENABLED) {
3720
- enum iucv_tx_notify n;
3721
-
3722
- n = qeth_compute_cq_notification(
3723
- buffer->buffer->element[15].sflags, 0);
3724
- qeth_notify_skbs(queue, buffer, n);
3725
- }
3726
-
3727
- qeth_clear_output_buffer(queue, buffer);
3728
- }
3729
- qeth_cleanup_handled_pending(queue, bidx, 0);
3861
+ qeth_handle_send_error(card, buf, qdio_error);
3862
+ qeth_clear_output_buffer(queue, buf, qdio_error, 0);
37303863 }
3864
+
37313865 atomic_sub(count, &queue->used_buffers);
3732
- /* check if we need to do something on this outbound queue */
3733
- if (card->info.type != QETH_CARD_TYPE_IQD)
3734
- qeth_check_outbound_queue(queue);
3866
+ qeth_check_outbound_queue(queue);
37353867
3736
- netif_wake_queue(queue->card->dev);
3737
- if (card->options.performance_stats)
3738
- card->perf_stats.outbound_handler_time += qeth_get_micros() -
3739
- card->perf_stats.outbound_handler_start_time;
3740
-}
3741
-
3742
-/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
3743
-static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
3744
-{
3745
- if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
3746
- return 2;
3747
- return queue_num;
3868
+ txq = netdev_get_tx_queue(dev, __queue);
3869
+	/* xmit may have observed the full-condition, but not yet stopped the
3870
+	 * txq, in which case the code below won't trigger. So before returning,
3871
+ * xmit will re-check the txq's fill level and wake it up if needed.
3872
+ */
3873
+ if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3874
+ netif_tx_wake_queue(txq);
37483875 }
37493876
37503877 /**
37513878 * Note: Function assumes that we have 4 outbound queues.
37523879 */
3753
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3754
- int ipv)
3880
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
37553881 {
3756
- __be16 *tci;
3882
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
37573883 u8 tos;
37583884
37593885 switch (card->qdio.do_prio_queueing) {
37603886 case QETH_PRIO_Q_ING_TOS:
37613887 case QETH_PRIO_Q_ING_PREC:
3762
- switch (ipv) {
3888
+ switch (qeth_get_ip_version(skb)) {
37633889 case 4:
37643890 tos = ipv4_get_dsfield(ip_hdr(skb));
37653891 break;
....@@ -3770,9 +3896,9 @@
37703896 return card->qdio.default_out_queue;
37713897 }
37723898 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3773
- return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
3899
+ return ~tos >> 6 & 3;
37743900 if (tos & IPTOS_MINCOST)
3775
- return qeth_cut_iqd_prio(card, 3);
3901
+ return 3;
37763902 if (tos & IPTOS_RELIABILITY)
37773903 return 2;
37783904 if (tos & IPTOS_THROUGHPUT)
....@@ -3783,13 +3909,14 @@
37833909 case QETH_PRIO_Q_ING_SKB:
37843910 if (skb->priority > 5)
37853911 return 0;
3786
- return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
3912
+ return ~skb->priority >> 1 & 3;
37873913 case QETH_PRIO_Q_ING_VLAN:
3788
- tci = &((struct ethhdr *)skb->data)->h_proto;
3789
- if (be16_to_cpu(*tci) == ETH_P_8021Q)
3790
- return qeth_cut_iqd_prio(card,
3791
- ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
3914
+ if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3915
+ return ~ntohs(veth->h_vlan_TCI) >>
3916
+ (VLAN_PRIO_SHIFT + 1) & 3;
37923917 break;
3918
+ case QETH_PRIO_Q_ING_FIXED:
3919
+ return card->qdio.default_out_queue;
37933920 default:
37943921 break;
37953922 }
....@@ -3804,12 +3931,12 @@
38043931 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
38053932 * fragmented part of the SKB. Returns zero for linear SKB.
38063933 */
3807
-int qeth_get_elements_for_frags(struct sk_buff *skb)
3934
+static int qeth_get_elements_for_frags(struct sk_buff *skb)
38083935 {
38093936 int cnt, elements = 0;
38103937
38113938 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3812
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
3939
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
38133940
38143941 elements += qeth_get_elements_for_range(
38153942 (addr_t)skb_frag_address(frag),
....@@ -3817,9 +3944,17 @@
38173944 }
38183945 return elements;
38193946 }
3820
-EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
38213947
3822
-static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
3948
+/**
3949
+ * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3950
+ * to transmit an skb.
3951
+ * @skb: the skb to operate on.
3952
+ * @data_offset: skip this part of the skb's linear data
3953
+ *
3954
+ * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3955
+ * skb's data (both its linear part and paged fragments).
3956
+ */
3957
+unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
38233958 {
38243959 unsigned int elements = qeth_get_elements_for_frags(skb);
38253960 addr_t end = (addr_t)skb->data + skb_headlen(skb);
....@@ -3829,54 +3964,10 @@
38293964 elements += qeth_get_elements_for_range(start, end);
38303965 return elements;
38313966 }
3967
+EXPORT_SYMBOL_GPL(qeth_count_elements);
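/* A minimal usage sketch (the caller below is hypothetical): reject an skb
 * whose data would need more SBAL elements than the queue offers, keeping one
 * element spare for a dedicated HW header.
 */
static inline bool qeth_skb_fits_queue(struct qeth_qdio_out_q *queue,
				       struct sk_buff *skb)
{
	return qeth_count_elements(skb, 0) + 1 <= queue->max_elements;
}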
38323968
3833
-/**
3834
- * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
3835
- * @card: qeth card structure, to check max. elems.
3836
- * @skb: SKB address
3837
- * @extra_elems: extra elems needed, to check against max.
3838
- * @data_offset: range starts at skb->data + data_offset
3839
- *
3840
- * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3841
- * skb data, including linear part and fragments. Checks if the result plus
3842
- * extra_elems fits under the limit for the card. Returns 0 if it does not.
3843
- * Note: extra_elems is not included in the returned result.
3844
- */
3845
-int qeth_get_elements_no(struct qeth_card *card,
3846
- struct sk_buff *skb, int extra_elems, int data_offset)
3847
-{
3848
- int elements = qeth_count_elements(skb, data_offset);
3849
-
3850
- if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3851
- QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
3852
- "(Number=%d / Length=%d). Discarded.\n",
3853
- elements + extra_elems, skb->len);
3854
- return 0;
3855
- }
3856
- return elements;
3857
-}
3858
-EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3859
-
3860
-int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
3861
-{
3862
- int hroom, inpage, rest;
3863
-
3864
- if (((unsigned long)skb->data & PAGE_MASK) !=
3865
- (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
3866
- hroom = skb_headroom(skb);
3867
- inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
3868
- rest = len - inpage;
3869
- if (rest > hroom)
3870
- return 1;
3871
- memmove(skb->data - rest, skb->data, skb_headlen(skb));
3872
- skb->data -= rest;
3873
- skb->tail -= rest;
3874
- *hdr = (struct qeth_hdr *)skb->data;
3875
- QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
3876
- }
3877
- return 0;
3878
-}
3879
-EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
3969
+#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3970
+ MAX_TCP_HEADER)
38803971
38813972 /**
38823973 * qeth_add_hw_header() - add a HW header to an skb.
....@@ -3893,12 +3984,14 @@
38933984 * The number of needed buffer elements is returned in @elements.
38943985 * A failure to create the hdr is indicated by returning < 0.
38953986 */
3896
-int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
3897
- struct qeth_hdr **hdr, unsigned int hdr_len,
3898
- unsigned int proto_len, unsigned int *elements)
3987
+static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3988
+ struct sk_buff *skb, struct qeth_hdr **hdr,
3989
+ unsigned int hdr_len, unsigned int proto_len,
3990
+ unsigned int *elements)
38993991 {
3900
- const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3992
+ gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
39013993 const unsigned int contiguous = proto_len ? proto_len : 1;
3994
+ const unsigned int max_elements = queue->max_elements;
39023995 unsigned int __elements;
39033996 addr_t start, end;
39043997 bool push_ok;
....@@ -3911,9 +4004,13 @@
39114004 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
39124005 /* Push HW header into same page as first protocol header. */
39134006 push_ok = true;
3914
- __elements = qeth_count_elements(skb, 0);
3915
- } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
3916
- /* Push HW header into a new page. */
4007
+ /* ... but TSO always needs a separate element for headers: */
4008
+ if (skb_is_gso(skb))
4009
+ __elements = 1 + qeth_count_elements(skb, proto_len);
4010
+ else
4011
+ __elements = qeth_count_elements(skb, 0);
4012
+ } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
4013
+ /* Push HW header into preceding page, flush with skb->data. */
39174014 push_ok = true;
39184015 __elements = 1 + qeth_count_elements(skb, 0);
39194016 } else {
....@@ -3932,15 +4029,12 @@
39324029 }
39334030
39344031 rc = skb_linearize(skb);
3935
- if (card->options.performance_stats) {
3936
- if (rc)
3937
- card->perf_stats.tx_linfail++;
3938
- else
3939
- card->perf_stats.tx_lin++;
3940
- }
3941
- if (rc)
4032
+ if (rc) {
4033
+ QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
39424034 return rc;
4035
+ }
39434036
4037
+ QETH_TXQ_STAT_INC(queue, skbs_linearized);
39444038 /* Linearization changed the layout, re-evaluate: */
39454039 goto check_layout;
39464040 }
....@@ -3951,36 +4045,91 @@
39514045 *hdr = skb_push(skb, hdr_len);
39524046 return hdr_len;
39534047 }
3954
- /* fall back */
3955
- *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
4048
+
4049
+ /* Fall back to cache element with known-good alignment: */
4050
+ if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4051
+ return -E2BIG;
4052
+ *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
39564053 if (!*hdr)
39574054 return -ENOMEM;
39584055 /* Copy protocol headers behind HW header: */
39594056 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
39604057 return 0;
39614058 }
3962
-EXPORT_SYMBOL_GPL(qeth_add_hw_header);
39634059
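Annotation: the layout check above decides whether the HW header can simply be pushed in front of skb->data, i.e. whether it shares a buffer element with the first protocol headers or needs one of its own. A minimal userspace sketch of the page-crossing arithmetic; ILLUS_PAGE_SIZE, elements_for_range() and the sample addresses are invented for illustration and are not the driver's own helpers:

#include <stdio.h>

#define ILLUS_PAGE_SIZE 4096UL

static unsigned int elements_for_range(unsigned long start, unsigned long end)
{
	/* number of 4 KiB pages touched by the byte range [start, end) */
	return (unsigned int)((end - 1) / ILLUS_PAGE_SIZE -
			      start / ILLUS_PAGE_SIZE + 1);
}

int main(void)
{
	unsigned long data = 0x10020;	/* payload starts mid-page */
	unsigned long hdr_len = 32;	/* size of the pushed HW header */
	unsigned long data_len = 1400;

	/* header pushed directly in front of the payload: stays in one page */
	printf("pushed:   %u element(s)\n",
	       elements_for_range(data - hdr_len, data + data_len));

	/* header kept in a separate cache object: costs one extra element */
	printf("separate: %u element(s)\n",
	       1 + elements_for_range(data, data + data_len));
	return 0;
}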
3964
-static void __qeth_fill_buffer(struct sk_buff *skb,
3965
- struct qeth_qdio_out_buffer *buf,
3966
- bool is_first_elem, unsigned int offset)
4060
+static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4061
+ struct sk_buff *curr_skb,
4062
+ struct qeth_hdr *curr_hdr)
4063
+{
4064
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4065
+ struct qeth_hdr *prev_hdr = queue->prev_hdr;
4066
+
4067
+ if (!prev_hdr)
4068
+ return true;
4069
+
4070
+ /* All packets must have the same target: */
4071
+ if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4072
+ struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4073
+
4074
+ return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4075
+ eth_hdr(curr_skb)->h_dest) &&
4076
+ qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4077
+ }
4078
+
4079
+ return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4080
+ qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4081
+}
4082
+
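Annotation: qeth_iqd_may_bulk() above only lets a frame share the current buffer when it has the same target as the previous one; for layer 2 that means an identical destination MAC and VLAN. A simplified stand-alone version of that check, where the struct and helper are invented stand-ins for the real header fields:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct frame_key {
	unsigned char dest_mac[6];
	unsigned short vlan_id;
};

static bool may_bulk(const struct frame_key *prev, const struct frame_key *cur)
{
	if (!prev)
		return true;	/* first frame in the buffer always fits */
	return !memcmp(prev->dest_mac, cur->dest_mac, 6) &&
	       prev->vlan_id == cur->vlan_id;
}

int main(void)
{
	struct frame_key a = { { 0x02, 0, 0, 0, 0, 1 }, 100 };
	struct frame_key b = { { 0x02, 0, 0, 0, 0, 1 }, 100 };
	struct frame_key c = { { 0x02, 0, 0, 0, 0, 2 }, 100 };

	printf("a+b may bulk: %d\n", may_bulk(&a, &b));	/* 1: same MAC + VLAN */
	printf("a+c may bulk: %d\n", may_bulk(&a, &c));	/* 0: different VLAN */
	return 0;
}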
4083
+/**
4084
+ * qeth_fill_buffer() - map skb into an output buffer
4085
+ * @buf: buffer to transport the skb
4086
+ * @skb: skb to map into the buffer
4087
+ * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
4088
+ * from qeth_core_header_cache.
4089
+ * @offset: when mapping the skb, start at skb->data + offset
4090
+ * @hd_len: if > 0, build a dedicated header element of this size
4091
+ */
4092
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4093
+ struct sk_buff *skb, struct qeth_hdr *hdr,
4094
+ unsigned int offset, unsigned int hd_len)
39674095 {
39684096 struct qdio_buffer *buffer = buf->buffer;
39694097 int element = buf->next_element_to_fill;
39704098 int length = skb_headlen(skb) - offset;
39714099 char *data = skb->data + offset;
3972
- int length_here, cnt;
4100
+ unsigned int elem_length, cnt;
4101
+ bool is_first_elem = true;
4102
+
4103
+ __skb_queue_tail(&buf->skb_list, skb);
4104
+
4105
+ /* build dedicated element for HW Header */
4106
+ if (hd_len) {
4107
+ is_first_elem = false;
4108
+
4109
+ buffer->element[element].addr = virt_to_phys(hdr);
4110
+ buffer->element[element].length = hd_len;
4111
+ buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4112
+
4113
+ /* HW header is allocated from cache: */
4114
+ if ((void *)hdr != skb->data)
4115
+ buf->is_header[element] = 1;
4116
+ /* HW header was pushed and is contiguous with linear part: */
4117
+ else if (length > 0 && !PAGE_ALIGNED(data) &&
4118
+ (data == (char *)hdr + hd_len))
4119
+ buffer->element[element].eflags |=
4120
+ SBAL_EFLAGS_CONTIGUOUS;
4121
+
4122
+ element++;
4123
+ }
39734124
39744125 /* map linear part into buffer element(s) */
39754126 while (length > 0) {
3976
- /* length_here is the remaining amount of data in this page */
3977
- length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3978
- if (length < length_here)
3979
- length_here = length;
4127
+ elem_length = min_t(unsigned int, length,
4128
+ PAGE_SIZE - offset_in_page(data));
39804129
3981
- buffer->element[element].addr = data;
3982
- buffer->element[element].length = length_here;
3983
- length -= length_here;
4130
+ buffer->element[element].addr = virt_to_phys(data);
4131
+ buffer->element[element].length = elem_length;
4132
+ length -= elem_length;
39844133 if (is_first_elem) {
39854134 is_first_elem = false;
39864135 if (length || skb_is_nonlinear(skb))
....@@ -3993,7 +4142,8 @@
39934142 buffer->element[element].eflags =
39944143 SBAL_EFLAGS_MIDDLE_FRAG;
39954144 }
3996
- data += length_here;
4145
+
4146
+ data += elem_length;
39974147 element++;
39984148 }
39994149
....@@ -4004,17 +4154,16 @@
40044154 data = skb_frag_address(frag);
40054155 length = skb_frag_size(frag);
40064156 while (length > 0) {
4007
- length_here = PAGE_SIZE -
4008
- ((unsigned long) data % PAGE_SIZE);
4009
- if (length < length_here)
4010
- length_here = length;
4157
+ elem_length = min_t(unsigned int, length,
4158
+ PAGE_SIZE - offset_in_page(data));
40114159
4012
- buffer->element[element].addr = data;
4013
- buffer->element[element].length = length_here;
4160
+ buffer->element[element].addr = virt_to_phys(data);
4161
+ buffer->element[element].length = elem_length;
40144162 buffer->element[element].eflags =
40154163 SBAL_EFLAGS_MIDDLE_FRAG;
4016
- length -= length_here;
4017
- data += length_here;
4164
+
4165
+ length -= elem_length;
4166
+ data += elem_length;
40184167 element++;
40194168 }
40204169 }
....@@ -4022,176 +4171,248 @@
40224171 if (buffer->element[element - 1].eflags)
40234172 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
40244173 buf->next_element_to_fill = element;
4174
+ return element;
40254175 }
40264176
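Annotation: the elem_length loops above cut both the linear part and each fragment at page boundaries, so every element covers at most the remainder of the current 4 KiB page. A small standalone sketch of that chunking, with invented names and sample values:

#include <stdio.h>

#define ILLUS_PAGE_SIZE 4096UL

static unsigned long offset_in_page_illus(unsigned long addr)
{
	return addr & (ILLUS_PAGE_SIZE - 1);
}

int main(void)
{
	unsigned long data = 0x20F00;	/* starts 256 bytes before a page end */
	unsigned long length = 6000;	/* spans three pages */
	unsigned int element = 0;

	while (length > 0) {
		unsigned long room = ILLUS_PAGE_SIZE - offset_in_page_illus(data);
		unsigned long elem_length = length < room ? length : room;

		printf("element %u: addr=0x%lx len=%lu\n",
		       element, data, elem_length);
		length -= elem_length;
		data += elem_length;
		element++;
	}
	return 0;
}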
4027
-/**
4028
- * qeth_fill_buffer() - map skb into an output buffer
4029
- * @queue: QDIO queue to submit the buffer on
4030
- * @buf: buffer to transport the skb
4031
- * @skb: skb to map into the buffer
4032
- * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
4033
- * from qeth_core_header_cache.
4034
- * @offset: when mapping the skb, start at skb->data + offset
4035
- * @hd_len: if > 0, build a dedicated header element of this size
4036
- */
4037
-static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4038
- struct qeth_qdio_out_buffer *buf,
4039
- struct sk_buff *skb, struct qeth_hdr *hdr,
4040
- unsigned int offset, unsigned int hd_len)
4177
+static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4178
+ struct sk_buff *skb, unsigned int elements,
4179
+ struct qeth_hdr *hdr, unsigned int offset,
4180
+ unsigned int hd_len)
40414181 {
4042
- struct qdio_buffer *buffer = buf->buffer;
4043
- bool is_first_elem = true;
4044
- int flush_cnt = 0;
4182
+ unsigned int bytes = qdisc_pkt_len(skb);
4183
+ struct qeth_qdio_out_buffer *buffer;
4184
+ unsigned int next_element;
4185
+ struct netdev_queue *txq;
4186
+ bool stopped = false;
4187
+ bool flush;
40454188
4046
- refcount_inc(&skb->users);
4047
- skb_queue_tail(&buf->skb_list, skb);
4189
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4190
+ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
40484191
4049
- /* build dedicated header element */
4050
- if (hd_len) {
4051
- int element = buf->next_element_to_fill;
4052
- is_first_elem = false;
4053
-
4054
- buffer->element[element].addr = hdr;
4055
- buffer->element[element].length = hd_len;
4056
- buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4057
- /* remember to free cache-allocated qeth_hdr: */
4058
- buf->is_header[element] = ((void *)hdr != skb->data);
4059
- buf->next_element_to_fill++;
4060
- }
4061
-
4062
- __qeth_fill_buffer(skb, buf, is_first_elem, offset);
4063
-
4064
- if (!queue->do_pack) {
4065
- QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
4066
- /* set state to PRIMED -> will be flushed */
4067
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4068
- flush_cnt = 1;
4069
- } else {
4070
- QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
4071
- if (queue->card->options.performance_stats)
4072
- queue->card->perf_stats.skbs_sent_pack++;
4073
- if (buf->next_element_to_fill >=
4074
- QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
4075
- /*
4076
- * packed buffer if full -> set state PRIMED
4077
- * -> will be flushed
4078
- */
4079
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4080
- flush_cnt = 1;
4081
- }
4082
- }
4083
- return flush_cnt;
4084
-}
4085
-
4086
-int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
4087
- struct qeth_hdr *hdr, unsigned int offset,
4088
- unsigned int hd_len)
4089
-{
4090
- int index = queue->next_buf_to_fill;
4091
- struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
4092
-
4093
- /*
4094
- * check if buffer is empty to make sure that we do not 'overtake'
4095
- * ourselves and try to fill a buffer that is already primed
4192
+ /* Just a sanity check, the wake/stop logic should ensure that we always
4193
+ * get a free buffer.
40964194 */
40974195 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
40984196 return -EBUSY;
4099
- queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
4100
- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
4101
- qeth_flush_buffers(queue, index, 1);
4197
+
4198
+ flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4199
+
4200
+ if (flush ||
4201
+ (buffer->next_element_to_fill + elements > queue->max_elements)) {
4202
+ if (buffer->next_element_to_fill > 0) {
4203
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4204
+ queue->bulk_count++;
4205
+ }
4206
+
4207
+ if (queue->bulk_count >= queue->bulk_max)
4208
+ flush = true;
4209
+
4210
+ if (flush)
4211
+ qeth_flush_queue(queue);
4212
+
4213
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4214
+ queue->bulk_count)];
4215
+
4216
+ /* Sanity-check again: */
4217
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4218
+ return -EBUSY;
4219
+ }
4220
+
4221
+ if (buffer->next_element_to_fill == 0 &&
4222
+ atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4223
+ /* If a TX completion happens right _here_ and misses to wake
4224
+ * the txq, then our re-check below will catch the race.
4225
+ */
4226
+ QETH_TXQ_STAT_INC(queue, stopped);
4227
+ netif_tx_stop_queue(txq);
4228
+ stopped = true;
4229
+ }
4230
+
4231
+ next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4232
+ buffer->bytes += bytes;
4233
+ buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4234
+ queue->prev_hdr = hdr;
4235
+
4236
+ flush = __netdev_tx_sent_queue(txq, bytes,
4237
+ !stopped && netdev_xmit_more());
4238
+
4239
+ if (flush || next_element >= queue->max_elements) {
4240
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4241
+ queue->bulk_count++;
4242
+
4243
+ if (queue->bulk_count >= queue->bulk_max)
4244
+ flush = true;
4245
+
4246
+ if (flush)
4247
+ qeth_flush_queue(queue);
4248
+ }
4249
+
4250
+ if (stopped && !qeth_out_queue_is_full(queue))
4251
+ netif_tx_start_queue(txq);
41024252 return 0;
41034253 }
4104
-EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
41054254
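Annotation: the bulk_start/bulk_count bookkeeping in __qeth_xmit() treats the 128 output buffers as a ring, and QDIO_BUFNR() reduces a running index back into that ring. A toy model of the wrap-around, assuming a power-of-two ring size; RING_BUFNR is a made-up stand-in, not the real macro:

#include <stdio.h>

#define RING_SIZE	128u
#define RING_BUFNR(n)	((n) & (RING_SIZE - 1))

int main(void)
{
	unsigned int bulk_start = 126;	/* near the end of the ring */
	unsigned int bulk_count = 0;
	unsigned int i;

	/* queue three primed buffers, wrapping past the end of the ring */
	for (i = 0; i < 3; i++)
		printf("buffer #%u primed at slot %u\n",
		       i, RING_BUFNR(bulk_start + bulk_count++));

	/* a flush advances bulk_start and resets bulk_count */
	bulk_start = RING_BUFNR(bulk_start + bulk_count);
	bulk_count = 0;
	printf("next bulk starts at slot %u\n", bulk_start);
	return 0;
}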
41064255 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
41074256 struct sk_buff *skb, struct qeth_hdr *hdr,
41084257 unsigned int offset, unsigned int hd_len,
41094258 int elements_needed)
41104259 {
4260
+ unsigned int start_index = queue->next_buf_to_fill;
41114261 struct qeth_qdio_out_buffer *buffer;
4112
- int start_index;
4262
+ unsigned int next_element;
4263
+ struct netdev_queue *txq;
4264
+ bool stopped = false;
41134265 int flush_count = 0;
41144266 int do_pack = 0;
4115
- int tmp;
41164267 int rc = 0;
41174268
4118
- /* spin until we get the queue ... */
4119
- while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4120
- QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4121
- start_index = queue->next_buf_to_fill;
41224269 buffer = queue->bufs[queue->next_buf_to_fill];
4123
- /*
4124
- * check if buffer is empty to make sure that we do not 'overtake'
4125
- * ourselves and try to fill a buffer that is already primed
4270
+
4271
+ /* Just a sanity check, the wake/stop logic should ensure that we always
4272
+ * get a free buffer.
41264273 */
4127
- if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4128
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4274
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
41294275 return -EBUSY;
4130
- }
4276
+
4277
+ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4278
+
41314279 /* check if we need to switch packing state of this queue */
41324280 qeth_switch_to_packing_if_needed(queue);
41334281 if (queue->do_pack) {
41344282 do_pack = 1;
41354283 /* does packet fit in current buffer? */
4136
- if ((QETH_MAX_BUFFER_ELEMENTS(card) -
4137
- buffer->next_element_to_fill) < elements_needed) {
4284
+ if (buffer->next_element_to_fill + elements_needed >
4285
+ queue->max_elements) {
41384286 /* ... no -> set state PRIMED */
41394287 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
41404288 flush_count++;
41414289 queue->next_buf_to_fill =
4142
- (queue->next_buf_to_fill + 1) %
4143
- QDIO_MAX_BUFFERS_PER_Q;
4290
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
41444291 buffer = queue->bufs[queue->next_buf_to_fill];
4145
- /* we did a step forward, so check buffer state
4146
- * again */
4292
+
4293
+ /* We stepped forward, so sanity-check again: */
41474294 if (atomic_read(&buffer->state) !=
41484295 QETH_QDIO_BUF_EMPTY) {
41494296 qeth_flush_buffers(queue, start_index,
41504297 flush_count);
4151
- atomic_set(&queue->state,
4152
- QETH_OUT_Q_UNLOCKED);
41534298 rc = -EBUSY;
41544299 goto out;
41554300 }
41564301 }
41574302 }
4158
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
4159
- queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4160
- QDIO_MAX_BUFFERS_PER_Q;
4161
- flush_count += tmp;
4303
+
4304
+ if (buffer->next_element_to_fill == 0 &&
4305
+ atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4306
+ /* If a TX completion happens right _here_ and misses to wake
4307
+ * the txq, then our re-check below will catch the race.
4308
+ */
4309
+ QETH_TXQ_STAT_INC(queue, stopped);
4310
+ netif_tx_stop_queue(txq);
4311
+ stopped = true;
4312
+ }
4313
+
4314
+ next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4315
+ buffer->bytes += qdisc_pkt_len(skb);
4316
+ buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4317
+
4318
+ if (queue->do_pack)
4319
+ QETH_TXQ_STAT_INC(queue, skbs_pack);
4320
+ if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4321
+ flush_count++;
4322
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4323
+ queue->next_buf_to_fill =
4324
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
4325
+ }
4326
+
41624327 if (flush_count)
41634328 qeth_flush_buffers(queue, start_index, flush_count);
4164
- else if (!atomic_read(&queue->set_pci_flags_count))
4165
- atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4166
- /*
4167
- * queue->state will go from LOCKED -> UNLOCKED or from
4168
- * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4169
- * (switch packing state or flush buffer to get another pci flag out).
4170
- * In that case we will enter this loop
4171
- */
4172
- while (atomic_dec_return(&queue->state)) {
4173
- start_index = queue->next_buf_to_fill;
4174
- /* check if we can go back to non-packing state */
4175
- tmp = qeth_switch_to_nonpacking_if_needed(queue);
4176
- /*
4177
- * check if we need to flush a packing buffer to get a pci
4178
- * flag out on the queue
4179
- */
4180
- if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4181
- tmp = qeth_prep_flush_pack_buffer(queue);
4182
- if (tmp) {
4183
- qeth_flush_buffers(queue, start_index, tmp);
4184
- flush_count += tmp;
4185
- }
4186
- }
4187
-out:
4188
- /* at this point the queue is UNLOCKED again */
4189
- if (queue->card->options.performance_stats && do_pack)
4190
- queue->card->perf_stats.bufs_sent_pack += flush_count;
41914329
4330
+out:
4331
+ if (do_pack)
4332
+ QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4333
+
4334
+ if (stopped && !qeth_out_queue_is_full(queue))
4335
+ netif_tx_start_queue(txq);
41924336 return rc;
41934337 }
41944338 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4339
+
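Annotation: both TX paths above use the same stop/re-check pattern: stop the txq once the last free buffer is taken, then test again so a completion that raced in between still gets the queue restarted. A compilable toy model of that ordering; every function here is a stand-in for the real queue and netdev operations:

#include <stdbool.h>
#include <stdio.h>

static const unsigned int ring_size = 128;
static unsigned int used_buffers;
static bool queue_stopped;

static void tx_stop(void)  { queue_stopped = true;  puts("txq stopped"); }
static void tx_start(void) { queue_stopped = false; puts("txq started"); }
static bool ring_full(void) { return used_buffers >= ring_size; }

/* transmit path: stop first, then re-check to catch a racing completion */
static void xmit_one(void)
{
	bool stopped = false;

	if (++used_buffers >= ring_size) {
		tx_stop();
		stopped = true;
	}

	/* ... fill and flush the buffer ... */

	if (stopped && !ring_full())
		tx_start();
}

/* completion path: free buffers and wake the queue if it was stopped */
static void tx_complete(unsigned int freed)
{
	used_buffers -= freed;
	if (queue_stopped && !ring_full())
		tx_start();
}

int main(void)
{
	used_buffers = ring_size - 1;	/* ring is nearly full */
	xmit_one();			/* takes the last buffer, stops the txq */
	tx_complete(16);		/* frees buffers and restarts the txq */
	return 0;
}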
4340
+static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4341
+ unsigned int payload_len, struct sk_buff *skb,
4342
+ unsigned int proto_len)
4343
+{
4344
+ struct qeth_hdr_ext_tso *ext = &hdr->ext;
4345
+
4346
+ ext->hdr_tot_len = sizeof(*ext);
4347
+ ext->imb_hdr_no = 1;
4348
+ ext->hdr_type = 1;
4349
+ ext->hdr_version = 1;
4350
+ ext->hdr_len = 28;
4351
+ ext->payload_len = payload_len;
4352
+ ext->mss = skb_shinfo(skb)->gso_size;
4353
+ ext->dg_hdr_len = proto_len;
4354
+}
4355
+
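Annotation: qeth_fill_tso_ext() above advertises the MSS and the TSO payload (frame_len minus the protocol headers) to the device, which then cuts the payload into roughly MSS-sized wire segments. A rough sketch of that arithmetic with invented sample values:

#include <stdio.h>

int main(void)
{
	unsigned int frame_len = 64240 + 54;	/* skb->len incl. headers */
	unsigned int proto_len = 54;		/* MAC + IP + TCP headers */
	unsigned int mss = 1460;
	unsigned int payload_len = frame_len - proto_len;
	unsigned int segs = (payload_len + mss - 1) / mss;	/* DIV_ROUND_UP */

	printf("payload_len=%u mss=%u -> %u segments\n", payload_len, mss, segs);
	return 0;
}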
4356
+int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4357
+ struct qeth_qdio_out_q *queue, int ipv,
4358
+ void (*fill_header)(struct qeth_qdio_out_q *queue,
4359
+ struct qeth_hdr *hdr, struct sk_buff *skb,
4360
+ int ipv, unsigned int data_len))
4361
+{
4362
+ unsigned int proto_len, hw_hdr_len;
4363
+ unsigned int frame_len = skb->len;
4364
+ bool is_tso = skb_is_gso(skb);
4365
+ unsigned int data_offset = 0;
4366
+ struct qeth_hdr *hdr = NULL;
4367
+ unsigned int hd_len = 0;
4368
+ unsigned int elements;
4369
+ int push_len, rc;
4370
+
4371
+ if (is_tso) {
4372
+ hw_hdr_len = sizeof(struct qeth_hdr_tso);
4373
+ proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4374
+ } else {
4375
+ hw_hdr_len = sizeof(struct qeth_hdr);
4376
+ proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4377
+ }
4378
+
4379
+ rc = skb_cow_head(skb, hw_hdr_len);
4380
+ if (rc)
4381
+ return rc;
4382
+
4383
+ push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4384
+ &elements);
4385
+ if (push_len < 0)
4386
+ return push_len;
4387
+ if (is_tso || !push_len) {
4388
+ /* HW header needs its own buffer element. */
4389
+ hd_len = hw_hdr_len + proto_len;
4390
+ data_offset = push_len + proto_len;
4391
+ }
4392
+ memset(hdr, 0, hw_hdr_len);
4393
+ fill_header(queue, hdr, skb, ipv, frame_len);
4394
+ if (is_tso)
4395
+ qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4396
+ frame_len - proto_len, skb, proto_len);
4397
+
4398
+ if (IS_IQD(card)) {
4399
+ rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4400
+ hd_len);
4401
+ } else {
4402
+ /* TODO: drop skb_orphan() once TX completion is fast enough */
4403
+ skb_orphan(skb);
4404
+ spin_lock(&queue->lock);
4405
+ rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4406
+ hd_len, elements);
4407
+ spin_unlock(&queue->lock);
4408
+ }
4409
+
4410
+ if (rc && !push_len)
4411
+ kmem_cache_free(qeth_core_header_cache, hdr);
4412
+
4413
+ return rc;
4414
+}
4415
+EXPORT_SYMBOL_GPL(qeth_xmit);
41954416
41964417 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
41974418 struct qeth_reply *reply, unsigned long data)
....@@ -4207,30 +4428,21 @@
42074428 setparms->data.mode = SET_PROMISC_MODE_OFF;
42084429 }
42094430 card->info.promisc_mode = setparms->data.mode;
4210
- return 0;
4431
+ return (cmd->hdr.return_code) ? -EIO : 0;
42114432 }
42124433
4213
-void qeth_setadp_promisc_mode(struct qeth_card *card)
4434
+void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
42144435 {
4215
- enum qeth_ipa_promisc_modes mode;
4216
- struct net_device *dev = card->dev;
4436
+ enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4437
+ SET_PROMISC_MODE_OFF;
42174438 struct qeth_cmd_buffer *iob;
42184439 struct qeth_ipa_cmd *cmd;
42194440
42204441 QETH_CARD_TEXT(card, 4, "setprom");
4221
-
4222
- if (((dev->flags & IFF_PROMISC) &&
4223
- (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
4224
- (!(dev->flags & IFF_PROMISC) &&
4225
- (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
4226
- return;
4227
- mode = SET_PROMISC_MODE_OFF;
4228
- if (dev->flags & IFF_PROMISC)
4229
- mode = SET_PROMISC_MODE_ON;
42304442 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
42314443
42324444 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4233
- sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
4445
+ SETADP_DATA_SIZEOF(mode));
42344446 if (!iob)
42354447 return;
42364448 cmd = __ipa_cmd(iob);
....@@ -4239,33 +4451,25 @@
42394451 }
42404452 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
42414453
4242
-struct net_device_stats *qeth_get_stats(struct net_device *dev)
4243
-{
4244
- struct qeth_card *card;
4245
-
4246
- card = dev->ml_priv;
4247
-
4248
- QETH_CARD_TEXT(card, 5, "getstat");
4249
-
4250
- return &card->stats;
4251
-}
4252
-EXPORT_SYMBOL_GPL(qeth_get_stats);
4253
-
42544454 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
42554455 struct qeth_reply *reply, unsigned long data)
42564456 {
42574457 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4458
+ struct qeth_ipacmd_setadpparms *adp_cmd;
42584459
42594460 QETH_CARD_TEXT(card, 4, "chgmaccb");
42604461 if (qeth_setadpparms_inspect_rc(cmd))
4261
- return 0;
4462
+ return -EIO;
42624463
4263
- if (!card->options.layer2 ||
4264
- !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
4265
- ether_addr_copy(card->dev->dev_addr,
4266
- cmd->data.setadapterparms.data.change_addr.addr);
4267
- card->info.mac_bits |= QETH_LAYER2_MAC_READ;
4268
- }
4464
+ adp_cmd = &cmd->data.setadapterparms;
4465
+ if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4466
+ return -EADDRNOTAVAIL;
4467
+
4468
+ if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4469
+ !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4470
+ return -EADDRNOTAVAIL;
4471
+
4472
+ ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
42694473 return 0;
42704474 }
42714475
....@@ -4278,8 +4482,7 @@
42784482 QETH_CARD_TEXT(card, 4, "chgmac");
42794483
42804484 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4281
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4282
- sizeof(struct qeth_change_addr));
4485
+ SETADP_DATA_SIZEOF(change_addr));
42834486 if (!iob)
42844487 return -ENOMEM;
42854488 cmd = __ipa_cmd(iob);
....@@ -4298,90 +4501,67 @@
42984501 {
42994502 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
43004503 struct qeth_set_access_ctrl *access_ctrl_req;
4301
- int fallback = *(int *)reply->param;
43024504
43034505 QETH_CARD_TEXT(card, 4, "setaccb");
4304
- if (cmd->hdr.return_code)
4305
- return 0;
4306
- qeth_setadpparms_inspect_rc(cmd);
43074506
43084507 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4309
- QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4310
- QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4311
- QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
4312
- cmd->data.setadapterparms.hdr.return_code);
4508
+ QETH_CARD_TEXT_(card, 2, "rc=%d",
4509
+ cmd->data.setadapterparms.hdr.return_code);
43134510 if (cmd->data.setadapterparms.hdr.return_code !=
43144511 SET_ACCESS_CTRL_RC_SUCCESS)
4315
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
4316
- card->gdev->dev.kobj.name,
4317
- access_ctrl_req->subcmd_code,
4318
- cmd->data.setadapterparms.hdr.return_code);
4319
- switch (cmd->data.setadapterparms.hdr.return_code) {
4512
+ QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4513
+ access_ctrl_req->subcmd_code, CARD_DEVID(card),
4514
+ cmd->data.setadapterparms.hdr.return_code);
4515
+ switch (qeth_setadpparms_inspect_rc(cmd)) {
43204516 case SET_ACCESS_CTRL_RC_SUCCESS:
4321
- if (card->options.isolation == ISOLATION_MODE_NONE) {
4517
+ if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
43224518 dev_info(&card->gdev->dev,
43234519 "QDIO data connection isolation is deactivated\n");
4324
- } else {
4520
+ else
43254521 dev_info(&card->gdev->dev,
43264522 "QDIO data connection isolation is activated\n");
4327
- }
4328
- break;
4523
+ return 0;
43294524 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4330
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
4331
- "deactivated\n", dev_name(&card->gdev->dev));
4332
- if (fallback)
4333
- card->options.isolation = card->options.prev_isolation;
4334
- break;
4525
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4526
+ CARD_DEVID(card));
4527
+ return 0;
43354528 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4336
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
4337
- " activated\n", dev_name(&card->gdev->dev));
4338
- if (fallback)
4339
- card->options.isolation = card->options.prev_isolation;
4340
- break;
4529
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4530
+ CARD_DEVID(card));
4531
+ return 0;
43414532 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
43424533 dev_err(&card->gdev->dev, "Adapter does not "
43434534 "support QDIO data connection isolation\n");
4344
- break;
4535
+ return -EOPNOTSUPP;
43454536 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
43464537 dev_err(&card->gdev->dev,
43474538 "Adapter is dedicated. "
43484539 "QDIO data connection isolation not supported\n");
4349
- if (fallback)
4350
- card->options.isolation = card->options.prev_isolation;
4351
- break;
4540
+ return -EOPNOTSUPP;
43524541 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
43534542 dev_err(&card->gdev->dev,
43544543 "TSO does not permit QDIO data connection isolation\n");
4355
- if (fallback)
4356
- card->options.isolation = card->options.prev_isolation;
4357
- break;
4544
+ return -EPERM;
43584545 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
43594546 dev_err(&card->gdev->dev, "The adjacent switch port does not "
43604547 "support reflective relay mode\n");
4361
- if (fallback)
4362
- card->options.isolation = card->options.prev_isolation;
4363
- break;
4548
+ return -EOPNOTSUPP;
43644549 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
43654550 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
43664551 "enabled at the adjacent switch port");
4367
- if (fallback)
4368
- card->options.isolation = card->options.prev_isolation;
4369
- break;
4552
+ return -EREMOTEIO;
43704553 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
43714554 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
43724555 "at the adjacent switch failed\n");
4373
- break;
4556
+ /* benign error while disabling ISOLATION_MODE_FWD */
4557
+ return 0;
43744558 default:
4375
- /* this should never happen */
4376
- if (fallback)
4377
- card->options.isolation = card->options.prev_isolation;
4378
- break;
4559
+ return -EIO;
43794560 }
4380
- return 0;
43814561 }
43824562
4383
-static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4384
- enum qeth_ipa_isolation_modes isolation, int fallback)
4563
+int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4564
+ enum qeth_ipa_isolation_modes mode)
43854565 {
43864566 int rc;
43874567 struct qeth_cmd_buffer *iob;
....@@ -4390,60 +4570,37 @@
43904570
43914571 QETH_CARD_TEXT(card, 4, "setacctl");
43924572
4393
- QETH_DBF_TEXT_(SETUP, 2, "setacctl");
4394
- QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4573
+ if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4574
+ dev_err(&card->gdev->dev,
4575
+ "Adapter does not support QDIO data connection isolation\n");
4576
+ return -EOPNOTSUPP;
4577
+ }
43954578
43964579 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4397
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4398
- sizeof(struct qeth_set_access_ctrl));
4580
+ SETADP_DATA_SIZEOF(set_access_ctrl));
43994581 if (!iob)
44004582 return -ENOMEM;
44014583 cmd = __ipa_cmd(iob);
44024584 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4403
- access_ctrl_req->subcmd_code = isolation;
4585
+ access_ctrl_req->subcmd_code = mode;
44044586
44054587 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4406
- &fallback);
4407
- QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
4408
- return rc;
4409
-}
4410
-
4411
-int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4412
-{
4413
- int rc = 0;
4414
-
4415
- QETH_CARD_TEXT(card, 4, "setactlo");
4416
-
4417
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
4418
- card->info.type == QETH_CARD_TYPE_OSX) &&
4419
- qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4420
- rc = qeth_setadpparms_set_access_ctrl(card,
4421
- card->options.isolation, fallback);
4422
- if (rc) {
4423
- QETH_DBF_MESSAGE(3,
4424
- "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
4425
- card->gdev->dev.kobj.name,
4426
- rc);
4427
- rc = -EOPNOTSUPP;
4428
- }
4429
- } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4430
- card->options.isolation = ISOLATION_MODE_NONE;
4431
-
4432
- dev_err(&card->gdev->dev, "Adapter does not "
4433
- "support QDIO data connection isolation\n");
4434
- rc = -EOPNOTSUPP;
4588
+ NULL);
4589
+ if (rc) {
4590
+ QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4591
+ QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4592
+ rc, CARD_DEVID(card));
44354593 }
4594
+
44364595 return rc;
44374596 }
4438
-EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
44394597
4440
-void qeth_tx_timeout(struct net_device *dev)
4598
+void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
44414599 {
44424600 struct qeth_card *card;
44434601
44444602 card = dev->ml_priv;
44454603 QETH_CARD_TEXT(card, 4, "txtimeo");
4446
- card->stats.tx_errors++;
44474604 qeth_schedule_recovery(card);
44484605 }
44494606 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
....@@ -4458,7 +4615,8 @@
44584615 rc = BMCR_FULLDPLX;
44594616 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
44604617 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4461
- (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4618
+ (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4619
+ (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
44624620 rc |= BMCR_SPEED100;
44634621 break;
44644622 case MII_BMSR: /* Basic mode status register */
....@@ -4490,7 +4648,9 @@
44904648 case MII_NWAYTEST: /* N-way auto-neg test register */
44914649 break;
44924650 case MII_RERRCOUNTER: /* rx error counter */
4493
- rc = card->stats.rx_errors;
4651
+ rc = card->stats.rx_length_errors +
4652
+ card->stats.rx_frame_errors +
4653
+ card->stats.rx_fifo_errors;
44944654 break;
44954655 case MII_SREVISION: /* silicon revision */
44964656 break;
....@@ -4512,85 +4672,50 @@
45124672 return rc;
45134673 }
45144674
4515
-static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
4516
- struct qeth_cmd_buffer *iob, int len,
4517
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
4518
- unsigned long),
4519
- void *reply_param)
4520
-{
4521
- u16 s1, s2;
4522
-
4523
- QETH_CARD_TEXT(card, 4, "sendsnmp");
4524
-
4525
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4526
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4527
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4528
- /* adjust PDU length fields in IPA_PDU_HEADER */
4529
- s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4530
- s2 = (u32) len;
4531
- memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4532
- memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4533
- memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4534
- memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4535
- return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4536
- reply_cb, reply_param);
4537
-}
4538
-
45394675 static int qeth_snmp_command_cb(struct qeth_card *card,
4540
- struct qeth_reply *reply, unsigned long sdata)
4676
+ struct qeth_reply *reply, unsigned long data)
45414677 {
4542
- struct qeth_ipa_cmd *cmd;
4543
- struct qeth_arp_query_info *qinfo;
4544
- unsigned char *data;
4678
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4679
+ struct qeth_arp_query_info *qinfo = reply->param;
4680
+ struct qeth_ipacmd_setadpparms *adp_cmd;
4681
+ unsigned int data_len;
45454682 void *snmp_data;
4546
- __u16 data_len;
45474683
45484684 QETH_CARD_TEXT(card, 3, "snpcmdcb");
45494685
4550
- cmd = (struct qeth_ipa_cmd *) sdata;
4551
- data = (unsigned char *)((char *)cmd - reply->offset);
4552
- qinfo = (struct qeth_arp_query_info *) reply->param;
4553
-
45544686 if (cmd->hdr.return_code) {
45554687 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4556
- return 0;
4688
+ return -EIO;
45574689 }
45584690 if (cmd->data.setadapterparms.hdr.return_code) {
45594691 cmd->hdr.return_code =
45604692 cmd->data.setadapterparms.hdr.return_code;
45614693 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4562
- return 0;
4694
+ return -EIO;
45634695 }
4564
- data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4565
- if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4566
- snmp_data = &cmd->data.setadapterparms.data.snmp;
4567
- data_len -= offsetof(struct qeth_ipa_cmd,
4568
- data.setadapterparms.data.snmp);
4696
+
4697
+ adp_cmd = &cmd->data.setadapterparms;
4698
+ data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4699
+ if (adp_cmd->hdr.seq_no == 1) {
4700
+ snmp_data = &adp_cmd->data.snmp;
45694701 } else {
4570
- snmp_data = &cmd->data.setadapterparms.data.snmp.request;
4571
- data_len -= offsetof(struct qeth_ipa_cmd,
4572
- data.setadapterparms.data.snmp.request);
4702
+ snmp_data = &adp_cmd->data.snmp.request;
4703
+ data_len -= offsetof(struct qeth_snmp_cmd, request);
45734704 }
45744705
45754706 /* check if there is enough room in userspace */
45764707 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4577
- QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
4578
- cmd->hdr.return_code = IPA_RC_ENOMEM;
4579
- return 0;
4708
+ QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4709
+ return -ENOSPC;
45804710 }
45814711 QETH_CARD_TEXT_(card, 4, "snore%i",
4582
- cmd->data.setadapterparms.hdr.used_total);
4712
+ cmd->data.setadapterparms.hdr.used_total);
45834713 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4584
- cmd->data.setadapterparms.hdr.seq_no);
4714
+ cmd->data.setadapterparms.hdr.seq_no);
45854715 /*copy entries to user buffer*/
45864716 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
45874717 qinfo->udata_offset += data_len;
45884718
4589
- /* check if all replies received ... */
4590
- QETH_CARD_TEXT_(card, 4, "srtot%i",
4591
- cmd->data.setadapterparms.hdr.used_total);
4592
- QETH_CARD_TEXT_(card, 4, "srseq%i",
4593
- cmd->data.setadapterparms.hdr.seq_no);
45944719 if (cmd->data.setadapterparms.hdr.seq_no <
45954720 cmd->data.setadapterparms.hdr.used_total)
45964721 return 1;
....@@ -4599,88 +4724,79 @@
45994724
46004725 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
46014726 {
4727
+ struct qeth_snmp_ureq __user *ureq;
46024728 struct qeth_cmd_buffer *iob;
4603
- struct qeth_ipa_cmd *cmd;
4604
- struct qeth_snmp_ureq *ureq;
46054729 unsigned int req_len;
46064730 struct qeth_arp_query_info qinfo = {0, };
46074731 int rc = 0;
46084732
46094733 QETH_CARD_TEXT(card, 3, "snmpcmd");
46104734
4611
- if (card->info.guestlan)
4735
+ if (IS_VM_NIC(card))
46124736 return -EOPNOTSUPP;
46134737
46144738 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4615
- (!card->options.layer2)) {
4739
+ IS_LAYER3(card))
46164740 return -EOPNOTSUPP;
4617
- }
4618
- /* skip 4 bytes (data_len struct member) to get req_len */
4619
- if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4741
+
4742
+ ureq = (struct qeth_snmp_ureq __user *) udata;
4743
+ if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4744
+ get_user(req_len, &ureq->hdr.req_len))
46204745 return -EFAULT;
4621
- if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
4622
- sizeof(struct qeth_ipacmd_hdr) -
4623
- sizeof(struct qeth_ipacmd_setadpparms_hdr)))
4746
+
4747
+ /* Sanitize user input, to avoid overflows in iob size calculation: */
4748
+ if (req_len > QETH_BUFSIZE)
46244749 return -EINVAL;
4625
- ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
4626
- if (IS_ERR(ureq)) {
4627
- QETH_CARD_TEXT(card, 2, "snmpnome");
4628
- return PTR_ERR(ureq);
4750
+
4751
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4752
+ if (!iob)
4753
+ return -ENOMEM;
4754
+
4755
+ if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4756
+ &ureq->cmd, req_len)) {
4757
+ qeth_put_cmd(iob);
4758
+ return -EFAULT;
46294759 }
4630
- qinfo.udata_len = ureq->hdr.data_len;
4760
+
46314761 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
46324762 if (!qinfo.udata) {
4633
- kfree(ureq);
4763
+ qeth_put_cmd(iob);
46344764 return -ENOMEM;
46354765 }
46364766 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
46374767
4638
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4639
- QETH_SNMP_SETADP_CMDLENGTH + req_len);
4640
- if (!iob) {
4641
- rc = -ENOMEM;
4642
- goto out;
4643
- }
4644
- cmd = __ipa_cmd(iob);
4645
- memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4646
- rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4647
- qeth_snmp_command_cb, (void *)&qinfo);
4768
+ rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
46484769 if (rc)
4649
- QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
4650
- QETH_CARD_IFNAME(card), rc);
4770
+ QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4771
+ CARD_DEVID(card), rc);
46514772 else {
46524773 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
46534774 rc = -EFAULT;
46544775 }
4655
-out:
4656
- kfree(ureq);
4776
+
46574777 kfree(qinfo.udata);
46584778 return rc;
46594779 }
46604780
46614781 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
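Annotation: qeth_snmp_command() and its callback stream a multi-part reply into a fixed-size buffer, bailing out with -ENOSPC once a fragment no longer fits, as the qinfo.udata_offset accounting above shows. A small userspace sketch of that bounded append, with invented buffer and fragment sizes:

#include <stdio.h>
#include <string.h>

#define REPLY_BUF_LEN 64u

static char reply_buf[REPLY_BUF_LEN];
static unsigned int reply_off;

static int append_fragment(const void *data, unsigned int len)
{
	if (REPLY_BUF_LEN - reply_off < len)
		return -1;			/* out of room: -ENOSPC */
	memcpy(reply_buf + reply_off, data, len);
	reply_off += len;
	return 0;
}

int main(void)
{
	char frag[40];

	memset(frag, 0xab, sizeof(frag));
	printf("first fragment:  %d\n", append_fragment(frag, sizeof(frag)));
	printf("second fragment: %d\n", append_fragment(frag, sizeof(frag)));
	return 0;
}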
4662
- struct qeth_reply *reply, unsigned long data)
4782
+ struct qeth_reply *reply,
4783
+ unsigned long data)
46634784 {
46644785 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4665
- struct qeth_qoat_priv *priv;
4666
- char *resdata;
4786
+ struct qeth_qoat_priv *priv = reply->param;
46674787 int resdatalen;
46684788
46694789 QETH_CARD_TEXT(card, 3, "qoatcb");
46704790 if (qeth_setadpparms_inspect_rc(cmd))
4671
- return 0;
4791
+ return -EIO;
46724792
4673
- priv = (struct qeth_qoat_priv *)reply->param;
46744793 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4675
- resdata = (char *)data + 28;
46764794
4677
- if (resdatalen > (priv->buffer_len - priv->response_len)) {
4678
- cmd->hdr.return_code = IPA_RC_FFFF;
4679
- return 0;
4680
- }
4795
+ if (resdatalen > (priv->buffer_len - priv->response_len))
4796
+ return -ENOSPC;
46814797
4682
- memcpy((priv->buffer + priv->response_len), resdata,
4683
- resdatalen);
4798
+ memcpy(priv->buffer + priv->response_len,
4799
+ &cmd->data.setadapterparms.hdr, resdatalen);
46844800 priv->response_len += resdatalen;
46854801
46864802 if (cmd->data.setadapterparms.hdr.seq_no <
....@@ -4701,28 +4817,20 @@
47014817
47024818 QETH_CARD_TEXT(card, 3, "qoatcmd");
47034819
4704
- if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4705
- rc = -EOPNOTSUPP;
4706
- goto out;
4707
- }
4820
+ if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4821
+ return -EOPNOTSUPP;
47084822
4709
- if (copy_from_user(&oat_data, udata,
4710
- sizeof(struct qeth_query_oat_data))) {
4711
- rc = -EFAULT;
4712
- goto out;
4713
- }
4823
+ if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4824
+ return -EFAULT;
47144825
47154826 priv.buffer_len = oat_data.buffer_len;
47164827 priv.response_len = 0;
47174828 priv.buffer = vzalloc(oat_data.buffer_len);
4718
- if (!priv.buffer) {
4719
- rc = -ENOMEM;
4720
- goto out;
4721
- }
4829
+ if (!priv.buffer)
4830
+ return -ENOMEM;
47224831
47234832 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4724
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4725
- sizeof(struct qeth_query_oat));
4833
+ SETADP_DATA_SIZEOF(query_oat));
47264834 if (!iob) {
47274835 rc = -ENOMEM;
47284836 goto out_free;
....@@ -4731,32 +4839,19 @@
47314839 oat_req = &cmd->data.setadapterparms.data.query_oat;
47324840 oat_req->subcmd_code = oat_data.command;
47334841
4734
- rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4735
- &priv);
4842
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
47364843 if (!rc) {
4737
- if (is_compat_task())
4738
- tmp = compat_ptr(oat_data.ptr);
4739
- else
4740
- tmp = (void __user *)(unsigned long)oat_data.ptr;
4741
-
4742
- if (copy_to_user(tmp, priv.buffer,
4743
- priv.response_len)) {
4744
- rc = -EFAULT;
4745
- goto out_free;
4746
- }
4747
-
4844
+ tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4845
+ u64_to_user_ptr(oat_data.ptr);
47484846 oat_data.response_len = priv.response_len;
47494847
4750
- if (copy_to_user(udata, &oat_data,
4751
- sizeof(struct qeth_query_oat_data)))
4848
+ if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4849
+ copy_to_user(udata, &oat_data, sizeof(oat_data)))
47524850 rc = -EFAULT;
4753
- } else
4754
- if (rc == IPA_RC_FFFF)
4755
- rc = -EFAULT;
4851
+ }
47564852
47574853 out_free:
47584854 vfree(priv.buffer);
4759
-out:
47604855 return rc;
47614856 }
47624857
....@@ -4769,7 +4864,7 @@
47694864
47704865 QETH_CARD_TEXT(card, 2, "qcrdincb");
47714866 if (qeth_setadpparms_inspect_rc(cmd))
4772
- return 0;
4867
+ return -EIO;
47734868
47744869 card_info = &cmd->data.setadapterparms.data.card_info;
47754870 carrier_info->card_type = card_info->card_type;
....@@ -4778,16 +4873,15 @@
47784873 return 0;
47794874 }
47804875
4781
-static int qeth_query_card_info(struct qeth_card *card,
4782
- struct carrier_info *carrier_info)
4876
+int qeth_query_card_info(struct qeth_card *card,
4877
+ struct carrier_info *carrier_info)
47834878 {
47844879 struct qeth_cmd_buffer *iob;
47854880
47864881 QETH_CARD_TEXT(card, 2, "qcrdinfo");
47874882 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
47884883 return -EOPNOTSUPP;
4789
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
4790
- sizeof(struct qeth_ipacmd_setadpparms_hdr));
4884
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
47914885 if (!iob)
47924886 return -ENOMEM;
47934887 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
....@@ -4806,10 +4900,9 @@
48064900 {
48074901 struct diag26c_mac_resp *response;
48084902 struct diag26c_mac_req *request;
4809
- struct ccw_dev_id id;
48104903 int rc;
48114904
4812
- QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
4905
+ QETH_CARD_TEXT(card, 2, "vmreqmac");
48134906
48144907 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
48154908 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
....@@ -4818,11 +4911,10 @@
48184911 goto out;
48194912 }
48204913
4821
- ccw_device_get_id(CARD_DDEV(card), &id);
48224914 request->resp_buf_len = sizeof(*response);
48234915 request->resp_version = DIAG26C_VERSION2;
48244916 request->op_code = DIAG26C_GET_MAC;
4825
- request->devno = id.devno;
4917
+ request->devno = card->info.ddev_devno;
48264918
48274919 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
48284920 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
....@@ -4834,13 +4926,13 @@
48344926 if (request->resp_buf_len < sizeof(*response) ||
48354927 response->version != request->resp_version) {
48364928 rc = -EIO;
4837
- QETH_DBF_TEXT(SETUP, 2, "badresp");
4838
- QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
4839
- sizeof(request->resp_buf_len));
4929
+ QETH_CARD_TEXT(card, 2, "badresp");
4930
+ QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4931
+ sizeof(request->resp_buf_len));
48404932 } else if (!is_valid_ether_addr(response->mac)) {
48414933 rc = -EINVAL;
4842
- QETH_DBF_TEXT(SETUP, 2, "badmac");
4843
- QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
4934
+ QETH_CARD_TEXT(card, 2, "badmac");
4935
+ QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
48444936 } else {
48454937 ether_addr_copy(card->dev->dev_addr, response->mac);
48464938 }
....@@ -4852,54 +4944,40 @@
48524944 }
48534945 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
48544946
4855
-static int qeth_get_qdio_q_format(struct qeth_card *card)
4856
-{
4857
- if (card->info.type == QETH_CARD_TYPE_IQD)
4858
- return QDIO_IQDIO_QFMT;
4859
- else
4860
- return QDIO_QETH_QFMT;
4861
-}
4862
-
48634947 static void qeth_determine_capabilities(struct qeth_card *card)
48644948 {
4949
+ struct qeth_channel *channel = &card->data;
4950
+ struct ccw_device *ddev = channel->ccwdev;
48654951 int rc;
4866
- int length;
4867
- char *prcd;
4868
- struct ccw_device *ddev;
48694952 int ddev_offline = 0;
48704953
4871
- QETH_DBF_TEXT(SETUP, 2, "detcapab");
4872
- ddev = CARD_DDEV(card);
4954
+ QETH_CARD_TEXT(card, 2, "detcapab");
48734955 if (!ddev->online) {
48744956 ddev_offline = 1;
4875
- rc = ccw_device_set_online(ddev);
4957
+ rc = qeth_start_channel(channel);
48764958 if (rc) {
4877
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4959
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
48784960 goto out;
48794961 }
48804962 }
48814963
4882
- rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4964
+ rc = qeth_read_conf_data(card);
48834965 if (rc) {
4884
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
4885
- dev_name(&card->gdev->dev), rc);
4886
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4966
+ QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4967
+ CARD_DEVID(card), rc);
4968
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
48874969 goto out_offline;
48884970 }
4889
- qeth_configure_unitaddr(card, prcd);
4890
- if (ddev_offline)
4891
- qeth_configure_blkt_default(card, prcd);
4892
- kfree(prcd);
48934971
48944972 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
48954973 if (rc)
4896
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4974
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
48974975
4898
- QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
4899
- QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
4900
- QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
4901
- QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
4902
- QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
4976
+ QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
4977
+ QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
4978
+ QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
4979
+ QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
4980
+ QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
49034981 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
49044982 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
49054983 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
....@@ -4909,117 +4987,91 @@
49094987 card->options.cq = QETH_CQ_NOTAVAILABLE;
49104988 }
49114989
4912
-
49134990 out_offline:
49144991 if (ddev_offline == 1)
4915
- ccw_device_set_offline(ddev);
4992
+ qeth_stop_channel(channel);
49164993 out:
49174994 return;
49184995 }
49194996
4920
-static void qeth_qdio_establish_cq(struct qeth_card *card,
4921
- struct qdio_buffer **in_sbal_ptrs,
4922
- void (**queue_start_poll)
4923
- (struct ccw_device *, int,
4924
- unsigned long))
4997
+static void qeth_read_ccw_conf_data(struct qeth_card *card)
49254998 {
4926
- int i;
4999
+ struct qeth_card_info *info = &card->info;
5000
+ struct ccw_device *cdev = CARD_DDEV(card);
5001
+ struct ccw_dev_id dev_id;
49275002
4928
- if (card->options.cq == QETH_CQ_ENABLED) {
4929
- int offset = QDIO_MAX_BUFFERS_PER_Q *
4930
- (card->qdio.no_in_queues - 1);
5003
+ QETH_CARD_TEXT(card, 2, "ccwconfd");
5004
+ ccw_device_get_id(cdev, &dev_id);
49315005
4932
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
4933
- in_sbal_ptrs[offset + i] =
4934
- card->qdio.c_q->bufs[i].buffer;
5006
+ info->ddev_devno = dev_id.devno;
5007
+ info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5008
+ !ccw_device_get_iid(cdev, &info->iid) &&
5009
+ !ccw_device_get_chid(cdev, 0, &info->chid);
5010
+ info->ssid = dev_id.ssid;
49355011
4936
- queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4937
- }
5012
+ dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5013
+ info->chid, info->chpid);
5014
+
5015
+ QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5016
+ QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5017
+ QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5018
+ QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5019
+ QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5020
+ QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5021
+ QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
49385022 }
49395023
49405024 static int qeth_qdio_establish(struct qeth_card *card)
49415025 {
5026
+ struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5027
+ struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5028
+ struct qeth_qib_parms *qib_parms = NULL;
49425029 struct qdio_initialize init_data;
4943
- char *qib_param_field;
4944
- struct qdio_buffer **in_sbal_ptrs;
4945
- void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4946
- struct qdio_buffer **out_sbal_ptrs;
4947
- int i, j, k;
5030
+ unsigned int i;
49485031 int rc = 0;
49495032
4950
- QETH_DBF_TEXT(SETUP, 2, "qdioest");
5033
+ QETH_CARD_TEXT(card, 2, "qdioest");
49515034
4952
- qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
4953
- GFP_KERNEL);
4954
- if (!qib_param_field) {
4955
- rc = -ENOMEM;
4956
- goto out_free_nothing;
5035
+ if (!IS_IQD(card) && !IS_VM_NIC(card)) {
5036
+ qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5037
+ if (!qib_parms)
5038
+ return -ENOMEM;
5039
+
5040
+ qeth_fill_qib_parms(card, qib_parms);
49575041 }
49585042
4959
- qeth_create_qib_param_field(card, qib_param_field);
4960
- qeth_create_qib_param_field_blkt(card, qib_param_field);
5043
+ in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5044
+ if (card->options.cq == QETH_CQ_ENABLED)
5045
+ in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
49615046
4962
- in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4963
- sizeof(void *),
4964
- GFP_KERNEL);
4965
- if (!in_sbal_ptrs) {
4966
- rc = -ENOMEM;
4967
- goto out_free_qib_param;
4968
- }
4969
-
4970
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
4971
- in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
4972
-
4973
- queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4974
- GFP_KERNEL);
4975
- if (!queue_start_poll) {
4976
- rc = -ENOMEM;
4977
- goto out_free_in_sbals;
4978
- }
4979
- for (i = 0; i < card->qdio.no_in_queues; ++i)
4980
- queue_start_poll[i] = qeth_qdio_start_poll;
4981
-
4982
- qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4983
-
4984
- out_sbal_ptrs =
4985
- kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4986
- sizeof(void *),
4987
- GFP_KERNEL);
4988
- if (!out_sbal_ptrs) {
4989
- rc = -ENOMEM;
4990
- goto out_free_queue_start_poll;
4991
- }
4992
-
4993
- for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4994
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
4995
- out_sbal_ptrs[k] =
4996
- card->qdio.out_qs[i]->bufs[j]->buffer;
5047
+ for (i = 0; i < card->qdio.no_out_queues; i++)
5048
+ out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
49975049
49985050 memset(&init_data, 0, sizeof(struct qdio_initialize));
4999
- init_data.cdev = CARD_DDEV(card);
5000
- init_data.q_format = qeth_get_qdio_q_format(card);
5051
+ init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5052
+ QDIO_QETH_QFMT;
50015053 init_data.qib_param_field_format = 0;
5002
- init_data.qib_param_field = qib_param_field;
5054
+ init_data.qib_param_field = (void *)qib_parms;
50035055 init_data.no_input_qs = card->qdio.no_in_queues;
50045056 init_data.no_output_qs = card->qdio.no_out_queues;
50055057 init_data.input_handler = qeth_qdio_input_handler;
50065058 init_data.output_handler = qeth_qdio_output_handler;
5007
- init_data.queue_start_poll_array = queue_start_poll;
5059
+ init_data.irq_poll = qeth_qdio_poll;
50085060 init_data.int_parm = (unsigned long) card;
5009
- init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
5010
- init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
5061
+ init_data.input_sbal_addr_array = in_sbal_ptrs;
5062
+ init_data.output_sbal_addr_array = out_sbal_ptrs;
50115063 init_data.output_sbal_state_array = card->qdio.out_bufstates;
5012
- init_data.scan_threshold =
5013
- (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
5064
+ init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
50145065
50155066 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
50165067 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5017
- rc = qdio_allocate(&init_data);
5068
+ rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5069
+ init_data.no_output_qs);
50185070 if (rc) {
50195071 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
50205072 goto out;
50215073 }
5022
- rc = qdio_establish(&init_data);
5074
+ rc = qdio_establish(CARD_DDEV(card), &init_data);
50235075 if (rc) {
50245076 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
50255077 qdio_free(CARD_DDEV(card));
....@@ -5036,31 +5088,25 @@
50365088 default:
50375089 break;
50385090 }
5091
+
50395092 out:
5040
- kfree(out_sbal_ptrs);
5041
-out_free_queue_start_poll:
5042
- kfree(queue_start_poll);
5043
-out_free_in_sbals:
5044
- kfree(in_sbal_ptrs);
5045
-out_free_qib_param:
5046
- kfree(qib_param_field);
5047
-out_free_nothing:
5093
+ kfree(qib_parms);
50485094 return rc;
50495095 }
50505096
50515097 static void qeth_core_free_card(struct qeth_card *card)
50525098 {
5053
- QETH_DBF_TEXT(SETUP, 2, "freecrd");
5054
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
5055
- qeth_clean_channel(&card->read);
5056
- qeth_clean_channel(&card->write);
5057
- qeth_clean_channel(&card->data);
5058
- qeth_free_qdio_buffers(card);
5099
+ QETH_CARD_TEXT(card, 2, "freecrd");
5100
+
50595101 unregister_service_level(&card->qeth_service_level);
5102
+ debugfs_remove_recursive(card->debugfs);
5103
+ qeth_put_cmd(card->read_cmd);
5104
+ destroy_workqueue(card->event_wq);
5105
+ dev_set_drvdata(&card->gdev->dev, NULL);
50605106 kfree(card);
50615107 }
50625108
5063
-void qeth_trace_features(struct qeth_card *card)
5109
+static void qeth_trace_features(struct qeth_card *card)
50645110 {
50655111 QETH_CARD_TEXT(card, 2, "features");
50665112 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
....@@ -5069,19 +5115,22 @@
50695115 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
50705116 sizeof(card->info.diagass_support));
50715117 }
5072
-EXPORT_SYMBOL_GPL(qeth_trace_features);
50735118
50745119 static struct ccw_device_id qeth_ids[] = {
50755120 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
50765121 .driver_info = QETH_CARD_TYPE_OSD},
50775122 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
50785123 .driver_info = QETH_CARD_TYPE_IQD},
5124
+#ifdef CONFIG_QETH_OSN
50795125 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
50805126 .driver_info = QETH_CARD_TYPE_OSN},
5127
+#endif
50815128 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
50825129 .driver_info = QETH_CARD_TYPE_OSM},
5130
+#ifdef CONFIG_QETH_OSX
50835131 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
50845132 .driver_info = QETH_CARD_TYPE_OSX},
5133
+#endif
50855134 {},
50865135 };
50875136 MODULE_DEVICE_TABLE(ccw, qeth_ids);
....@@ -5096,65 +5145,69 @@
50965145 .remove = ccwgroup_remove_ccwdev,
50975146 };
50985147
5099
-int qeth_core_hardsetup_card(struct qeth_card *card)
5148
+static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
51005149 {
51015150 int retries = 3;
51025151 int rc;
51035152
5104
- QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
5153
+ QETH_CARD_TEXT(card, 2, "hrdsetup");
51055154 atomic_set(&card->force_alloc_skb, 0);
51065155 rc = qeth_update_from_chp_desc(card);
51075156 if (rc)
51085157 return rc;
51095158 retry:
51105159 if (retries < 3)
5111
- QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
5112
- dev_name(&card->gdev->dev));
5113
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
5114
- ccw_device_set_offline(CARD_DDEV(card));
5115
- ccw_device_set_offline(CARD_WDEV(card));
5116
- ccw_device_set_offline(CARD_RDEV(card));
5160
+ QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5161
+ CARD_DEVID(card));
5162
+ rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5163
+ qeth_stop_channel(&card->data);
5164
+ qeth_stop_channel(&card->write);
5165
+ qeth_stop_channel(&card->read);
51175166 qdio_free(CARD_DDEV(card));
5118
- rc = ccw_device_set_online(CARD_RDEV(card));
5167
+
5168
+ rc = qeth_start_channel(&card->read);
51195169 if (rc)
51205170 goto retriable;
5121
- rc = ccw_device_set_online(CARD_WDEV(card));
5171
+ rc = qeth_start_channel(&card->write);
51225172 if (rc)
51235173 goto retriable;
5124
- rc = ccw_device_set_online(CARD_DDEV(card));
5174
+ rc = qeth_start_channel(&card->data);
51255175 if (rc)
51265176 goto retriable;
51275177 retriable:
51285178 if (rc == -ERESTARTSYS) {
5129
- QETH_DBF_TEXT(SETUP, 2, "break1");
5179
+ QETH_CARD_TEXT(card, 2, "break1");
51305180 return rc;
51315181 } else if (rc) {
5132
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
5182
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
51335183 if (--retries < 0)
51345184 goto out;
51355185 else
51365186 goto retry;
51375187 }
5188
+
51385189 qeth_determine_capabilities(card);
5139
- qeth_init_tokens(card);
5140
- qeth_init_func_level(card);
5141
- rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
5142
- if (rc == -ERESTARTSYS) {
5143
- QETH_DBF_TEXT(SETUP, 2, "break2");
5190
+ qeth_read_ccw_conf_data(card);
5191
+ qeth_idx_init(card);
5192
+
5193
+ rc = qeth_idx_activate_read_channel(card);
5194
+ if (rc == -EINTR) {
5195
+ QETH_CARD_TEXT(card, 2, "break2");
51445196 return rc;
51455197 } else if (rc) {
5146
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
5198
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
51475199 if (--retries < 0)
51485200 goto out;
51495201 else
51505202 goto retry;
51515203 }
5152
- rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
5153
- if (rc == -ERESTARTSYS) {
5154
- QETH_DBF_TEXT(SETUP, 2, "break3");
5204
+
5205
+ rc = qeth_idx_activate_write_channel(card);
5206
+ if (rc == -EINTR) {
5207
+ QETH_CARD_TEXT(card, 2, "break3");
51555208 return rc;
51565209 } else if (rc) {
5157
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
5210
+ QETH_CARD_TEXT_(card, 2, "4err%d", rc);
51585211 if (--retries < 0)
51595212 goto out;
51605213 else
@@ -5163,27 +5216,26 @@
51635216 card->read_or_write_problem = 0;
51645217 rc = qeth_mpc_initialize(card);
51655218 if (rc) {
5166
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
5219
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
51675220 goto out;
51685221 }
51695222
51705223 rc = qeth_send_startlan(card);
51715224 if (rc) {
5172
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
5173
- if (rc == IPA_RC_LAN_OFFLINE) {
5174
- dev_warn(&card->gdev->dev,
5175
- "The LAN is offline\n");
5176
- card->lan_online = 0;
5225
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5226
+ if (rc == -ENETDOWN) {
5227
+ dev_warn(&card->gdev->dev, "The LAN is offline\n");
5228
+ *carrier_ok = false;
51775229 } else {
5178
- rc = -ENODEV;
51795230 goto out;
51805231 }
5181
- } else
5182
- card->lan_online = 1;
5232
+ } else {
5233
+ *carrier_ok = true;
5234
+ }
51835235
5184
- card->options.ipa4.supported_funcs = 0;
5185
- card->options.ipa6.supported_funcs = 0;
5186
- card->options.adp.supported_funcs = 0;
5236
+ card->options.ipa4.supported = 0;
5237
+ card->options.ipa6.supported = 0;
5238
+ card->options.adp.supported = 0;
51875239 card->options.sbp.supported_funcs = 0;
51885240 card->info.diagass_support = 0;
51895241 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
@@ -5197,48 +5249,296 @@
51975249 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
51985250 rc = qeth_query_setadapterparms(card);
51995251 if (rc < 0) {
5200
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
5252
+ QETH_CARD_TEXT_(card, 2, "7err%d", rc);
52015253 goto out;
52025254 }
52035255 }
52045256 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
52055257 rc = qeth_query_setdiagass(card);
5206
- if (rc < 0) {
5207
- QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
5208
- goto out;
5209
- }
5258
+ if (rc)
5259
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
52105260 }
5261
+
5262
+ qeth_trace_features(card);
5263
+
5264
+ if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5265
+ (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5266
+ card->info.hwtrap = 0;
5267
+
5268
+ if (card->options.isolation != ISOLATION_MODE_NONE) {
5269
+ rc = qeth_setadpparms_set_access_ctrl(card,
5270
+ card->options.isolation);
5271
+ if (rc)
5272
+ goto out;
5273
+ }
5274
+
5275
+ rc = qeth_init_qdio_queues(card);
5276
+ if (rc) {
5277
+ QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5278
+ goto out;
5279
+ }
5280
+
52115281 return 0;
52125282 out:
52135283 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
52145284 "an error on the device\n");
5215
- QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
5216
- dev_name(&card->gdev->dev), rc);
5285
+ QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5286
+ CARD_DEVID(card), rc);
52175287 return rc;
52185288 }
5219
-EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
52205289
5221
-static void qeth_create_skb_frag(struct qdio_buffer_element *element,
5222
- struct sk_buff *skb, int offset, int data_len)
5290
+static int qeth_set_online(struct qeth_card *card,
5291
+ const struct qeth_discipline *disc)
52235292 {
5224
- struct page *page = virt_to_page(element->addr);
5225
- unsigned int next_frag;
5293
+ bool carrier_ok;
5294
+ int rc;
52265295
5227
- /* first fill the linear space */
5228
- if (!skb->len) {
5229
- unsigned int linear = min(data_len, skb_tailroom(skb));
5296
+ mutex_lock(&card->conf_mutex);
5297
+ QETH_CARD_TEXT(card, 2, "setonlin");
52305298
5231
- skb_put_data(skb, element->addr + offset, linear);
5232
- data_len -= linear;
5233
- if (!data_len)
5234
- return;
5235
- offset += linear;
5236
- /* fall through to add page frag for remaining data */
5299
+ rc = qeth_hardsetup_card(card, &carrier_ok);
5300
+ if (rc) {
5301
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5302
+ rc = -ENODEV;
5303
+ goto err_hardsetup;
52375304 }
5305
+
5306
+ qeth_print_status_message(card);
5307
+
5308
+ if (card->dev->reg_state != NETREG_REGISTERED)
5309
+ /* no need for locking / error handling at this early stage: */
5310
+ qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
5311
+
5312
+ rc = disc->set_online(card, carrier_ok);
5313
+ if (rc)
5314
+ goto err_online;
5315
+
5316
+ /* let user_space know that device is online */
5317
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5318
+
5319
+ mutex_unlock(&card->conf_mutex);
5320
+ return 0;
5321
+
5322
+err_online:
5323
+err_hardsetup:
5324
+ qeth_qdio_clear_card(card, 0);
5325
+ qeth_clear_working_pool_list(card);
5326
+ qeth_flush_local_addrs(card);
5327
+
5328
+ qeth_stop_channel(&card->data);
5329
+ qeth_stop_channel(&card->write);
5330
+ qeth_stop_channel(&card->read);
5331
+ qdio_free(CARD_DDEV(card));
5332
+
5333
+ mutex_unlock(&card->conf_mutex);
5334
+ return rc;
5335
+}
5336
+
5337
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
5338
+ bool resetting)
5339
+{
5340
+ int rc, rc2, rc3;
5341
+
5342
+ mutex_lock(&card->conf_mutex);
5343
+ QETH_CARD_TEXT(card, 3, "setoffl");
5344
+
5345
+ if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5346
+ qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5347
+ card->info.hwtrap = 1;
5348
+ }
5349
+
5350
+ /* cancel any stalled cmd that might block the rtnl: */
5351
+ qeth_clear_ipacmd_list(card);
5352
+
5353
+ rtnl_lock();
5354
+ card->info.open_when_online = card->dev->flags & IFF_UP;
5355
+ dev_close(card->dev);
5356
+ netif_device_detach(card->dev);
5357
+ netif_carrier_off(card->dev);
5358
+ rtnl_unlock();
5359
+
5360
+ cancel_work_sync(&card->rx_mode_work);
5361
+
5362
+ disc->set_offline(card);
5363
+
5364
+ qeth_qdio_clear_card(card, 0);
5365
+ qeth_drain_output_queues(card);
5366
+ qeth_clear_working_pool_list(card);
5367
+ qeth_flush_local_addrs(card);
5368
+ card->info.promisc_mode = 0;
5369
+
5370
+ rc = qeth_stop_channel(&card->data);
5371
+ rc2 = qeth_stop_channel(&card->write);
5372
+ rc3 = qeth_stop_channel(&card->read);
5373
+ if (!rc)
5374
+ rc = (rc2) ? rc2 : rc3;
5375
+ if (rc)
5376
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5377
+ qdio_free(CARD_DDEV(card));
5378
+
5379
+ /* let user_space know that device is offline */
5380
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5381
+
5382
+ mutex_unlock(&card->conf_mutex);
5383
+ return 0;
5384
+}
5385
+EXPORT_SYMBOL_GPL(qeth_set_offline);
5386
+
5387
+static int qeth_do_reset(void *data)
5388
+{
5389
+ const struct qeth_discipline *disc;
5390
+ struct qeth_card *card = data;
5391
+ int rc;
5392
+
5393
+ /* Lock-free, other users will block until we are done. */
5394
+ disc = card->discipline;
5395
+
5396
+ QETH_CARD_TEXT(card, 2, "recover1");
5397
+ if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5398
+ return 0;
5399
+ QETH_CARD_TEXT(card, 2, "recover2");
5400
+ dev_warn(&card->gdev->dev,
5401
+ "A recovery process has been started for the device\n");
5402
+
5403
+ qeth_set_offline(card, disc, true);
5404
+ rc = qeth_set_online(card, disc);
5405
+ if (!rc) {
5406
+ dev_info(&card->gdev->dev,
5407
+ "Device successfully recovered!\n");
5408
+ } else {
5409
+ ccwgroup_set_offline(card->gdev);
5410
+ dev_warn(&card->gdev->dev,
5411
+ "The qeth device driver failed to recover an error on the device\n");
5412
+ }
5413
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5414
+ qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5415
+ return 0;
5416
+}
5417
+
5418
+#if IS_ENABLED(CONFIG_QETH_L3)
5419
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5420
+ struct qeth_hdr *hdr)
5421
+{
5422
+ struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5423
+ struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5424
+ struct net_device *dev = skb->dev;
5425
+
5426
+ if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5427
+ dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5428
+ "FAKELL", skb->len);
5429
+ return;
5430
+ }
5431
+
5432
+ if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5433
+ u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5434
+ ETH_P_IP;
5435
+ unsigned char tg_addr[ETH_ALEN];
5436
+
5437
+ skb_reset_network_header(skb);
5438
+ switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5439
+ case QETH_CAST_MULTICAST:
5440
+ if (prot == ETH_P_IP)
5441
+ ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5442
+ else
5443
+ ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5444
+ QETH_CARD_STAT_INC(card, rx_multicast);
5445
+ break;
5446
+ case QETH_CAST_BROADCAST:
5447
+ ether_addr_copy(tg_addr, dev->broadcast);
5448
+ QETH_CARD_STAT_INC(card, rx_multicast);
5449
+ break;
5450
+ default:
5451
+ if (card->options.sniffer)
5452
+ skb->pkt_type = PACKET_OTHERHOST;
5453
+ ether_addr_copy(tg_addr, dev->dev_addr);
5454
+ }
5455
+
5456
+ if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5457
+ dev_hard_header(skb, dev, prot, tg_addr,
5458
+ &l3_hdr->next_hop.rx.src_mac, skb->len);
5459
+ else
5460
+ dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5461
+ skb->len);
5462
+ }
5463
+
5464
+ /* copy VLAN tag from hdr into skb */
5465
+ if (!card->options.sniffer &&
5466
+ (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5467
+ QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5468
+ u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5469
+ l3_hdr->vlan_id :
5470
+ l3_hdr->next_hop.rx.vlan_id;
5471
+
5472
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5473
+ }
5474
+}
5475
+#endif
5476
+
5477
+static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5478
+ struct qeth_hdr *hdr, bool uses_frags)
5479
+{
5480
+ struct napi_struct *napi = &card->napi;
5481
+ bool is_cso;
5482
+
5483
+ switch (hdr->hdr.l2.id) {
5484
+ case QETH_HEADER_TYPE_OSN:
5485
+ skb_push(skb, sizeof(*hdr));
5486
+ skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
5487
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5488
+ QETH_CARD_STAT_INC(card, rx_packets);
5489
+
5490
+ card->osn_info.data_cb(skb);
5491
+ return;
5492
+#if IS_ENABLED(CONFIG_QETH_L3)
5493
+ case QETH_HEADER_TYPE_LAYER3:
5494
+ qeth_l3_rebuild_skb(card, skb, hdr);
5495
+ is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5496
+ break;
5497
+#endif
5498
+ case QETH_HEADER_TYPE_LAYER2:
5499
+ is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5500
+ break;
5501
+ default:
5502
+ /* never happens */
5503
+ if (uses_frags)
5504
+ napi_free_frags(napi);
5505
+ else
5506
+ dev_kfree_skb_any(skb);
5507
+ return;
5508
+ }
5509
+
5510
+ if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5511
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
5512
+ QETH_CARD_STAT_INC(card, rx_skb_csum);
5513
+ } else {
5514
+ skb->ip_summed = CHECKSUM_NONE;
5515
+ }
5516
+
5517
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5518
+ QETH_CARD_STAT_INC(card, rx_packets);
5519
+ if (skb_is_nonlinear(skb)) {
5520
+ QETH_CARD_STAT_INC(card, rx_sg_skbs);
5521
+ QETH_CARD_STAT_ADD(card, rx_sg_frags,
5522
+ skb_shinfo(skb)->nr_frags);
5523
+ }
5524
+
5525
+ if (uses_frags) {
5526
+ napi_gro_frags(napi);
5527
+ } else {
5528
+ skb->protocol = eth_type_trans(skb, skb->dev);
5529
+ napi_gro_receive(napi, skb);
5530
+ }
5531
+}
5532
+
5533
+static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5534
+{
5535
+ struct page *page = virt_to_page(data);
5536
+ unsigned int next_frag;
52385537
52395538 next_frag = skb_shinfo(skb)->nr_frags;
52405539 get_page(page);
5241
- skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
5540
+ skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5541
+ data_len);
52425542 }
52435543
52445544 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
@@ -5246,126 +5546,217 @@
52465546 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
52475547 }
52485548
5249
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
5250
- struct qeth_qdio_buffer *qethbuffer,
5251
- struct qdio_buffer_element **__element, int *__offset,
5252
- struct qeth_hdr **hdr)
5549
+static int qeth_extract_skb(struct qeth_card *card,
5550
+ struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5551
+ int *__offset)
52535552 {
5254
- struct qdio_buffer_element *element = *__element;
5553
+ struct qeth_priv *priv = netdev_priv(card->dev);
52555554 struct qdio_buffer *buffer = qethbuffer->buffer;
5555
+ struct napi_struct *napi = &card->napi;
5556
+ struct qdio_buffer_element *element;
5557
+ unsigned int linear_len = 0;
5558
+ bool uses_frags = false;
52565559 int offset = *__offset;
5560
+ bool use_rx_sg = false;
5561
+ unsigned int headroom;
5562
+ struct qeth_hdr *hdr;
52575563 struct sk_buff *skb;
52585564 int skb_len = 0;
5259
- void *data_ptr;
5260
- int data_len;
5261
- int headroom = 0;
5262
- int use_rx_sg = 0;
52635565
5566
+ element = &buffer->element[*element_no];
5567
+
5568
+next_packet:
52645569 /* qeth_hdr must not cross element boundaries */
52655570 while (element->length < offset + sizeof(struct qeth_hdr)) {
52665571 if (qeth_is_last_sbale(element))
5267
- return NULL;
5572
+ return -ENODATA;
52685573 element++;
52695574 offset = 0;
52705575 }
5271
- *hdr = element->addr + offset;
52725576
5273
- offset += sizeof(struct qeth_hdr);
5274
- switch ((*hdr)->hdr.l2.id) {
5577
+ hdr = phys_to_virt(element->addr) + offset;
5578
+ offset += sizeof(*hdr);
5579
+ skb = NULL;
5580
+
5581
+ switch (hdr->hdr.l2.id) {
52755582 case QETH_HEADER_TYPE_LAYER2:
5276
- skb_len = (*hdr)->hdr.l2.pkt_length;
5583
+ skb_len = hdr->hdr.l2.pkt_length;
5584
+ linear_len = ETH_HLEN;
5585
+ headroom = 0;
52775586 break;
52785587 case QETH_HEADER_TYPE_LAYER3:
5279
- skb_len = (*hdr)->hdr.l3.length;
5588
+ skb_len = hdr->hdr.l3.length;
5589
+ if (!IS_LAYER3(card)) {
5590
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5591
+ goto walk_packet;
5592
+ }
5593
+
5594
+ if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5595
+ linear_len = ETH_HLEN;
5596
+ headroom = 0;
5597
+ break;
5598
+ }
5599
+
5600
+ if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5601
+ linear_len = sizeof(struct ipv6hdr);
5602
+ else
5603
+ linear_len = sizeof(struct iphdr);
52805604 headroom = ETH_HLEN;
52815605 break;
52825606 case QETH_HEADER_TYPE_OSN:
5283
- skb_len = (*hdr)->hdr.osn.pdu_length;
5607
+ skb_len = hdr->hdr.osn.pdu_length;
5608
+ if (!IS_OSN(card)) {
5609
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5610
+ goto walk_packet;
5611
+ }
5612
+
5613
+ linear_len = skb_len;
52845614 headroom = sizeof(struct qeth_hdr);
52855615 break;
52865616 default:
5287
- break;
5617
+ if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5618
+ QETH_CARD_STAT_INC(card, rx_frame_errors);
5619
+ else
5620
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5621
+
5622
+ /* Can't determine packet length, drop the whole buffer. */
5623
+ return -EPROTONOSUPPORT;
52885624 }
52895625
5290
- if (!skb_len)
5291
- return NULL;
5626
+ if (skb_len < linear_len) {
5627
+ QETH_CARD_STAT_INC(card, rx_dropped_runt);
5628
+ goto walk_packet;
5629
+ }
52925630
5293
- if (((skb_len >= card->options.rx_sg_cb) &&
5294
- (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
5295
- (!atomic_read(&card->force_alloc_skb))) ||
5296
- (card->options.cq == QETH_CQ_ENABLED))
5297
- use_rx_sg = 1;
5631
+ use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5632
+ (skb_len > READ_ONCE(priv->rx_copybreak) &&
5633
+ !atomic_read(&card->force_alloc_skb) &&
5634
+ !IS_OSN(card));
52985635
5299
- if (use_rx_sg && qethbuffer->rx_skb) {
5636
+ if (use_rx_sg) {
53005637 /* QETH_CQ_ENABLED only: */
5301
- skb = qethbuffer->rx_skb;
5302
- qethbuffer->rx_skb = NULL;
5303
- } else {
5304
- unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
5638
+ if (qethbuffer->rx_skb &&
5639
+ skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5640
+ skb = qethbuffer->rx_skb;
5641
+ qethbuffer->rx_skb = NULL;
5642
+ goto use_skb;
5643
+ }
53055644
5306
- skb = napi_alloc_skb(&card->napi, linear + headroom);
5645
+ skb = napi_get_frags(napi);
5646
+ if (!skb) {
5647
+ /* -ENOMEM, no point in falling back further. */
5648
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5649
+ goto walk_packet;
5650
+ }
5651
+
5652
+ if (skb_tailroom(skb) >= linear_len + headroom) {
5653
+ uses_frags = true;
5654
+ goto use_skb;
5655
+ }
5656
+
5657
+ netdev_info_once(card->dev,
5658
+ "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5659
+ linear_len + headroom, skb_tailroom(skb));
5660
+ /* Shouldn't happen. Don't optimize, fall back to linear skb. */
53075661 }
5308
- if (!skb)
5309
- goto no_mem;
5662
+
5663
+ linear_len = skb_len;
5664
+ skb = napi_alloc_skb(napi, linear_len + headroom);
5665
+ if (!skb) {
5666
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5667
+ goto walk_packet;
5668
+ }
5669
+
5670
+use_skb:
53105671 if (headroom)
53115672 skb_reserve(skb, headroom);
5312
-
5313
- data_ptr = element->addr + offset;
5673
+walk_packet:
53145674 while (skb_len) {
5315
- data_len = min(skb_len, (int)(element->length - offset));
5316
- if (data_len) {
5317
- if (use_rx_sg)
5318
- qeth_create_skb_frag(element, skb, offset,
5319
- data_len);
5320
- else
5321
- skb_put_data(skb, data_ptr, data_len);
5322
- }
5675
+ int data_len = min(skb_len, (int)(element->length - offset));
5676
+ char *data = phys_to_virt(element->addr) + offset;
5677
+
53235678 skb_len -= data_len;
5679
+ offset += data_len;
5680
+
5681
+ /* Extract data from current element: */
5682
+ if (skb && data_len) {
5683
+ if (linear_len) {
5684
+ unsigned int copy_len;
5685
+
5686
+ copy_len = min_t(unsigned int, linear_len,
5687
+ data_len);
5688
+
5689
+ skb_put_data(skb, data, copy_len);
5690
+ linear_len -= copy_len;
5691
+ data_len -= copy_len;
5692
+ data += copy_len;
5693
+ }
5694
+
5695
+ if (data_len)
5696
+ qeth_create_skb_frag(skb, data, data_len);
5697
+ }
5698
+
5699
+ /* Step forward to next element: */
53245700 if (skb_len) {
53255701 if (qeth_is_last_sbale(element)) {
53265702 QETH_CARD_TEXT(card, 4, "unexeob");
53275703 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5328
- dev_kfree_skb_any(skb);
5329
- card->stats.rx_errors++;
5330
- return NULL;
5704
+ if (skb) {
5705
+ if (uses_frags)
5706
+ napi_free_frags(napi);
5707
+ else
5708
+ dev_kfree_skb_any(skb);
5709
+ QETH_CARD_STAT_INC(card,
5710
+ rx_length_errors);
5711
+ }
5712
+ return -EMSGSIZE;
53315713 }
53325714 element++;
53335715 offset = 0;
5334
- data_ptr = element->addr;
5335
- } else {
5336
- offset += data_len;
53375716 }
53385717 }
5339
- *__element = element;
5718
+
5719
+ /* This packet was skipped, go get another one: */
5720
+ if (!skb)
5721
+ goto next_packet;
5722
+
5723
+ *element_no = element - &buffer->element[0];
53405724 *__offset = offset;
5341
- if (use_rx_sg && card->options.performance_stats) {
5342
- card->perf_stats.sg_skbs_rx++;
5343
- card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
5344
- }
5345
- return skb;
5346
-no_mem:
5347
- if (net_ratelimit()) {
5348
- QETH_CARD_TEXT(card, 2, "noskbmem");
5349
- }
5350
- card->stats.rx_dropped++;
5351
- return NULL;
5725
+
5726
+ qeth_receive_skb(card, skb, hdr, uses_frags);
5727
+ return 0;
53525728 }
5353
-EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
53545729
5355
-int qeth_poll(struct napi_struct *napi, int budget)
5730
+static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5731
+ struct qeth_qdio_buffer *buf, bool *done)
53565732 {
5357
- struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5358
- int work_done = 0;
5359
- struct qeth_qdio_buffer *buffer;
5360
- int done;
5361
- int new_budget = budget;
5733
+ unsigned int work_done = 0;
53625734
5363
- if (card->options.performance_stats) {
5364
- card->perf_stats.inbound_cnt++;
5365
- card->perf_stats.inbound_start_time = qeth_get_micros();
5735
+ while (budget) {
5736
+ if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5737
+ &card->rx.e_offset)) {
5738
+ *done = true;
5739
+ break;
5740
+ }
5741
+
5742
+ work_done++;
5743
+ budget--;
53665744 }
53675745
5368
- while (1) {
5746
+ return work_done;
5747
+}
5748
+
5749
+static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5750
+{
5751
+ struct qeth_rx *ctx = &card->rx;
5752
+ unsigned int work_done = 0;
5753
+
5754
+ while (budget > 0) {
5755
+ struct qeth_qdio_buffer *buffer;
5756
+ unsigned int skbs_done = 0;
5757
+ bool done = false;
5758
+
5759
+ /* Fetch completed RX buffers: */
53695760 if (!card->rx.b_count) {
53705761 card->rx.qdio_err = 0;
53715762 card->rx.b_count = qdio_get_next_buffers(
@@ -5375,59 +5766,227 @@
53755766 card->rx.b_count = 0;
53765767 break;
53775768 }
5378
- card->rx.b_element =
5379
- &card->qdio.in_q->bufs[card->rx.b_index]
5380
- .buffer->element[0];
5381
- card->rx.e_offset = 0;
53825769 }
53835770
5384
- while (card->rx.b_count) {
5385
- buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5386
- if (!(card->rx.qdio_err &&
5387
- qeth_check_qdio_errors(card, buffer->buffer,
5388
- card->rx.qdio_err, "qinerr")))
5389
- work_done +=
5390
- card->discipline->process_rx_buffer(
5391
- card, new_budget, &done);
5392
- else
5393
- done = 1;
5771
+ /* Process one completed RX buffer: */
5772
+ buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5773
+ if (!(card->rx.qdio_err &&
5774
+ qeth_check_qdio_errors(card, buffer->buffer,
5775
+ card->rx.qdio_err, "qinerr")))
5776
+ skbs_done = qeth_extract_skbs(card, budget, buffer,
5777
+ &done);
5778
+ else
5779
+ done = true;
53945780
5395
- if (done) {
5396
- if (card->options.performance_stats)
5397
- card->perf_stats.bufs_rec++;
5398
- qeth_put_buffer_pool_entry(card,
5399
- buffer->pool_entry);
5400
- qeth_queue_input_buffer(card, card->rx.b_index);
5401
- card->rx.b_count--;
5402
- if (card->rx.b_count) {
5403
- card->rx.b_index =
5404
- (card->rx.b_index + 1) %
5405
- QDIO_MAX_BUFFERS_PER_Q;
5406
- card->rx.b_element =
5407
- &card->qdio.in_q
5408
- ->bufs[card->rx.b_index]
5409
- .buffer->element[0];
5410
- card->rx.e_offset = 0;
5411
- }
5412
- }
5781
+ work_done += skbs_done;
5782
+ budget -= skbs_done;
54135783
5414
- if (work_done >= budget)
5415
- goto out;
5416
- else
5417
- new_budget = budget - work_done;
5784
+ if (done) {
5785
+ QETH_CARD_STAT_INC(card, rx_bufs);
5786
+ qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5787
+ buffer->pool_entry = NULL;
5788
+ card->rx.b_count--;
5789
+ ctx->bufs_refill++;
5790
+ ctx->bufs_refill -= qeth_rx_refill_queue(card,
5791
+ ctx->bufs_refill);
5792
+
5793
+ /* Step forward to next buffer: */
5794
+ card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5795
+ card->rx.buf_element = 0;
5796
+ card->rx.e_offset = 0;
54185797 }
54195798 }
54205799
5421
- napi_complete_done(napi, work_done);
5422
- if (qdio_start_irq(card->data.ccwdev, 0))
5423
- napi_schedule(&card->napi);
5424
-out:
5425
- if (card->options.performance_stats)
5426
- card->perf_stats.inbound_time += qeth_get_micros() -
5427
- card->perf_stats.inbound_start_time;
5800
+ return work_done;
5801
+}
5802
+
5803
+static void qeth_cq_poll(struct qeth_card *card)
5804
+{
5805
+ unsigned int work_done = 0;
5806
+
5807
+ while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5808
+ unsigned int start, error;
5809
+ int completed;
5810
+
5811
+ completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
5812
+ &error);
5813
+ if (completed <= 0)
5814
+ return;
5815
+
5816
+ qeth_qdio_cq_handler(card, error, 1, start, completed);
5817
+ work_done += completed;
5818
+ }
5819
+}
5820
+
5821
+int qeth_poll(struct napi_struct *napi, int budget)
5822
+{
5823
+ struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5824
+ unsigned int work_done;
5825
+
5826
+ work_done = qeth_rx_poll(card, budget);
5827
+
5828
+ if (card->options.cq == QETH_CQ_ENABLED)
5829
+ qeth_cq_poll(card);
5830
+
5831
+ if (budget) {
5832
+ struct qeth_rx *ctx = &card->rx;
5833
+
5834
+ /* Process any substantial refill backlog: */
5835
+ ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5836
+
5837
+ /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5838
+ if (work_done >= budget)
5839
+ return work_done;
5840
+ }
5841
+
5842
+ if (napi_complete_done(napi, work_done) &&
5843
+ qdio_start_irq(CARD_DDEV(card)))
5844
+ napi_schedule(napi);
5845
+
54285846 return work_done;
54295847 }
54305848 EXPORT_SYMBOL_GPL(qeth_poll);
5849
+
5850
+static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5851
+ unsigned int bidx, bool error, int budget)
5852
+{
5853
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5854
+ u8 sflags = buffer->buffer->element[15].sflags;
5855
+ struct qeth_card *card = queue->card;
5856
+
5857
+ if (queue->bufstates && (queue->bufstates[bidx].flags &
5858
+ QDIO_OUTBUF_STATE_FLAG_PENDING)) {
5859
+ WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
5860
+
5861
+ QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5862
+
5863
+ switch (atomic_cmpxchg(&buffer->state,
5864
+ QETH_QDIO_BUF_PRIMED,
5865
+ QETH_QDIO_BUF_PENDING)) {
5866
+ case QETH_QDIO_BUF_PRIMED:
5867
+ /* We have initial ownership, no QAOB (yet): */
5868
+ qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5869
+
5870
+ /* Handle race with qeth_qdio_handle_aob(): */
5871
+ switch (atomic_xchg(&buffer->state,
5872
+ QETH_QDIO_BUF_NEED_QAOB)) {
5873
+ case QETH_QDIO_BUF_PENDING:
5874
+ /* No concurrent QAOB notification. */
5875
+
5876
+ /* Prepare the queue slot for immediate re-use: */
5877
+ qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5878
+ if (qeth_init_qdio_out_buf(queue, bidx)) {
5879
+ QETH_CARD_TEXT(card, 2, "outofbuf");
5880
+ qeth_schedule_recovery(card);
5881
+ }
5882
+
5883
+ list_add(&buffer->list_entry,
5884
+ &queue->pending_bufs);
5885
+ /* Skip clearing the buffer: */
5886
+ return;
5887
+ case QETH_QDIO_BUF_QAOB_OK:
5888
+ qeth_notify_skbs(queue, buffer,
5889
+ TX_NOTIFY_DELAYED_OK);
5890
+ error = false;
5891
+ break;
5892
+ case QETH_QDIO_BUF_QAOB_ERROR:
5893
+ qeth_notify_skbs(queue, buffer,
5894
+ TX_NOTIFY_DELAYED_GENERALERROR);
5895
+ error = true;
5896
+ break;
5897
+ default:
5898
+ WARN_ON_ONCE(1);
5899
+ }
5900
+
5901
+ break;
5902
+ case QETH_QDIO_BUF_QAOB_OK:
5903
+ /* qeth_qdio_handle_aob() already received a QAOB: */
5904
+ qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK);
5905
+ error = false;
5906
+ break;
5907
+ case QETH_QDIO_BUF_QAOB_ERROR:
5908
+ /* qeth_qdio_handle_aob() already received a QAOB: */
5909
+ qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR);
5910
+ error = true;
5911
+ break;
5912
+ default:
5913
+ WARN_ON_ONCE(1);
5914
+ }
5915
+ } else if (card->options.cq == QETH_CQ_ENABLED) {
5916
+ qeth_notify_skbs(queue, buffer,
5917
+ qeth_compute_cq_notification(sflags, 0));
5918
+ }
5919
+
5920
+ qeth_clear_output_buffer(queue, buffer, error, budget);
5921
+}
5922
+
5923
+static int qeth_tx_poll(struct napi_struct *napi, int budget)
5924
+{
5925
+ struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5926
+ unsigned int queue_no = queue->queue_no;
5927
+ struct qeth_card *card = queue->card;
5928
+ struct net_device *dev = card->dev;
5929
+ unsigned int work_done = 0;
5930
+ struct netdev_queue *txq;
5931
+
5932
+ txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5933
+
5934
+ while (1) {
5935
+ unsigned int start, error, i;
5936
+ unsigned int packets = 0;
5937
+ unsigned int bytes = 0;
5938
+ int completed;
5939
+
5940
+ qeth_tx_complete_pending_bufs(card, queue, false);
5941
+
5942
+ if (qeth_out_queue_is_empty(queue)) {
5943
+ napi_complete(napi);
5944
+ return 0;
5945
+ }
5946
+
5947
+ /* Give the CPU a breather: */
5948
+ if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5949
+ QETH_TXQ_STAT_INC(queue, completion_yield);
5950
+ if (napi_complete_done(napi, 0))
5951
+ napi_schedule(napi);
5952
+ return 0;
5953
+ }
5954
+
5955
+ completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
5956
+ &start, &error);
5957
+ if (completed <= 0) {
5958
+ /* Ensure we see TX completion for pending work: */
5959
+ if (napi_complete_done(napi, 0))
5960
+ qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
5961
+ return 0;
5962
+ }
5963
+
5964
+ for (i = start; i < start + completed; i++) {
5965
+ struct qeth_qdio_out_buffer *buffer;
5966
+ unsigned int bidx = QDIO_BUFNR(i);
5967
+
5968
+ buffer = queue->bufs[bidx];
5969
+ packets += buffer->frames;
5970
+ bytes += buffer->bytes;
5971
+
5972
+ qeth_handle_send_error(card, buffer, error);
5973
+ qeth_iqd_tx_complete(queue, bidx, error, budget);
5974
+ }
5975
+
5976
+ netdev_tx_completed_queue(txq, packets, bytes);
5977
+ atomic_sub(completed, &queue->used_buffers);
5978
+ work_done += completed;
5979
+
5980
+ /* xmit may have observed the full-condition, but not yet
5981
+ * stopped the txq. In which case the code below won't trigger.
5982
+ * So before returning, xmit will re-check the txq's fill level
5983
+ * and wake it up if needed.
5984
+ */
5985
+ if (netif_tx_queue_stopped(txq) &&
5986
+ !qeth_out_queue_is_full(queue))
5987
+ netif_tx_wake_queue(txq);
5988
+ }
5989
+}
54315990
54325991 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
54335992 {
@@ -5436,95 +5995,91 @@
54365995 return cmd->hdr.return_code;
54375996 }
54385997
5998
+static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5999
+ struct qeth_reply *reply,
6000
+ unsigned long data)
6001
+{
6002
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6003
+ struct qeth_ipa_caps *caps = reply->param;
6004
+
6005
+ if (qeth_setassparms_inspect_rc(cmd))
6006
+ return -EIO;
6007
+
6008
+ caps->supported = cmd->data.setassparms.data.caps.supported;
6009
+ caps->enabled = cmd->data.setassparms.data.caps.enabled;
6010
+ return 0;
6011
+}
6012
+
54396013 int qeth_setassparms_cb(struct qeth_card *card,
54406014 struct qeth_reply *reply, unsigned long data)
54416015 {
5442
- struct qeth_ipa_cmd *cmd;
6016
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
54436017
54446018 QETH_CARD_TEXT(card, 4, "defadpcb");
54456019
5446
- cmd = (struct qeth_ipa_cmd *) data;
5447
- if (cmd->hdr.return_code == 0) {
5448
- cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5449
- if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5450
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5451
- if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5452
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5453
- }
6020
+ if (cmd->hdr.return_code)
6021
+ return -EIO;
6022
+
6023
+ cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6024
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6025
+ card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6026
+ if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6027
+ card->options.ipa6.enabled = cmd->hdr.assists.enabled;
54546028 return 0;
54556029 }
54566030 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
54576031
54586032 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
54596033 enum qeth_ipa_funcs ipa_func,
5460
- __u16 cmd_code, __u16 len,
6034
+ u16 cmd_code,
6035
+ unsigned int data_length,
54616036 enum qeth_prot_versions prot)
54626037 {
6038
+ struct qeth_ipacmd_setassparms *setassparms;
6039
+ struct qeth_ipacmd_setassparms_hdr *hdr;
54636040 struct qeth_cmd_buffer *iob;
5464
- struct qeth_ipa_cmd *cmd;
54656041
54666042 QETH_CARD_TEXT(card, 4, "getasscm");
5467
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
6043
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6044
+ data_length +
6045
+ offsetof(struct qeth_ipacmd_setassparms,
6046
+ data));
6047
+ if (!iob)
6048
+ return NULL;
54686049
5469
- if (iob) {
5470
- cmd = __ipa_cmd(iob);
5471
- cmd->data.setassparms.hdr.assist_no = ipa_func;
5472
- cmd->data.setassparms.hdr.length = 8 + len;
5473
- cmd->data.setassparms.hdr.command_code = cmd_code;
5474
- cmd->data.setassparms.hdr.return_code = 0;
5475
- cmd->data.setassparms.hdr.seq_no = 0;
5476
- }
6050
+ setassparms = &__ipa_cmd(iob)->data.setassparms;
6051
+ setassparms->assist_no = ipa_func;
54776052
6053
+ hdr = &setassparms->hdr;
6054
+ hdr->length = sizeof(*hdr) + data_length;
6055
+ hdr->command_code = cmd_code;
54786056 return iob;
54796057 }
54806058 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
54816059
5482
-int qeth_send_setassparms(struct qeth_card *card,
5483
- struct qeth_cmd_buffer *iob, __u16 len, long data,
5484
- int (*reply_cb)(struct qeth_card *,
5485
- struct qeth_reply *, unsigned long),
5486
- void *reply_param)
5487
-{
5488
- int rc;
5489
- struct qeth_ipa_cmd *cmd;
5490
-
5491
- QETH_CARD_TEXT(card, 4, "sendassp");
5492
-
5493
- cmd = __ipa_cmd(iob);
5494
- if (len <= sizeof(__u32))
5495
- cmd->data.setassparms.data.flags_32bit = (__u32) data;
5496
- else /* (len > sizeof(__u32)) */
5497
- memcpy(&cmd->data.setassparms.data, (void *) data, len);
5498
-
5499
- rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
5500
- return rc;
5501
-}
5502
-EXPORT_SYMBOL_GPL(qeth_send_setassparms);
5503
-
55046060 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
55056061 enum qeth_ipa_funcs ipa_func,
5506
- u16 cmd_code, long data,
6062
+ u16 cmd_code, u32 *data,
55076063 enum qeth_prot_versions prot)
55086064 {
5509
- int rc;
5510
- int length = 0;
6065
+ unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
55116066 struct qeth_cmd_buffer *iob;
55126067
55136068 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5514
- if (data)
5515
- length = sizeof(__u32);
55166069 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
55176070 if (!iob)
55186071 return -ENOMEM;
5519
- rc = qeth_send_setassparms(card, iob, length, data,
5520
- qeth_setassparms_cb, NULL);
5521
- return rc;
6072
+
6073
+ if (data)
6074
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6075
+ return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
55226076 }
55236077 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
55246078
55256079 static void qeth_unregister_dbf_views(void)
55266080 {
55276081 int x;
6082
+
55286083 for (x = 0; x < QETH_DBF_INFOS; x++) {
55296084 debug_unregister(qeth_dbf[x].id);
55306085 qeth_dbf[x].id = NULL;
@@ -5575,11 +6130,11 @@
55756130 return 0;
55766131 }
55776132
6133
+static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
6134
+
55786135 int qeth_core_load_discipline(struct qeth_card *card,
55796136 enum qeth_discipline_id discipline)
55806137 {
5581
- int rc = 0;
5582
-
55836138 mutex_lock(&qeth_mod_mutex);
55846139 switch (discipline) {
55856140 case QETH_DISCIPLINE_LAYER3:
@@ -5593,22 +6148,25 @@
55936148 default:
55946149 break;
55956150 }
6151
+ mutex_unlock(&qeth_mod_mutex);
55966152
55976153 if (!card->discipline) {
55986154 dev_err(&card->gdev->dev, "There is no kernel module to "
55996155 "support discipline %d\n", discipline);
5600
- rc = -EINVAL;
6156
+ return -EINVAL;
56016157 }
5602
- mutex_unlock(&qeth_mod_mutex);
5603
- return rc;
6158
+
6159
+ card->options.layer = discipline;
6160
+ return 0;
56046161 }
56056162
56066163 void qeth_core_free_discipline(struct qeth_card *card)
56076164 {
5608
- if (card->options.layer2)
6165
+ if (IS_LAYER2(card))
56096166 symbol_put(qeth_l2_discipline);
56106167 else
56116168 symbol_put(qeth_l3_discipline);
6169
+ card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
56126170 card->discipline = NULL;
56136171 }
56146172
@@ -5694,20 +6252,30 @@
56946252 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
56956253 {
56966254 struct net_device *dev;
6255
+ struct qeth_priv *priv;
56976256
56986257 switch (card->info.type) {
56996258 case QETH_CARD_TYPE_IQD:
5700
- dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
6259
+ dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6260
+ ether_setup, QETH_MAX_OUT_QUEUES, 1);
6261
+ break;
6262
+ case QETH_CARD_TYPE_OSM:
6263
+ dev = alloc_etherdev(sizeof(*priv));
57016264 break;
57026265 case QETH_CARD_TYPE_OSN:
5703
- dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
6266
+ dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
6267
+ ether_setup);
57046268 break;
57056269 default:
5706
- dev = alloc_etherdev(0);
6270
+ dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
57076271 }
57086272
57096273 if (!dev)
57106274 return NULL;
6275
+
6276
+ priv = netdev_priv(dev);
6277
+ priv->rx_copybreak = QETH_RX_COPYBREAK;
6278
+ priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
57116279
57126280 dev->ml_priv = card;
57136281 dev->watchdog_timeo = QETH_TX_TIMEOUT;
@@ -5718,7 +6286,10 @@
57186286 SET_NETDEV_DEV(dev, &card->gdev->dev);
57196287 netif_carrier_off(dev);
57206288
5721
- if (!IS_OSN(card)) {
6289
+ if (IS_OSN(card)) {
6290
+ dev->ethtool_ops = &qeth_osn_ethtool_ops;
6291
+ } else {
6292
+ dev->ethtool_ops = &qeth_ethtool_ops;
57226293 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
57236294 dev->hw_features |= NETIF_F_SG;
57246295 dev->vlan_features |= NETIF_F_SG;
@@ -5746,7 +6317,6 @@
57466317 struct device *dev;
57476318 int rc;
57486319 enum qeth_discipline_id enforced_disc;
5749
- unsigned long flags;
57506320 char dbf_name[DBF_NAME_LEN];
57516321
57526322 QETH_DBF_TEXT(SETUP, 2, "probedev");
@@ -5773,12 +6343,7 @@
57736343 goto err_card;
57746344 }
57756345
5776
- dev_set_drvdata(&gdev->dev, card);
57776346 qeth_setup_card(card);
5778
- rc = qeth_update_from_chp_desc(card);
5779
- if (rc)
5780
- goto err_chp_desc;
5781
-
57826347 card->dev = qeth_alloc_netdev(card);
57836348 if (!card->dev) {
57846349 rc = -ENOMEM;
@@ -5786,6 +6351,13 @@
57866351 }
57876352
57886353 qeth_determine_capabilities(card);
6354
+ qeth_set_blkt_defaults(card);
6355
+
6356
+ card->qdio.no_out_queues = card->dev->num_tx_queues;
6357
+ rc = qeth_update_from_chp_desc(card);
6358
+ if (rc)
6359
+ goto err_chp_desc;
6360
+
57896361 enforced_disc = qeth_enforce_discipline(card);
57906362 switch (enforced_disc) {
57916363 case QETH_DISCIPLINE_UNDETERMINED:
@@ -5793,29 +6365,26 @@
57936365 break;
57946366 default:
57956367 card->info.layer_enforced = true;
6368
+ /* It's so early that we don't need the discipline_mutex yet. */
57966369 rc = qeth_core_load_discipline(card, enforced_disc);
57976370 if (rc)
57986371 goto err_load;
57996372
5800
- gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
5801
- ? card->discipline->devtype
5802
- : &qeth_osn_devtype;
6373
+ gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
6374
+ card->discipline->devtype;
58036375 rc = card->discipline->setup(card->gdev);
58046376 if (rc)
58056377 goto err_disc;
58066378 break;
58076379 }
58086380
5809
- write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
5810
- list_add_tail(&card->list, &qeth_core_card_list.list);
5811
- write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
58126381 return 0;
58136382
58146383 err_disc:
58156384 qeth_core_free_discipline(card);
58166385 err_load:
5817
- free_netdev(card->dev);
58186386 err_chp_desc:
6387
+ free_netdev(card->dev);
58196388 err_card:
58206389 qeth_core_free_card(card);
58216390 err_dev:
@@ -5825,22 +6394,21 @@
58256394
58266395 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
58276396 {
5828
- unsigned long flags;
58296397 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
58306398
5831
- QETH_DBF_TEXT(SETUP, 2, "removedv");
6399
+ QETH_CARD_TEXT(card, 2, "removedv");
58326400
6401
+ mutex_lock(&card->discipline_mutex);
58336402 if (card->discipline) {
58346403 card->discipline->remove(gdev);
58356404 qeth_core_free_discipline(card);
58366405 }
6406
+ mutex_unlock(&card->discipline_mutex);
58376407
5838
- write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
5839
- list_del(&card->list);
5840
- write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
6408
+ qeth_free_qdio_queues(card);
6409
+
58416410 free_netdev(card->dev);
58426411 qeth_core_free_card(card);
5843
- dev_set_drvdata(&gdev->dev, NULL);
58446412 put_device(&gdev->dev);
58456413 }
58466414
@@ -5850,11 +6418,10 @@
58506418 int rc = 0;
58516419 enum qeth_discipline_id def_discipline;
58526420
6421
+ mutex_lock(&card->discipline_mutex);
58536422 if (!card->discipline) {
5854
- if (card->info.type == QETH_CARD_TYPE_IQD)
5855
- def_discipline = QETH_DISCIPLINE_LAYER3;
5856
- else
5857
- def_discipline = QETH_DISCIPLINE_LAYER2;
6423
+ def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6424
+ QETH_DISCIPLINE_LAYER2;
58586425 rc = qeth_core_load_discipline(card, def_discipline);
58596426 if (rc)
58606427 goto err;
@@ -5864,50 +6431,36 @@
58646431 goto err;
58656432 }
58666433 }
5867
- rc = card->discipline->set_online(gdev);
6434
+
6435
+ rc = qeth_set_online(card, card->discipline);
6436
+
58686437 err:
6438
+ mutex_unlock(&card->discipline_mutex);
58696439 return rc;
58706440 }
58716441
58726442 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
58736443 {
58746444 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5875
- return card->discipline->set_offline(gdev);
6445
+ int rc;
6446
+
6447
+ mutex_lock(&card->discipline_mutex);
6448
+ rc = qeth_set_offline(card, card->discipline, false);
6449
+ mutex_unlock(&card->discipline_mutex);
6450
+
6451
+ return rc;
58766452 }
58776453
58786454 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
58796455 {
58806456 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6457
+
58816458 qeth_set_allowed_threads(card, 0, 1);
58826459 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
58836460 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
58846461 qeth_qdio_clear_card(card, 0);
5885
- qeth_clear_qdio_buffers(card);
6462
+ qeth_drain_output_queues(card);
58866463 qdio_free(CARD_DDEV(card));
5887
-}
5888
-
5889
-static int qeth_core_freeze(struct ccwgroup_device *gdev)
5890
-{
5891
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5892
- if (card->discipline && card->discipline->freeze)
5893
- return card->discipline->freeze(gdev);
5894
- return 0;
5895
-}
5896
-
5897
-static int qeth_core_thaw(struct ccwgroup_device *gdev)
5898
-{
5899
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5900
- if (card->discipline && card->discipline->thaw)
5901
- return card->discipline->thaw(gdev);
5902
- return 0;
5903
-}
5904
-
5905
-static int qeth_core_restore(struct ccwgroup_device *gdev)
5906
-{
5907
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5908
- if (card->discipline && card->discipline->restore)
5909
- return card->discipline->restore(gdev);
5910
- return 0;
59116464 }
59126465
59136466 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
@@ -5946,12 +6499,22 @@
59466499 .set_online = qeth_core_set_online,
59476500 .set_offline = qeth_core_set_offline,
59486501 .shutdown = qeth_core_shutdown,
5949
- .prepare = NULL,
5950
- .complete = NULL,
5951
- .freeze = qeth_core_freeze,
5952
- .thaw = qeth_core_thaw,
5953
- .restore = qeth_core_restore,
59546502 };
6503
+
6504
+struct qeth_card *qeth_get_card_by_busid(char *bus_id)
6505
+{
6506
+ struct ccwgroup_device *gdev;
6507
+ struct qeth_card *card;
6508
+
6509
+ gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
6510
+ if (!gdev)
6511
+ return NULL;
6512
+
6513
+ card = dev_get_drvdata(&gdev->dev);
6514
+ put_device(&gdev->dev);
6515
+ return card;
6516
+}
6517
+EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
59556518
59566519 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
59576520 {
@@ -5959,27 +6522,15 @@
59596522 struct mii_ioctl_data *mii_data;
59606523 int rc = 0;
59616524
5962
- if (!card)
5963
- return -ENODEV;
5964
-
5965
- if (!qeth_card_hw_is_reachable(card))
5966
- return -ENODEV;
5967
-
5968
- if (card->info.type == QETH_CARD_TYPE_OSN)
5969
- return -EPERM;
5970
-
59716525 switch (cmd) {
59726526 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
59736527 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
59746528 break;
59756529 case SIOC_QETH_GET_CARD_TYPE:
5976
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
5977
- card->info.type == QETH_CARD_TYPE_OSM ||
5978
- card->info.type == QETH_CARD_TYPE_OSX) &&
5979
- !card->info.guestlan)
6530
+ if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6531
+ !IS_VM_NIC(card))
59806532 return 1;
5981
- else
5982
- return 0;
6533
+ return 0;
59836534 case SIOCGMIIPHY:
59846535 mii_data = if_mii(rq);
59856536 mii_data->phy_id = 0;
@@ -6007,464 +6558,168 @@
60076558 }
60086559 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
60096560
6010
-static struct {
6011
- const char str[ETH_GSTRING_LEN];
6012
-} qeth_ethtool_stats_keys[] = {
6013
-/* 0 */{"rx skbs"},
6014
- {"rx buffers"},
6015
- {"tx skbs"},
6016
- {"tx buffers"},
6017
- {"tx skbs no packing"},
6018
- {"tx buffers no packing"},
6019
- {"tx skbs packing"},
6020
- {"tx buffers packing"},
6021
- {"tx sg skbs"},
6022
- {"tx buffer elements"},
6023
-/* 10 */{"rx sg skbs"},
6024
- {"rx sg frags"},
6025
- {"rx sg page allocs"},
6026
- {"tx large kbytes"},
6027
- {"tx large count"},
6028
- {"tx pk state ch n->p"},
6029
- {"tx pk state ch p->n"},
6030
- {"tx pk watermark low"},
6031
- {"tx pk watermark high"},
6032
- {"queue 0 buffer usage"},
6033
-/* 20 */{"queue 1 buffer usage"},
6034
- {"queue 2 buffer usage"},
6035
- {"queue 3 buffer usage"},
6036
- {"rx poll time"},
6037
- {"rx poll count"},
6038
- {"rx do_QDIO time"},
6039
- {"rx do_QDIO count"},
6040
- {"tx handler time"},
6041
- {"tx handler count"},
6042
- {"tx time"},
6043
-/* 30 */{"tx count"},
6044
- {"tx do_QDIO time"},
6045
- {"tx do_QDIO count"},
6046
- {"tx csum"},
6047
- {"tx lin"},
6048
- {"tx linfail"},
6049
- {"cq handler count"},
6050
- {"cq handler time"},
6051
- {"rx csum"}
6052
-};
6053
-
6054
-int qeth_core_get_sset_count(struct net_device *dev, int stringset)
6055
-{
6056
- switch (stringset) {
6057
- case ETH_SS_STATS:
6058
- return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
6059
- default:
6060
- return -EINVAL;
6061
- }
6062
-}
6063
-EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
6064
-
6065
-void qeth_core_get_ethtool_stats(struct net_device *dev,
6066
- struct ethtool_stats *stats, u64 *data)
6067
-{
6068
- struct qeth_card *card = dev->ml_priv;
6069
- data[0] = card->stats.rx_packets -
6070
- card->perf_stats.initial_rx_packets;
6071
- data[1] = card->perf_stats.bufs_rec;
6072
- data[2] = card->stats.tx_packets -
6073
- card->perf_stats.initial_tx_packets;
6074
- data[3] = card->perf_stats.bufs_sent;
6075
- data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
6076
- - card->perf_stats.skbs_sent_pack;
6077
- data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
6078
- data[6] = card->perf_stats.skbs_sent_pack;
6079
- data[7] = card->perf_stats.bufs_sent_pack;
6080
- data[8] = card->perf_stats.sg_skbs_sent;
6081
- data[9] = card->perf_stats.buf_elements_sent;
6082
- data[10] = card->perf_stats.sg_skbs_rx;
6083
- data[11] = card->perf_stats.sg_frags_rx;
6084
- data[12] = card->perf_stats.sg_alloc_page_rx;
6085
- data[13] = (card->perf_stats.large_send_bytes >> 10);
6086
- data[14] = card->perf_stats.large_send_cnt;
6087
- data[15] = card->perf_stats.sc_dp_p;
6088
- data[16] = card->perf_stats.sc_p_dp;
6089
- data[17] = QETH_LOW_WATERMARK_PACK;
6090
- data[18] = QETH_HIGH_WATERMARK_PACK;
6091
- data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
6092
- data[20] = (card->qdio.no_out_queues > 1) ?
6093
- atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
6094
- data[21] = (card->qdio.no_out_queues > 2) ?
6095
- atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
6096
- data[22] = (card->qdio.no_out_queues > 3) ?
6097
- atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
6098
- data[23] = card->perf_stats.inbound_time;
6099
- data[24] = card->perf_stats.inbound_cnt;
6100
- data[25] = card->perf_stats.inbound_do_qdio_time;
6101
- data[26] = card->perf_stats.inbound_do_qdio_cnt;
6102
- data[27] = card->perf_stats.outbound_handler_time;
6103
- data[28] = card->perf_stats.outbound_handler_cnt;
6104
- data[29] = card->perf_stats.outbound_time;
6105
- data[30] = card->perf_stats.outbound_cnt;
6106
- data[31] = card->perf_stats.outbound_do_qdio_time;
6107
- data[32] = card->perf_stats.outbound_do_qdio_cnt;
6108
- data[33] = card->perf_stats.tx_csum;
6109
- data[34] = card->perf_stats.tx_lin;
6110
- data[35] = card->perf_stats.tx_linfail;
6111
- data[36] = card->perf_stats.cq_cnt;
6112
- data[37] = card->perf_stats.cq_time;
6113
- data[38] = card->perf_stats.rx_csum;
6114
-}
6115
-EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
6116
-
6117
-void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6118
-{
6119
- switch (stringset) {
6120
- case ETH_SS_STATS:
6121
- memcpy(data, &qeth_ethtool_stats_keys,
6122
- sizeof(qeth_ethtool_stats_keys));
6123
- break;
6124
- default:
6125
- WARN_ON(1);
6126
- break;
6127
- }
6128
-}
6129
-EXPORT_SYMBOL_GPL(qeth_core_get_strings);
6130
-
6131
-void qeth_core_get_drvinfo(struct net_device *dev,
6132
- struct ethtool_drvinfo *info)
6133
-{
6134
- struct qeth_card *card = dev->ml_priv;
6135
-
6136
- strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3",
6137
- sizeof(info->driver));
6138
- strlcpy(info->version, "1.0", sizeof(info->version));
6139
- strlcpy(info->fw_version, card->info.mcl_level,
6140
- sizeof(info->fw_version));
6141
- snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
6142
- CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
6143
-}
6144
-EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
6145
-
6146
-/* Helper function to fill 'advertising' and 'supported' which are the same. */
6147
-/* Autoneg and full-duplex are supported and advertised unconditionally. */
6148
-/* Always advertise and support all speeds up to specified, and only one */
6149
-/* specified port type. */
6150
-static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
6151
- int maxspeed, int porttype)
6152
-{
6153
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
6154
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
6155
- ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
6156
-
6157
- ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
6158
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
6159
-
6160
- switch (porttype) {
6161
- case PORT_TP:
6162
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6163
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6164
- break;
6165
- case PORT_FIBRE:
6166
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
6167
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
6168
- break;
6169
- default:
6170
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6171
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6172
- WARN_ON_ONCE(1);
6173
- }
6174
-
6175
- /* fallthrough from high to low, to select all legal speeds: */
6176
- switch (maxspeed) {
6177
- case SPEED_10000:
6178
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6179
- 10000baseT_Full);
6180
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6181
- 10000baseT_Full);
6182
- case SPEED_1000:
6183
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6184
- 1000baseT_Full);
6185
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6186
- 1000baseT_Full);
6187
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6188
- 1000baseT_Half);
6189
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6190
- 1000baseT_Half);
6191
- case SPEED_100:
6192
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6193
- 100baseT_Full);
6194
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6195
- 100baseT_Full);
6196
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6197
- 100baseT_Half);
6198
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6199
- 100baseT_Half);
6200
- case SPEED_10:
6201
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6202
- 10baseT_Full);
6203
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6204
- 10baseT_Full);
6205
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6206
- 10baseT_Half);
6207
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6208
- 10baseT_Half);
6209
- /* end fallthrough */
6210
- break;
6211
- default:
6212
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6213
- 10baseT_Full);
6214
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6215
- 10baseT_Full);
6216
- ethtool_link_ksettings_add_link_mode(cmd, supported,
6217
- 10baseT_Half);
6218
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
6219
- 10baseT_Half);
6220
- WARN_ON_ONCE(1);
6221
- }
6222
-}
6223
-
6224
-int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
6225
- struct ethtool_link_ksettings *cmd)
6226
-{
6227
- struct qeth_card *card = netdev->ml_priv;
6228
- enum qeth_link_types link_type;
6229
- struct carrier_info carrier_info;
6230
- int rc;
6231
-
6232
- if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
6233
- link_type = QETH_LINK_TYPE_10GBIT_ETH;
6234
- else
6235
- link_type = card->info.link_type;
6236
-
6237
- cmd->base.duplex = DUPLEX_FULL;
6238
- cmd->base.autoneg = AUTONEG_ENABLE;
6239
- cmd->base.phy_address = 0;
6240
- cmd->base.mdio_support = 0;
6241
- cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
6242
- cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6243
-
6244
- switch (link_type) {
6245
- case QETH_LINK_TYPE_FAST_ETH:
6246
- case QETH_LINK_TYPE_LANE_ETH100:
6247
- cmd->base.speed = SPEED_100;
6248
- cmd->base.port = PORT_TP;
6249
- break;
6250
- case QETH_LINK_TYPE_GBIT_ETH:
6251
- case QETH_LINK_TYPE_LANE_ETH1000:
6252
- cmd->base.speed = SPEED_1000;
6253
- cmd->base.port = PORT_FIBRE;
6254
- break;
6255
- case QETH_LINK_TYPE_10GBIT_ETH:
6256
- cmd->base.speed = SPEED_10000;
6257
- cmd->base.port = PORT_FIBRE;
6258
- break;
6259
- default:
6260
- cmd->base.speed = SPEED_10;
6261
- cmd->base.port = PORT_TP;
6262
- }
6263
- qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
6264
-
6265
- /* Check if we can obtain more accurate information. */
6266
- /* If QUERY_CARD_INFO command is not supported or fails, */
6267
- /* just return the heuristics that was filled above. */
6268
- if (!qeth_card_hw_is_reachable(card))
6269
- return -ENODEV;
6270
- rc = qeth_query_card_info(card, &carrier_info);
6271
- if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
6272
- return 0;
6273
- if (rc) /* report error from the hardware operation */
6274
- return rc;
6275
- /* on success, fill in the information got from the hardware */
6276
-
6277
- netdev_dbg(netdev,
6278
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
6279
- carrier_info.card_type,
6280
- carrier_info.port_mode,
6281
- carrier_info.port_speed);
6282
-
6283
- /* Update attributes for which we've obtained more authoritative */
6284
- /* information, leave the rest the way they where filled above. */
6285
- switch (carrier_info.card_type) {
6286
- case CARD_INFO_TYPE_1G_COPPER_A:
6287
- case CARD_INFO_TYPE_1G_COPPER_B:
6288
- cmd->base.port = PORT_TP;
6289
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6290
- break;
6291
- case CARD_INFO_TYPE_1G_FIBRE_A:
6292
- case CARD_INFO_TYPE_1G_FIBRE_B:
6293
- cmd->base.port = PORT_FIBRE;
6294
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6295
- break;
6296
- case CARD_INFO_TYPE_10G_FIBRE_A:
6297
- case CARD_INFO_TYPE_10G_FIBRE_B:
6298
- cmd->base.port = PORT_FIBRE;
6299
- qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
6300
- break;
6301
- }
6302
-
6303
- switch (carrier_info.port_mode) {
6304
- case CARD_INFO_PORTM_FULLDUPLEX:
6305
- cmd->base.duplex = DUPLEX_FULL;
6306
- break;
6307
- case CARD_INFO_PORTM_HALFDUPLEX:
6308
- cmd->base.duplex = DUPLEX_HALF;
6309
- break;
6310
- }
6311
-
6312
- switch (carrier_info.port_speed) {
6313
- case CARD_INFO_PORTS_10M:
6314
- cmd->base.speed = SPEED_10;
6315
- break;
6316
- case CARD_INFO_PORTS_100M:
6317
- cmd->base.speed = SPEED_100;
6318
- break;
6319
- case CARD_INFO_PORTS_1G:
6320
- cmd->base.speed = SPEED_1000;
6321
- break;
6322
- case CARD_INFO_PORTS_10G:
6323
- cmd->base.speed = SPEED_10000;
6324
- break;
6325
- }
6326
-
6327
- return 0;
6328
-}
6329
-EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
6330
-
6331
-/* Callback to handle checksum offload command reply from OSA card.
6332
- * Verify that required features have been enabled on the card.
6333
- * Return error in hdr->return_code as this value is checked by caller.
6334
- *
6335
- * Always returns zero to indicate no further messages from the OSA card.
6336
- */
6337
-static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
6338
- struct qeth_reply *reply,
6339
- unsigned long data)
6561
+static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6562
+ unsigned long data)
63406563 {
63416564 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6342
- struct qeth_checksum_cmd *chksum_cb =
6343
- (struct qeth_checksum_cmd *)reply->param;
6565
+ u32 *features = reply->param;
63446566
6345
- QETH_CARD_TEXT(card, 4, "chkdoccb");
63466567 if (qeth_setassparms_inspect_rc(cmd))
6347
- return 0;
6568
+ return -EIO;
63486569
6349
- memset(chksum_cb, 0, sizeof(*chksum_cb));
6350
- if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6351
- chksum_cb->supported =
6352
- cmd->data.setassparms.data.chksum.supported;
6353
- QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
6354
- }
6355
- if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
6356
- chksum_cb->supported =
6357
- cmd->data.setassparms.data.chksum.supported;
6358
- chksum_cb->enabled =
6359
- cmd->data.setassparms.data.chksum.enabled;
6360
- QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
6361
- QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
6362
- }
6570
+ *features = cmd->data.setassparms.data.flags_32bit;
63636571 return 0;
63646572 }
63656573
6366
-/* Send command to OSA card and check results. */
6367
-static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
6368
- enum qeth_ipa_funcs ipa_func,
6369
- __u16 cmd_code, long data,
6370
- struct qeth_checksum_cmd *chksum_cb,
6371
- enum qeth_prot_versions prot)
6574
+static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6575
+ enum qeth_prot_versions prot)
63726576 {
6373
- struct qeth_cmd_buffer *iob;
6374
- int rc = -ENOMEM;
6375
-
6376
- QETH_CARD_TEXT(card, 4, "chkdocmd");
6377
- iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6378
- sizeof(__u32), prot);
6379
- if (iob)
6380
- rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
6381
- qeth_ipa_checksum_run_cmd_cb,
6382
- chksum_cb);
6383
- return rc;
6577
+ return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
6578
+ NULL, prot);
63846579 }
63856580
6386
-static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
6387
- enum qeth_prot_versions prot)
6581
+static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6582
+ enum qeth_prot_versions prot, u8 *lp2lp)
63886583 {
63896584 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6390
- struct qeth_checksum_cmd chksum_cb;
6585
+ struct qeth_cmd_buffer *iob;
6586
+ struct qeth_ipa_caps caps;
6587
+ u32 features;
63916588 int rc;
63926589
6393
- if (prot == QETH_PROT_IPV4)
6590
+ /* some L3 HW requires combined L3+L4 csum offload: */
6591
+ if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6592
+ cstype == IPA_OUTBOUND_CHECKSUM)
63946593 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6395
- rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6396
- &chksum_cb, prot);
6397
- if (!rc) {
6398
- if ((required_features & chksum_cb.supported) !=
6399
- required_features)
6400
- rc = -EIO;
6401
- else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
6402
- cstype == IPA_INBOUND_CHECKSUM)
6403
- dev_warn(&card->gdev->dev,
6404
- "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
6405
- QETH_CARD_IFNAME(card));
6406
- }
6407
- if (rc) {
6408
- qeth_send_simple_setassparms_prot(card, cstype,
6409
- IPA_CMD_ASS_STOP, 0, prot);
6410
- dev_warn(&card->gdev->dev,
6411
- "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
6412
- prot, QETH_CARD_IFNAME(card));
6413
- return rc;
6414
- }
6415
- rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6416
- chksum_cb.supported, &chksum_cb,
6594
+
6595
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
64176596 prot);
6418
- if (!rc) {
6419
- if ((required_features & chksum_cb.enabled) !=
6420
- required_features)
6421
- rc = -EIO;
6422
- }
6423
- if (rc) {
6424
- qeth_send_simple_setassparms_prot(card, cstype,
6425
- IPA_CMD_ASS_STOP, 0, prot);
6426
- dev_warn(&card->gdev->dev,
6427
- "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
6428
- prot, QETH_CARD_IFNAME(card));
6597
+ if (!iob)
6598
+ return -ENOMEM;
6599
+
6600
+ rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6601
+ if (rc)
64296602 return rc;
6603
+
6604
+ if ((required_features & features) != required_features) {
6605
+ qeth_set_csum_off(card, cstype, prot);
6606
+ return -EOPNOTSUPP;
6607
+ }
6608
+
6609
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6610
+ SETASS_DATA_SIZEOF(flags_32bit),
6611
+ prot);
6612
+ if (!iob) {
6613
+ qeth_set_csum_off(card, cstype, prot);
6614
+ return -ENOMEM;
6615
+ }
6616
+
6617
+ if (features & QETH_IPA_CHECKSUM_LP2LP)
6618
+ required_features |= QETH_IPA_CHECKSUM_LP2LP;
6619
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6620
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6621
+ if (rc) {
6622
+ qeth_set_csum_off(card, cstype, prot);
6623
+ return rc;
6624
+ }
6625
+
6626
+ if (!qeth_ipa_caps_supported(&caps, required_features) ||
6627
+ !qeth_ipa_caps_enabled(&caps, required_features)) {
6628
+ qeth_set_csum_off(card, cstype, prot);
6629
+ return -EOPNOTSUPP;
64306630 }
64316631
64326632 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
64336633 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6634
+
6635
+ if (lp2lp)
6636
+ *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6637
+
64346638 return 0;
64356639 }
64366640
64376641 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6438
- enum qeth_prot_versions prot)
6642
+ enum qeth_prot_versions prot, u8 *lp2lp)
64396643 {
6440
- int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
6441
- : qeth_send_simple_setassparms_prot(card, cstype,
6442
- IPA_CMD_ASS_STOP, 0,
6443
- prot);
6444
- return rc ? -EIO : 0;
6644
+ return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6645
+ qeth_set_csum_off(card, cstype, prot);
64456646 }
64466647
6447
-static int qeth_set_ipa_tso(struct qeth_card *card, int on)
6648
+static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6649
+ unsigned long data)
64486650 {
6651
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6652
+ struct qeth_tso_start_data *tso_data = reply->param;
6653
+
6654
+ if (qeth_setassparms_inspect_rc(cmd))
6655
+ return -EIO;
6656
+
6657
+ tso_data->mss = cmd->data.setassparms.data.tso.mss;
6658
+ tso_data->supported = cmd->data.setassparms.data.tso.supported;
6659
+ return 0;
6660
+}
6661
+
6662
+static int qeth_set_tso_off(struct qeth_card *card,
6663
+ enum qeth_prot_versions prot)
6664
+{
6665
+ return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6666
+ IPA_CMD_ASS_STOP, NULL, prot);
6667
+}
6668
+
6669
+static int qeth_set_tso_on(struct qeth_card *card,
6670
+ enum qeth_prot_versions prot)
6671
+{
6672
+ struct qeth_tso_start_data tso_data;
6673
+ struct qeth_cmd_buffer *iob;
6674
+ struct qeth_ipa_caps caps;
64496675 int rc;
64506676
6451
- QETH_CARD_TEXT(card, 3, "sttso");
6677
+ iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6678
+ IPA_CMD_ASS_START, 0, prot);
6679
+ if (!iob)
6680
+ return -ENOMEM;
64526681
6453
- if (on) {
6454
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
6455
- IPA_CMD_ASS_START, 0);
6456
- if (rc) {
6457
- dev_warn(&card->gdev->dev,
6458
- "Starting outbound TCP segmentation offload for %s failed\n",
6459
- QETH_CARD_IFNAME(card));
6460
- return -EIO;
6461
- }
6462
- dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
6463
- } else {
6464
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
6465
- IPA_CMD_ASS_STOP, 0);
6682
+ rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6683
+ if (rc)
6684
+ return rc;
6685
+
6686
+ if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6687
+ qeth_set_tso_off(card, prot);
6688
+ return -EOPNOTSUPP;
64666689 }
6467
- return rc;
6690
+
6691
+ iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6692
+ IPA_CMD_ASS_ENABLE,
6693
+ SETASS_DATA_SIZEOF(caps), prot);
6694
+ if (!iob) {
6695
+ qeth_set_tso_off(card, prot);
6696
+ return -ENOMEM;
6697
+ }
6698
+
6699
+ /* enable TSO capability */
6700
+ __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6701
+ QETH_IPA_LARGE_SEND_TCP;
6702
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6703
+ if (rc) {
6704
+ qeth_set_tso_off(card, prot);
6705
+ return rc;
6706
+ }
6707
+
6708
+ if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6709
+ !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6710
+ qeth_set_tso_off(card, prot);
6711
+ return -EOPNOTSUPP;
6712
+ }
6713
+
6714
+ dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6715
+ tso_data.mss);
6716
+ return 0;
6717
+}
6718
+
6719
+static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6720
+ enum qeth_prot_versions prot)
6721
+{
6722
+ return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
64686723 }
64696724
64706725 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
....@@ -6474,13 +6729,13 @@
64746729
64756730 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
64766731 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6477
- QETH_PROT_IPV4);
6732
+ QETH_PROT_IPV4, NULL);
64786733 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
64796734 /* no/one Offload Assist available, so the rc is trivial */
64806735 return rc_ipv4;
64816736
64826737 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6483
- QETH_PROT_IPV6);
6738
+ QETH_PROT_IPV6, NULL);
64846739
64856740 if (on)
64866741 /* enable: success if any Assist is active */
....@@ -6490,8 +6745,6 @@
64906745 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
64916746 }
64926747
6493
-#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
6494
- NETIF_F_IPV6_CSUM)
64956748 /**
64966749 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
64976750 * @dev: a net_device
....@@ -6501,19 +6754,40 @@
65016754 struct qeth_card *card = dev->ml_priv;
65026755 netdev_features_t features;
65036756
6504
- rtnl_lock();
65056757 features = dev->features;
6506
- /* force-off any feature that needs an IPA sequence.
6758
+ /* force-off any feature that might need an IPA sequence.
65076759 * netdev_update_features() will restart them.
65086760 */
6509
- dev->features &= ~QETH_HW_FEATURES;
6761
+ dev->features &= ~dev->hw_features;
6762
+ /* toggle VLAN filter, so that VIDs are re-programmed: */
6763
+ if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6764
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6765
+ dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6766
+ }
65106767 netdev_update_features(dev);
65116768 if (features != dev->features)
65126769 dev_warn(&card->gdev->dev,
65136770 "Device recovery failed to restore all offload features\n");
6514
- rtnl_unlock();
65156771 }
65166772 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6773
+
6774
+static void qeth_check_restricted_features(struct qeth_card *card,
6775
+ netdev_features_t changed,
6776
+ netdev_features_t actual)
6777
+{
6778
+ netdev_features_t ipv6_features = NETIF_F_TSO6;
6779
+ netdev_features_t ipv4_features = NETIF_F_TSO;
6780
+
6781
+ if (!card->info.has_lp2lp_cso_v6)
6782
+ ipv6_features |= NETIF_F_IPV6_CSUM;
6783
+ if (!card->info.has_lp2lp_cso_v4)
6784
+ ipv4_features |= NETIF_F_IP_CSUM;
6785
+
6786
+ if ((changed & ipv6_features) && !(actual & ipv6_features))
6787
+ qeth_flush_local_addrs6(card);
6788
+ if ((changed & ipv4_features) && !(actual & ipv4_features))
6789
+ qeth_flush_local_addrs4(card);
6790
+}
65176791
65186792 int qeth_set_features(struct net_device *dev, netdev_features_t features)
65196793 {
....@@ -6521,18 +6795,20 @@
65216795 netdev_features_t changed = dev->features ^ features;
65226796 int rc = 0;
65236797
6524
- QETH_DBF_TEXT(SETUP, 2, "setfeat");
6525
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6798
+ QETH_CARD_TEXT(card, 2, "setfeat");
6799
+ QETH_CARD_HEX(card, 2, &features, sizeof(features));
65266800
65276801 if ((changed & NETIF_F_IP_CSUM)) {
65286802 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6529
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6803
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6804
+ &card->info.has_lp2lp_cso_v4);
65306805 if (rc)
65316806 changed ^= NETIF_F_IP_CSUM;
65326807 }
65336808 if (changed & NETIF_F_IPV6_CSUM) {
65346809 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6535
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6810
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6811
+ &card->info.has_lp2lp_cso_v6);
65366812 if (rc)
65376813 changed ^= NETIF_F_IPV6_CSUM;
65386814 }
....@@ -6541,11 +6817,21 @@
65416817 if (rc)
65426818 changed ^= NETIF_F_RXCSUM;
65436819 }
6544
- if ((changed & NETIF_F_TSO)) {
6545
- rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
6820
+ if (changed & NETIF_F_TSO) {
6821
+ rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6822
+ QETH_PROT_IPV4);
65466823 if (rc)
65476824 changed ^= NETIF_F_TSO;
65486825 }
6826
+ if (changed & NETIF_F_TSO6) {
6827
+ rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6828
+ QETH_PROT_IPV6);
6829
+ if (rc)
6830
+ changed ^= NETIF_F_TSO6;
6831
+ }
6832
+
6833
+ qeth_check_restricted_features(card, dev->features ^ features,
6834
+ dev->features ^ changed);
65496835
65506836 /* everything changed successfully? */
65516837 if ((dev->features ^ features) == changed)
....@@ -6561,7 +6847,7 @@
65616847 {
65626848 struct qeth_card *card = dev->ml_priv;
65636849
6564
- QETH_DBF_TEXT(SETUP, 2, "fixfeat");
6850
+ QETH_CARD_TEXT(card, 2, "fixfeat");
65656851 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
65666852 features &= ~NETIF_F_IP_CSUM;
65676853 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
....@@ -6571,11 +6857,10 @@
65716857 features &= ~NETIF_F_RXCSUM;
65726858 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
65736859 features &= ~NETIF_F_TSO;
6574
- /* if the card isn't up, remove features that require hw changes */
6575
- if (card->state == CARD_STATE_DOWN ||
6576
- card->state == CARD_STATE_RECOVER)
6577
- features &= ~QETH_HW_FEATURES;
6578
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6860
+ if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6861
+ features &= ~NETIF_F_TSO6;
6862
+
6863
+ QETH_CARD_HEX(card, 2, &features, sizeof(features));
65796864 return features;
65806865 }
65816866 EXPORT_SYMBOL_GPL(qeth_fix_features);
....@@ -6584,6 +6869,36 @@
65846869 struct net_device *dev,
65856870 netdev_features_t features)
65866871 {
6872
+ struct qeth_card *card = dev->ml_priv;
6873
+
6874
+ /* Traffic with local next-hop is not eligible for some offloads: */
6875
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
6876
+ READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
6877
+ netdev_features_t restricted = 0;
6878
+
6879
+ if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
6880
+ restricted |= NETIF_F_ALL_TSO;
6881
+
6882
+ switch (vlan_get_protocol(skb)) {
6883
+ case htons(ETH_P_IP):
6884
+ if (!card->info.has_lp2lp_cso_v4)
6885
+ restricted |= NETIF_F_IP_CSUM;
6886
+
6887
+ if (restricted && qeth_next_hop_is_local_v4(card, skb))
6888
+ features &= ~restricted;
6889
+ break;
6890
+ case htons(ETH_P_IPV6):
6891
+ if (!card->info.has_lp2lp_cso_v6)
6892
+ restricted |= NETIF_F_IPV6_CSUM;
6893
+
6894
+ if (restricted && qeth_next_hop_is_local_v6(card, skb))
6895
+ features &= ~restricted;
6896
+ break;
6897
+ default:
6898
+ break;
6899
+ }
6900
+ }
6901
+
65876902 /* GSO segmentation builds skbs with
65886903 * a (small) linear part for the headers, and
65896904 * page frags for the data.
....@@ -6606,21 +6921,167 @@
66066921 }
66076922 EXPORT_SYMBOL_GPL(qeth_features_check);
66086923
6924
+void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6925
+{
6926
+ struct qeth_card *card = dev->ml_priv;
6927
+ struct qeth_qdio_out_q *queue;
6928
+ unsigned int i;
6929
+
6930
+ QETH_CARD_TEXT(card, 5, "getstat");
6931
+
6932
+ stats->rx_packets = card->stats.rx_packets;
6933
+ stats->rx_bytes = card->stats.rx_bytes;
6934
+ stats->rx_errors = card->stats.rx_length_errors +
6935
+ card->stats.rx_frame_errors +
6936
+ card->stats.rx_fifo_errors;
6937
+ stats->rx_dropped = card->stats.rx_dropped_nomem +
6938
+ card->stats.rx_dropped_notsupp +
6939
+ card->stats.rx_dropped_runt;
6940
+ stats->multicast = card->stats.rx_multicast;
6941
+ stats->rx_length_errors = card->stats.rx_length_errors;
6942
+ stats->rx_frame_errors = card->stats.rx_frame_errors;
6943
+ stats->rx_fifo_errors = card->stats.rx_fifo_errors;
6944
+
6945
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
6946
+ queue = card->qdio.out_qs[i];
6947
+
6948
+ stats->tx_packets += queue->stats.tx_packets;
6949
+ stats->tx_bytes += queue->stats.tx_bytes;
6950
+ stats->tx_errors += queue->stats.tx_errors;
6951
+ stats->tx_dropped += queue->stats.tx_dropped;
6952
+ }
6953
+}
6954
+EXPORT_SYMBOL_GPL(qeth_get_stats64);
6955
+
6956
+#define TC_IQD_UCAST 0
6957
+static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
6958
+ unsigned int ucast_txqs)
6959
+{
6960
+ unsigned int prio;
6961
+
6962
+ /* IQD requires mcast traffic to be placed on a dedicated queue, and
6963
+ * qeth_iqd_select_queue() deals with this.
6964
+ * For unicast traffic, we defer the queue selection to the stack.
6965
+ * By installing a trivial prio map that spans over only the unicast
6966
+ * queues, we can encourage the stack to spread the ucast traffic evenly
6967
+ * without selecting the mcast queue.
6968
+ */
6969
+
6970
+ /* One traffic class, spanning over all active ucast queues: */
6971
+ netdev_set_num_tc(dev, 1);
6972
+ netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
6973
+ QETH_IQD_MIN_UCAST_TXQ);
6974
+
6975
+ /* Map all priorities to this traffic class: */
6976
+ for (prio = 0; prio <= TC_BITMASK; prio++)
6977
+ netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
6978
+}
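A worked illustration of the mapping this helper installs (sketch only; it assumes the usual queue layout where QETH_IQD_MCAST_TXQ is 0 and QETH_IQD_MIN_UCAST_TXQ is 1, neither of which is shown in this hunk). With dev->real_num_tx_queues == 4, qeth_iqd_set_prio_tc_map(dev, 3) reduces to:

	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, 0, 3, 1);	/* one tc: 3 ucast queues, offset 1 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, 0);

so netdev_pick_tx() can only hand back queues 1..3, and queue 0 stays reserved for multicast.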
6979
+
6980
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
6981
+{
6982
+ struct net_device *dev = card->dev;
6983
+ int rc;
6984
+
6985
+ /* Per netif_setup_tc(), adjust the mapping first: */
6986
+ if (IS_IQD(card))
6987
+ qeth_iqd_set_prio_tc_map(dev, count - 1);
6988
+
6989
+ rc = netif_set_real_num_tx_queues(dev, count);
6990
+
6991
+ if (rc && IS_IQD(card))
6992
+ qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
6993
+
6994
+ return rc;
6995
+}
6996
+EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
6997
+
6998
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6999
+ u8 cast_type, struct net_device *sb_dev)
7000
+{
7001
+ u16 txq;
7002
+
7003
+ if (cast_type != RTN_UNICAST)
7004
+ return QETH_IQD_MCAST_TXQ;
7005
+ if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
7006
+ return QETH_IQD_MIN_UCAST_TXQ;
7007
+
7008
+ txq = netdev_pick_tx(dev, skb, sb_dev);
7009
+ return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
7010
+}
7011
+EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
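A minimal sketch of how a discipline driver might plug this helper into its .ndo_select_queue callback; the function name and the qeth_get_ether_cast_type()/qeth_get_priority_queue() helpers are assumptions for illustration and are not part of this hunk:

	static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
					struct net_device *sb_dev)
	{
		struct qeth_card *card = dev->ml_priv;

		/* IQD: mcast/bcast goes to the dedicated queue, ucast is spread. */
		if (IS_IQD(card))
			return qeth_iqd_select_queue(dev, skb,
						     qeth_get_ether_cast_type(skb),
						     sb_dev);

		/* non-IQD: keep the priority-queueing policy (assumed helper). */
		return qeth_get_priority_queue(card, skb);
	}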
7012
+
7013
+int qeth_open(struct net_device *dev)
7014
+{
7015
+ struct qeth_card *card = dev->ml_priv;
7016
+
7017
+ QETH_CARD_TEXT(card, 4, "qethopen");
7018
+
7019
+ card->data.state = CH_STATE_UP;
7020
+ netif_tx_start_all_queues(dev);
7021
+
7022
+ local_bh_disable();
7023
+ if (IS_IQD(card)) {
7024
+ struct qeth_qdio_out_q *queue;
7025
+ unsigned int i;
7026
+
7027
+ qeth_for_each_output_queue(card, queue, i) {
7028
+ netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
7029
+ QETH_NAPI_WEIGHT);
7030
+ napi_enable(&queue->napi);
7031
+ napi_schedule(&queue->napi);
7032
+ }
7033
+ }
7034
+
7035
+ napi_enable(&card->napi);
7036
+ napi_schedule(&card->napi);
7037
+ /* kick-start the NAPI softirq: */
7038
+ local_bh_enable();
7039
+
7040
+ return 0;
7041
+}
7042
+EXPORT_SYMBOL_GPL(qeth_open);
7043
+
7044
+int qeth_stop(struct net_device *dev)
7045
+{
7046
+ struct qeth_card *card = dev->ml_priv;
7047
+
7048
+ QETH_CARD_TEXT(card, 4, "qethstop");
7049
+
7050
+ napi_disable(&card->napi);
7051
+ cancel_delayed_work_sync(&card->buffer_reclaim_work);
7052
+ qdio_stop_irq(CARD_DDEV(card));
7053
+
7054
+ if (IS_IQD(card)) {
7055
+ struct qeth_qdio_out_q *queue;
7056
+ unsigned int i;
7057
+
7058
+ /* Quiesce the NAPI instances: */
7059
+ qeth_for_each_output_queue(card, queue, i)
7060
+ napi_disable(&queue->napi);
7061
+
7062
+ /* Stop .ndo_start_xmit, might still access queue->napi. */
7063
+ netif_tx_disable(dev);
7064
+
7065
+ qeth_for_each_output_queue(card, queue, i) {
7066
+ del_timer_sync(&queue->timer);
7067
+ /* Queues may get re-allocated, so remove the NAPIs. */
7068
+ netif_napi_del(&queue->napi);
7069
+ }
7070
+ } else {
7071
+ netif_tx_disable(dev);
7072
+ }
7073
+
7074
+ return 0;
7075
+}
7076
+EXPORT_SYMBOL_GPL(qeth_stop);
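Taken together, the callbacks exported in this section slot into a discipline driver's net_device_ops. A sketch of such a table, assuming L2 names for the xmit and queue-selection entries (those two identifiers are illustrative and not shown here):

	static const struct net_device_ops qeth_l2_netdev_ops = {
		.ndo_open		= qeth_open,
		.ndo_stop		= qeth_stop,
		.ndo_get_stats64	= qeth_get_stats64,
		.ndo_start_xmit		= qeth_l2_hard_start_xmit,	/* assumed */
		.ndo_select_queue	= qeth_l2_select_queue,		/* assumed */
		.ndo_fix_features	= qeth_fix_features,
		.ndo_set_features	= qeth_set_features,
		.ndo_features_check	= qeth_features_check,
	};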
7077
+
66097078 static int __init qeth_core_init(void)
66107079 {
66117080 int rc;
66127081
66137082 pr_info("loading core functions\n");
6614
- INIT_LIST_HEAD(&qeth_core_card_list.list);
6615
- INIT_LIST_HEAD(&qeth_dbf_list);
6616
- rwlock_init(&qeth_core_card_list.rwlock);
6617
- mutex_init(&qeth_mod_mutex);
66187083
6619
- qeth_wq = create_singlethread_workqueue("qeth_wq");
6620
- if (!qeth_wq) {
6621
- rc = -ENOMEM;
6622
- goto out_err;
6623
- }
7084
+ qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
66247085
66257086 rc = qeth_register_dbf_views();
66267087 if (rc)
....@@ -6629,8 +7090,10 @@
66297090 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
66307091 if (rc)
66317092 goto register_err;
6632
- qeth_core_header_cache = kmem_cache_create("qeth_hdr",
6633
- sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
7093
+ qeth_core_header_cache =
7094
+ kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
7095
+ roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
7096
+ 0, NULL);
66347097 if (!qeth_core_header_cache) {
66357098 rc = -ENOMEM;
66367099 goto slab_err;
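A brief aside on the new alignment argument above: rounding the alignment up to the next power of two that is at least the object size means a cached header can never straddle a page boundary (this motivation is inferred, not stated in the hunk). For example, assuming a hypothetical QETH_HDR_CACHE_OBJ_SIZE of 98 bytes:

	/* align = roundup_pow_of_two(98) = 128; each object starts at a
	 * multiple of 128, and since 128 divides PAGE_SIZE and the object
	 * is at most 128 bytes long, it stays within a single page.
	 */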
....@@ -6661,8 +7124,7 @@
66617124 register_err:
66627125 qeth_unregister_dbf_views();
66637126 dbf_err:
6664
- destroy_workqueue(qeth_wq);
6665
-out_err:
7127
+ debugfs_remove_recursive(qeth_debugfs_root);
66667128 pr_err("Initializing the qeth device driver failed\n");
66677129 return rc;
66687130 }
....@@ -6670,13 +7132,13 @@
66707132 static void __exit qeth_core_exit(void)
66717133 {
66727134 qeth_clear_dbf_list();
6673
- destroy_workqueue(qeth_wq);
66747135 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
66757136 ccw_driver_unregister(&qeth_ccw_driver);
66767137 kmem_cache_destroy(qeth_qdio_outbuf_cache);
66777138 kmem_cache_destroy(qeth_core_header_cache);
66787139 root_device_unregister(qeth_core_root_dev);
66797140 qeth_unregister_dbf_views();
7141
+ debugfs_remove_recursive(qeth_debugfs_root);
66807142 pr_info("core functions removed\n");
66817143 }
66827144