2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/infiniband/core/mad.c
@@ -3,7 +3,7 @@
  * Copyright (c) 2005 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2009 HNR Consulting. All rights reserved.
- * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014,2018 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -38,10 +38,10 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include <linux/dma-mapping.h>
-#include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/security.h>
+#include <linux/xarray.h>
 #include <rdma/ib_cache.h>

 #include "mad_priv.h"
@@ -51,6 +51,32 @@
 #include "opa_smi.h"
 #include "agent.h"

+#define CREATE_TRACE_POINTS
+#include <trace/events/ib_mad.h>
+
+#ifdef CONFIG_TRACEPOINTS
+static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+			  struct ib_mad_qp_info *qp_info,
+			  struct trace_event_raw_ib_mad_send_template *entry)
+{
+	u16 pkey;
+	struct ib_device *dev = qp_info->port_priv->device;
+	u8 pnum = qp_info->port_priv->port_num;
+	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
+	struct rdma_ah_attr attr = {};
+
+	rdma_query_ah(wr->ah, &attr);
+
+	/* These are common */
+	entry->sl = attr.sl;
+	ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
+	entry->pkey = pkey;
+	entry->rqpn = wr->remote_qpn;
+	entry->rqkey = wr->remote_qkey;
+	entry->dlid = rdma_ah_get_dlid(&attr);
+}
+#endif
+
 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

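The hunk above pulls the new MAD tracepoints into mad.c. Defining CREATE_TRACE_POINTS before including a trace header is what emits the tracepoint bodies; exactly one .c file per trace system may do this, and every other includer sees only declarations. Below is a minimal sketch of that pattern, assuming a header placed at include/trace/events/ib_mad_demo.h; the ib_mad_demo_send event and its fields are illustrative, not the real ib_mad events.

/* include/trace/events/ib_mad_demo.h -- hypothetical event for illustration */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ib_mad_demo

#if !defined(_TRACE_IB_MAD_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IB_MAD_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(ib_mad_demo_send,
	TP_PROTO(u32 qpn, u16 dlid),
	TP_ARGS(qpn, dlid),
	TP_STRUCT__entry(
		__field(u32, qpn)
		__field(u16, dlid)
	),
	TP_fast_assign(
		__entry->qpn = qpn;
		__entry->dlid = dlid;
	),
	TP_printk("qpn 0x%x dlid 0x%x", __entry->qpn, __entry->dlid)
);

#endif /* _TRACE_IB_MAD_DEMO_H */

/* This include must stay outside the guard; it expands the event bodies. */
#include <trace/define_trace.h>

One compilation unit then does #define CREATE_TRACE_POINTS followed by the include, and call sites invoke trace_ib_mad_demo_send(qpn, dlid), which compiles to a static-branch no-op unless the event is enabled.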
@@ -59,12 +85,8 @@
 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

-/*
- * The mlx4 driver uses the top byte to distinguish which virtual function
- * generated the MAD, so we must avoid using it.
- */
-#define AGENT_ID_LIMIT	(1 << 24)
-static DEFINE_IDR(ib_mad_clients);
+static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
+static u32 ib_mad_client_next;
 static struct list_head ib_mad_port_list;

 /* Port list lock */
@@ -219,6 +241,10 @@
 	struct ib_mad_mgmt_method_table *method;
 	int ret2, qpn;
 	u8 mgmt_class, vclass;
+
+	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
+	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
+		return ERR_PTR(-EPROTONOSUPPORT);

 	/* Validate parameters */
 	qpn = get_spl_qp_index(qp_type);
@@ -376,7 +402,7 @@
 	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 	INIT_WORK(&mad_agent_priv->local_work, local_completions);
-	atomic_set(&mad_agent_priv->refcount, 1);
+	refcount_set(&mad_agent_priv->refcount, 1);
 	init_completion(&mad_agent_priv->comp);

 	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
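This commit also converts the agent reference count from atomic_t to refcount_t, which saturates rather than overflowing and warns on increments from zero. A condensed sketch of the get/put lifecycle the surrounding hunks implement; the demo_agent names are illustrative:

#include <linux/completion.h>
#include <linux/refcount.h>

struct demo_agent {
	refcount_t refcount;
	struct completion comp;
};

static void demo_agent_init(struct demo_agent *a)
{
	refcount_set(&a->refcount, 1);	/* caller holds the first reference */
	init_completion(&a->comp);
}

static void demo_agent_get(struct demo_agent *a)
{
	refcount_inc(&a->refcount);
}

static void demo_agent_put(struct demo_agent *a)
{
	/* Dropping the last reference wakes the unregister path,
	 * which waits on a->comp before freeing the agent.
	 */
	if (refcount_dec_and_test(&a->refcount))
		complete(&a->comp);
}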
@@ -385,18 +411,17 @@
 		goto error4;
 	}

-	idr_preload(GFP_KERNEL);
-	idr_lock(&ib_mad_clients);
-	ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
-			AGENT_ID_LIMIT, GFP_ATOMIC);
-	idr_unlock(&ib_mad_clients);
-	idr_preload_end();
-
+	/*
+	 * The mlx4 driver uses the top byte to distinguish which virtual
+	 * function generated the MAD, so we must avoid using it.
+	 */
+	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
+			mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
+			&ib_mad_client_next, GFP_KERNEL);
 	if (ret2 < 0) {
 		ret = ERR_PTR(ret2);
 		goto error5;
 	}
-	mad_agent_priv->agent.hi_tid = ret2;

 	/*
 	 * Make sure MAD registration (if supplied)
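Together with the DEFINE_XARRAY_ALLOC1() hunk earlier, this replaces the IDR with an allocating XArray. xa_alloc_cyclic() takes the array's internal lock itself and can sleep with GFP_KERNEL, which is why the idr_preload()/idr_lock() dance disappears, and the cyclic cursor avoids immediately reusing freed IDs. A minimal sketch of the same pattern, with illustrative demo_* names:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(demo_clients);	/* IDs start at 1; 0 is reserved */
static u32 demo_client_next;			/* cyclic allocation cursor */

static int demo_register(void *client, u32 *id)
{
	/* Returns 0 on success, 1 if the cursor wrapped, or a negative
	 * errno; the limit mirrors the 24-bit cap used above.
	 */
	int ret = xa_alloc_cyclic(&demo_clients, id, client,
				  XA_LIMIT(0, (1 << 24) - 1),
				  &demo_client_next, GFP_KERNEL);

	return ret < 0 ? ret : 0;
}

static void demo_unregister(u32 id)
{
	xa_erase(&demo_clients, id);
}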
@@ -441,12 +466,11 @@
 	}
 	spin_unlock_irq(&port_priv->reg_lock);

+	trace_ib_mad_create_agent(mad_agent_priv);
 	return &mad_agent_priv->agent;
 error6:
 	spin_unlock_irq(&port_priv->reg_lock);
-	idr_lock(&ib_mad_clients);
-	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
-	idr_unlock(&ib_mad_clients);
+	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 error5:
 	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 error4:
@@ -458,139 +482,10 @@
 }
 EXPORT_SYMBOL(ib_register_mad_agent);

-static inline int is_snooping_sends(int mad_snoop_flags)
-{
-	return (mad_snoop_flags &
-		(/*IB_MAD_SNOOP_POSTED_SENDS |
-		 IB_MAD_SNOOP_RMPP_SENDS |*/
-		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
-		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
-}
-
-static inline int is_snooping_recvs(int mad_snoop_flags)
-{
-	return (mad_snoop_flags &
-		(IB_MAD_SNOOP_RECVS /*|
-		 IB_MAD_SNOOP_RMPP_RECVS*/));
-}
-
-static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
-				struct ib_mad_snoop_private *mad_snoop_priv)
-{
-	struct ib_mad_snoop_private **new_snoop_table;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	/* Check for empty slot in array. */
-	for (i = 0; i < qp_info->snoop_table_size; i++)
-		if (!qp_info->snoop_table[i])
-			break;
-
-	if (i == qp_info->snoop_table_size) {
-		/* Grow table. */
-		new_snoop_table = krealloc(qp_info->snoop_table,
-					   sizeof mad_snoop_priv *
-					   (qp_info->snoop_table_size + 1),
-					   GFP_ATOMIC);
-		if (!new_snoop_table) {
-			i = -ENOMEM;
-			goto out;
-		}
-
-		qp_info->snoop_table = new_snoop_table;
-		qp_info->snoop_table_size++;
-	}
-	qp_info->snoop_table[i] = mad_snoop_priv;
-	atomic_inc(&qp_info->snoop_count);
-out:
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-	return i;
-}
-
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
-					   u8 port_num,
-					   enum ib_qp_type qp_type,
-					   int mad_snoop_flags,
-					   ib_mad_snoop_handler snoop_handler,
-					   ib_mad_recv_handler recv_handler,
-					   void *context)
-{
-	struct ib_mad_port_private *port_priv;
-	struct ib_mad_agent *ret;
-	struct ib_mad_snoop_private *mad_snoop_priv;
-	int qpn;
-	int err;
-
-	/* Validate parameters */
-	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
-	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
-		ret = ERR_PTR(-EINVAL);
-		goto error1;
-	}
-	qpn = get_spl_qp_index(qp_type);
-	if (qpn == -1) {
-		ret = ERR_PTR(-EINVAL);
-		goto error1;
-	}
-	port_priv = ib_get_mad_port(device, port_num);
-	if (!port_priv) {
-		ret = ERR_PTR(-ENODEV);
-		goto error1;
-	}
-	/* Allocate structures */
-	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
-	if (!mad_snoop_priv) {
-		ret = ERR_PTR(-ENOMEM);
-		goto error1;
-	}
-
-	/* Now, fill in the various structures */
-	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
-	mad_snoop_priv->agent.device = device;
-	mad_snoop_priv->agent.recv_handler = recv_handler;
-	mad_snoop_priv->agent.snoop_handler = snoop_handler;
-	mad_snoop_priv->agent.context = context;
-	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
-	mad_snoop_priv->agent.port_num = port_num;
-	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
-	init_completion(&mad_snoop_priv->comp);
-
-	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
-	if (err) {
-		ret = ERR_PTR(err);
-		goto error2;
-	}
-
-	mad_snoop_priv->snoop_index = register_snoop_agent(
-						&port_priv->qp_info[qpn],
-						mad_snoop_priv);
-	if (mad_snoop_priv->snoop_index < 0) {
-		ret = ERR_PTR(mad_snoop_priv->snoop_index);
-		goto error3;
-	}
-
-	atomic_set(&mad_snoop_priv->refcount, 1);
-	return &mad_snoop_priv->agent;
-error3:
-	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-error2:
-	kfree(mad_snoop_priv);
-error1:
-	return ret;
-}
-EXPORT_SYMBOL(ib_register_mad_snoop);
-
 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 {
-	if (atomic_dec_and_test(&mad_agent_priv->refcount))
+	if (refcount_dec_and_test(&mad_agent_priv->refcount))
 		complete(&mad_agent_priv->comp);
-}
-
-static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
-{
-	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-		complete(&mad_snoop_priv->comp);
 }

 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
@@ -598,6 +493,7 @@
 	struct ib_mad_port_private *port_priv;

 	/* Note that we could still be handling received MADs */
+	trace_ib_mad_unregister_agent(mad_agent_priv);

 	/*
 	 * Canceling all sends results in dropping received response
@@ -610,9 +506,7 @@
 	spin_lock_irq(&port_priv->reg_lock);
 	remove_mad_reg_req(mad_agent_priv);
 	spin_unlock_irq(&port_priv->reg_lock);
-	idr_lock(&ib_mad_clients);
-	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
-	idr_unlock(&ib_mad_clients);
+	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);

 	flush_workqueue(port_priv->wq);

@@ -626,25 +520,6 @@
 	kfree_rcu(mad_agent_priv, rcu);
 }

-static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
-{
-	struct ib_mad_qp_info *qp_info;
-	unsigned long flags;
-
-	qp_info = mad_snoop_priv->qp_info;
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
-	atomic_dec(&qp_info->snoop_count);
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-
-	deref_snoop_agent(mad_snoop_priv);
-	wait_for_completion(&mad_snoop_priv->comp);
-
-	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-
-	kfree(mad_snoop_priv);
-}
-
 /*
  * ib_unregister_mad_agent - Unregisters a client from using MAD services
  *
@@ -653,20 +528,11 @@
 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
-	struct ib_mad_snoop_private *mad_snoop_priv;

-	/* If the TID is zero, the agent can only snoop. */
-	if (mad_agent->hi_tid) {
-		mad_agent_priv = container_of(mad_agent,
-					      struct ib_mad_agent_private,
-					      agent);
-		unregister_mad_agent(mad_agent_priv);
-	} else {
-		mad_snoop_priv = container_of(mad_agent,
-					      struct ib_mad_snoop_private,
-					      agent);
-		unregister_mad_snoop(mad_snoop_priv);
-	}
+	mad_agent_priv = container_of(mad_agent,
+				      struct ib_mad_agent_private,
+				      agent);
+	unregister_mad_agent(mad_agent_priv);
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);

@@ -680,57 +546,6 @@
 	list_del(&mad_list->list);
 	mad_queue->count--;
 	spin_unlock_irqrestore(&mad_queue->lock, flags);
-}
-
-static void snoop_send(struct ib_mad_qp_info *qp_info,
-		       struct ib_mad_send_buf *send_buf,
-		       struct ib_mad_send_wc *mad_send_wc,
-		       int mad_snoop_flags)
-{
-	struct ib_mad_snoop_private *mad_snoop_priv;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	for (i = 0; i < qp_info->snoop_table_size; i++) {
-		mad_snoop_priv = qp_info->snoop_table[i];
-		if (!mad_snoop_priv ||
-		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
-			continue;
-
-		atomic_inc(&mad_snoop_priv->refcount);
-		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
-						    send_buf, mad_send_wc);
-		deref_snoop_agent(mad_snoop_priv);
-		spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	}
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
-static void snoop_recv(struct ib_mad_qp_info *qp_info,
-		       struct ib_mad_recv_wc *mad_recv_wc,
-		       int mad_snoop_flags)
-{
-	struct ib_mad_snoop_private *mad_snoop_priv;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	for (i = 0; i < qp_info->snoop_table_size; i++) {
-		mad_snoop_priv = qp_info->snoop_table[i];
-		if (!mad_snoop_priv ||
-		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
-			continue;
-
-		atomic_inc(&mad_snoop_priv->refcount);
-		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
-						   mad_recv_wc);
-		deref_snoop_agent(mad_snoop_priv);
-		spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	}
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 }

 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
@@ -817,6 +632,8 @@
 	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
 		u32 opa_drslid;

+		trace_ib_mad_handle_out_opa_smi(opa_smp);
+
 		if ((opa_get_smp_direction(opa_smp)
 		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
 		     OPA_LID_PERMISSIVE &&
@@ -842,6 +659,8 @@
 		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
 			goto out;
 	} else {
+		trace_ib_mad_handle_out_ib_smi(smp);
+
 		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 		    IB_LID_PERMISSIVE &&
 		    smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
@@ -884,10 +703,10 @@
 	}

 	/* No GRH for DR SMP */
-	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
-				  (const struct ib_mad_hdr *)smp, mad_size,
-				  (struct ib_mad_hdr *)mad_priv->mad,
-				  &mad_size, &out_mad_pkey_index);
+	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
+				      (const struct ib_mad *)smp,
+				      (struct ib_mad *)mad_priv->mad, &mad_size,
+				      &out_mad_pkey_index);
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -899,7 +718,7 @@
 			 * Reference MAD agent until receive
 			 * side of local completion handled
 			 */
-			atomic_inc(&mad_agent_priv->refcount);
+			refcount_inc(&mad_agent_priv->refcount);
 		} else
 			kfree(mad_priv);
 		break;
@@ -939,7 +758,7 @@
 		local->return_wc_byte_len = mad_size;
 	}
 	/* Reference MAD agent until send side of local completion handled */
-	atomic_inc(&mad_agent_priv->refcount);
+	refcount_inc(&mad_agent_priv->refcount);
 	/* Queue local completion to local list */
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
@@ -1097,7 +916,7 @@
 	}

 	mad_send_wr->send_buf.mad_agent = mad_agent;
-	atomic_inc(&mad_agent_priv->refcount);
+	refcount_inc(&mad_agent_priv->refcount);
 	return &mad_send_wr->send_buf;
 }
 EXPORT_SYMBOL(ib_create_send_mad);
@@ -1219,6 +1038,7 @@

 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
+		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
 		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
 				   NULL);
 		list = &qp_info->send_queue.list;
@@ -1311,7 +1131,7 @@
 		mad_send_wr->status = IB_WC_SUCCESS;

 		/* Reference MAD agent until send completes */
-		atomic_inc(&mad_agent_priv->refcount);
+		refcount_inc(&mad_agent_priv->refcount);
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		list_add_tail(&mad_send_wr->agent_list,
 			      &mad_agent_priv->send_list);
@@ -1328,7 +1148,7 @@
 			spin_lock_irqsave(&mad_agent_priv->lock, flags);
 			list_del(&mad_send_wr->agent_list);
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-			atomic_dec(&mad_agent_priv->refcount);
+			deref_mad_agent(mad_agent_priv);
 			goto error;
 		}
 	}
@@ -1367,25 +1187,6 @@
 	}
 }
 EXPORT_SYMBOL(ib_free_recv_mad);
-
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
-					u8 rmpp_version,
-					ib_mad_send_handler send_handler,
-					ib_mad_recv_handler recv_handler,
-					void *context)
-{
-	return ERR_PTR(-EINVAL);	/* XXX: for now */
-}
-EXPORT_SYMBOL(ib_redirect_mad_qp);
-
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
-		      struct ib_wc *wc)
-{
-	dev_err(&mad_agent->device->dev,
-		"ib_process_mad_wc() not implemented yet\n");
-	return 0;
-}
-EXPORT_SYMBOL(ib_process_mad_wc);

 static int method_in_use(struct ib_mad_mgmt_method_table **method,
 			 struct ib_mad_reg_req *mad_reg_req)
@@ -1752,8 +1553,8 @@
 	 */
 	hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
 	rcu_read_lock();
-	mad_agent = idr_find(&ib_mad_clients, hi_tid);
-	if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
+	mad_agent = xa_load(&ib_mad_clients, hi_tid);
+	if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
 		mad_agent = NULL;
 	rcu_read_unlock();
 	} else {
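The lookup above is lockless: xa_load() may be called under rcu_read_lock(), and refcount_inc_not_zero() takes a reference only if the count has not already hit zero, so an agent racing through unregister is treated as not found rather than resurrected. This is safe because the agent itself is freed via kfree_rcu() (see the unregister hunk earlier). A sketch of the pattern, reusing the illustrative demo_agent type and demo_clients XArray from the earlier sketches:

static struct demo_agent *demo_find_get(u32 id)
{
	struct demo_agent *a;

	rcu_read_lock();
	a = xa_load(&demo_clients, id);
	if (a && !refcount_inc_not_zero(&a->refcount))
		a = NULL;	/* dying agent: treat as absent */
	rcu_read_unlock();

	return a;	/* caller must demo_agent_put() when done */
}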
@@ -1805,7 +1606,7 @@
 		}
 	}
 	if (mad_agent)
-		atomic_inc(&mad_agent->refcount);
+		refcount_inc(&mad_agent->refcount);
 out:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 }
@@ -2030,7 +1831,7 @@
 			mad_agent_priv->agent.recv_handler(
 					&mad_agent_priv->agent, NULL,
 					mad_recv_wc);
-			atomic_dec(&mad_agent_priv->refcount);
+			deref_mad_agent(mad_agent_priv);
 		} else {
 			/* not user rmpp, revert to normal behavior and
 			 * drop the mad */
@@ -2047,7 +1848,7 @@
 					&mad_agent_priv->agent,
 					&mad_send_wr->send_buf,
 					mad_recv_wc);
-			atomic_dec(&mad_agent_priv->refcount);
+			deref_mad_agent(mad_agent_priv);

 			mad_send_wc.status = IB_WC_SUCCESS;
 			mad_send_wc.vendor_err = 0;
@@ -2072,6 +1873,8 @@
 {
 	enum smi_forward_action retsmi;
 	struct ib_smp *smp = (struct ib_smp *)recv->mad;
+
+	trace_ib_mad_handle_ib_smi(smp);

 	if (smi_handle_dr_smp_recv(smp,
 				   rdma_cap_ib_switch(port_priv->device),
@@ -2157,6 +1960,8 @@
 {
 	enum smi_forward_action retsmi;
 	struct opa_smp *smp = (struct opa_smp *)recv->mad;
+
+	trace_ib_mad_handle_opa_smi(smp);

 	if (opa_smi_handle_dr_smp_recv(smp,
 				       rdma_cap_ib_switch(port_priv->device),
@@ -2275,12 +2080,12 @@
 	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
 	recv->header.recv_wc.recv_buf.grh = &recv->grh;

-	if (atomic_read(&qp_info->snoop_count))
-		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
-
 	/* Validate MAD */
 	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
 		goto out;
+
+	trace_ib_mad_recv_done_handler(qp_info, wc,
+				       (struct ib_mad_hdr *)recv->mad);

 	mad_size = recv->mad_size;
 	response = alloc_mad_private(mad_size, GFP_KERNEL);
@@ -2301,14 +2106,12 @@
 	}

 	/* Give driver "right of first refusal" on incoming MAD */
-	if (port_priv->device->process_mad) {
-		ret = port_priv->device->process_mad(port_priv->device, 0,
-						     port_priv->port_num,
-						     wc, &recv->grh,
-						     (const struct ib_mad_hdr *)recv->mad,
-						     recv->mad_size,
-						     (struct ib_mad_hdr *)response->mad,
-						     &mad_size, &resp_mad_pkey_index);
+	if (port_priv->device->ops.process_mad) {
+		ret = port_priv->device->ops.process_mad(
+			port_priv->device, 0, port_priv->port_num, wc,
+			&recv->grh, (const struct ib_mad *)recv->mad,
+			(struct ib_mad *)response->mad, &mad_size,
+			&resp_mad_pkey_index);

 		if (opa)
 			wc->pkey_index = resp_mad_pkey_index;
@@ -2330,6 +2133,7 @@

 	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
 	if (mad_agent) {
+		trace_ib_mad_recv_done_agent(mad_agent);
 		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
 		/*
 		 * recv is freed up in error cases in ib_mad_complete_recv
@@ -2410,7 +2214,7 @@
 }

 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
-			  int timeout_ms)
+			  unsigned long timeout_ms)
 {
 	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
 	wait_for_response(mad_send_wr);
@@ -2494,6 +2298,9 @@
 	send_queue = mad_list->mad_queue;
 	qp_info = send_queue->qp_info;

+	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
+	trace_ib_mad_send_done_handler(mad_send_wr, wc);
+
 retry:
 	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
 			    mad_send_wr->header_mapping,
@@ -2519,12 +2326,10 @@
 	mad_send_wc.send_buf = &mad_send_wr->send_buf;
 	mad_send_wc.status = wc->status;
 	mad_send_wc.vendor_err = wc->vendor_err;
-	if (atomic_read(&qp_info->snoop_count))
-		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
-			   IB_MAD_SNOOP_SEND_COMPLETIONS);
 	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

 	if (queued_send_wr) {
+		trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
 				   NULL);
 		if (ret) {
@@ -2572,6 +2377,7 @@
 	if (mad_send_wr->retry) {
 		/* Repost send */
 		mad_send_wr->retry = 0;
+		trace_ib_mad_error_handler(mad_send_wr, qp_info);
 		ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
 				   NULL);
 		if (!ret)
@@ -2632,7 +2438,7 @@
 		list_del(&mad_send_wr->agent_list);
 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
 						   &mad_send_wc);
-		atomic_dec(&mad_agent_priv->refcount);
+		deref_mad_agent(mad_agent_priv);
 	}
 }

@@ -2761,16 +2567,12 @@
 		local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
 		local->mad_priv->header.recv_wc.recv_buf.mad =
 					(struct ib_mad *)local->mad_priv->mad;
-		if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
-			snoop_recv(recv_mad_agent->qp_info,
-				  &local->mad_priv->header.recv_wc,
-				  IB_MAD_SNOOP_RECVS);
 		recv_mad_agent->agent.recv_handler(
 				&recv_mad_agent->agent,
 				&local->mad_send_wr->send_buf,
 				&local->mad_priv->header.recv_wc);
 		spin_lock_irqsave(&recv_mad_agent->lock, flags);
-		atomic_dec(&recv_mad_agent->refcount);
+		deref_mad_agent(recv_mad_agent);
 		spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
 	}

@@ -2779,15 +2581,11 @@
 		mad_send_wc.status = IB_WC_SUCCESS;
 		mad_send_wc.vendor_err = 0;
 		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
-		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
-			snoop_send(mad_agent_priv->qp_info,
-				   &local->mad_send_wr->send_buf,
-				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
 						   &mad_send_wc);

 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
-		atomic_dec(&mad_agent_priv->refcount);
+		deref_mad_agent(mad_agent_priv);
 		if (free_mad)
 			kfree(local->mad_priv);
 		kfree(local);
@@ -2873,7 +2671,7 @@
 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
 						   &mad_send_wc);

-		atomic_dec(&mad_agent_priv->refcount);
+		deref_mad_agent(mad_agent_priv);
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
@@ -3099,10 +2897,6 @@
 	init_mad_queue(qp_info, &qp_info->send_queue);
 	init_mad_queue(qp_info, &qp_info->recv_queue);
 	INIT_LIST_HEAD(&qp_info->overflow_list);
-	spin_lock_init(&qp_info->snoop_lock);
-	qp_info->snoop_table = NULL;
-	qp_info->snoop_table_size = 0;
-	atomic_set(&qp_info->snoop_count, 0);
 }

 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -3146,7 +2940,6 @@
 		return;

 	ib_destroy_qp(qp_info->qp);
-	kfree(qp_info->snoop_table);
 }

 /*
@@ -3284,9 +3077,11 @@
 	return 0;
 }

-static void ib_mad_init_device(struct ib_device *device)
+static int ib_mad_init_device(struct ib_device *device)
 {
 	int start, i;
+	unsigned int count = 0;
+	int ret;

 	start = rdma_start_port(device);

@@ -3294,17 +3089,23 @@
 		if (!rdma_cap_ib_mad(device, i))
 			continue;

-		if (ib_mad_port_open(device, i)) {
+		ret = ib_mad_port_open(device, i);
+		if (ret) {
 			dev_err(&device->dev, "Couldn't open port %d\n", i);
 			goto error;
 		}
-		if (ib_agent_port_open(device, i)) {
+		ret = ib_agent_port_open(device, i);
+		if (ret) {
 			dev_err(&device->dev,
 				"Couldn't open port %d for agents\n", i);
 			goto error_agent;
 		}
+		count++;
 	}
-	return;
+	if (!count)
+		return -EOPNOTSUPP;
+
+	return 0;

 error_agent:
 	if (ib_mad_port_close(device, i))
@@ -3321,13 +3122,14 @@
 		if (ib_mad_port_close(device, i))
 			dev_err(&device->dev, "Couldn't close port %d\n", i);
 	}
+	return ret;
 }

 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
 {
-	int i;
+	unsigned int i;

-	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+	rdma_for_each_port (device, i) {
 		if (!rdma_cap_ib_mad(device, i))
 			continue;

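rdma_for_each_port() from <rdma/ib_verbs.h> iterates the device's port numbers from rdma_start_port() to rdma_end_port() inclusive with an unsigned iterator, replacing the open-coded for loop. A small usage sketch; demo_walk_ports is illustrative:

#include <rdma/ib_verbs.h>

static void demo_walk_ports(struct ib_device *device)
{
	unsigned int i;

	rdma_for_each_port (device, i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;
		/* per-port setup or teardown would go here */
	}
}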
@@ -3354,9 +3156,6 @@
 	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

 	INIT_LIST_HEAD(&ib_mad_port_list);
-
-	/* Client ID 0 is used for snoop-only clients */
-	idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);

 	if (ib_register_client(&mad_client)) {
 		pr_err("Couldn't register ib_mad client\n");