@@ -3,7 +3,7 @@
  * Copyright (c) 2005 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2009 HNR Consulting. All rights reserved.
- * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014,2018 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -38,10 +38,10 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/dma-mapping.h>
-#include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/security.h>
+#include <linux/xarray.h>
 #include <rdma/ib_cache.h>
 
 #include "mad_priv.h"
@@ -51,6 +51,32 @@
 #include "opa_smi.h"
 #include "agent.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ib_mad.h>
+
+#ifdef CONFIG_TRACEPOINTS
+static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+		struct ib_mad_qp_info *qp_info,
+		struct trace_event_raw_ib_mad_send_template *entry)
+{
+	u16 pkey;
+	struct ib_device *dev = qp_info->port_priv->device;
+	u8 pnum = qp_info->port_priv->port_num;
+	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
+	struct rdma_ah_attr attr = {};
+
+	rdma_query_ah(wr->ah, &attr);
+
+	/* These are common */
+	entry->sl = attr.sl;
+	ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
+	entry->pkey = pkey;
+	entry->rqpn = wr->remote_qpn;
+	entry->rqkey = wr->remote_qkey;
+	entry->dlid = rdma_ah_get_dlid(&attr);
+}
+#endif
+
 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
 
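
The two added lines above are the standard way to instantiate tracepoints: exactly one compilation unit defines CREATE_TRACE_POINTS before including the trace header, which makes the header emit the tracepoint bodies instead of just their declarations. As a hedged illustration (my_subsys and my_event are invented names, not part of this patch), a minimal trace header looks like:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_subsys

#if !defined(_TRACE_MY_SUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MY_SUBSYS_H

#include <linux/tracepoint.h>

/* Defines trace_my_event(qpn) plus the ftrace event plumbing */
TRACE_EVENT(my_event,
	TP_PROTO(u32 qpn),
	TP_ARGS(qpn),
	TP_STRUCT__entry(
		__field(u32, qpn)
	),
	TP_fast_assign(
		__entry->qpn = qpn;
	),
	TP_printk("qpn %u", __entry->qpn)
);

#endif /* _TRACE_MY_SUBSYS_H */

/* This must stay outside the include guard */
#include <trace/define_trace.h>
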
@@ -59,12 +85,8 @@
 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
 
-/*
- * The mlx4 driver uses the top byte to distinguish which virtual function
- * generated the MAD, so we must avoid using it.
- */
-#define AGENT_ID_LIMIT (1 << 24)
-static DEFINE_IDR(ib_mad_clients);
+static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
+static u32 ib_mad_client_next;
 static struct list_head ib_mad_port_list;
 
 /* Port list lock */
@@ -219,6 +241,10 @@
 	struct ib_mad_mgmt_method_table *method;
 	int ret2, qpn;
 	u8 mgmt_class, vclass;
+
+	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
+	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
+		return ERR_PTR(-EPROTONOSUPPORT);
 
 	/* Validate parameters */
 	qpn = get_spl_qp_index(qp_type);
@@ -376,7 +402,7 @@
 	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 	INIT_WORK(&mad_agent_priv->local_work, local_completions);
-	atomic_set(&mad_agent_priv->refcount, 1);
+	refcount_set(&mad_agent_priv->refcount, 1);
 	init_completion(&mad_agent_priv->comp);
 
 	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
@@ -385,18 +411,17 @@
 		goto error4;
 	}
 
-	idr_preload(GFP_KERNEL);
-	idr_lock(&ib_mad_clients);
-	ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
-			AGENT_ID_LIMIT, GFP_ATOMIC);
-	idr_unlock(&ib_mad_clients);
-	idr_preload_end();
-
+	/*
+	 * The mlx4 driver uses the top byte to distinguish which virtual
+	 * function generated the MAD, so we must avoid using it.
+	 */
+	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
+			mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
+			&ib_mad_client_next, GFP_KERNEL);
 	if (ret2 < 0) {
 		ret = ERR_PTR(ret2);
 		goto error5;
 	}
-	mad_agent_priv->agent.hi_tid = ret2;
 
 	/*
 	 * Make sure MAD registration (if supplied)
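
xa_alloc_cyclic() replaces the idr_preload()/idr_alloc_cyclic() dance: it takes its own internal lock, accepts GFP_KERNEL directly, and stores the new ID through a pointer instead of returning it. A self-contained sketch of the same pattern, with illustrative names (my_clients, my_next_id, my_register_client are not from this patch):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(my_clients);	/* allocating XArray, IDs start at 1 */
static u32 my_next_id;				/* cyclic allocation cursor */

static int my_register_client(void *client, u32 *out_id)
{
	int ret;

	/*
	 * Store "client" at the next free index below 2^24, resuming the
	 * search after the previously allocated ID.  Returns 0 on success,
	 * 1 if the counter wrapped, or a negative errno.
	 */
	ret = xa_alloc_cyclic(&my_clients, out_id, client,
			      XA_LIMIT(1, (1 << 24) - 1),
			      &my_next_id, GFP_KERNEL);
	return ret < 0 ? ret : 0;
}
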
@@ -441,12 +466,11 @@
 	}
 	spin_unlock_irq(&port_priv->reg_lock);
 
+	trace_ib_mad_create_agent(mad_agent_priv);
 	return &mad_agent_priv->agent;
 error6:
 	spin_unlock_irq(&port_priv->reg_lock);
-	idr_lock(&ib_mad_clients);
-	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
-	idr_unlock(&ib_mad_clients);
+	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 error5:
 	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 error4:
@@ -458,139 +482,10 @@
 }
 EXPORT_SYMBOL(ib_register_mad_agent);
 
-static inline int is_snooping_sends(int mad_snoop_flags)
-{
-	return (mad_snoop_flags &
-		(/*IB_MAD_SNOOP_POSTED_SENDS |
-		  IB_MAD_SNOOP_RMPP_SENDS |*/
-		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
-		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
-}
-
-static inline int is_snooping_recvs(int mad_snoop_flags)
-{
-	return (mad_snoop_flags &
-		(IB_MAD_SNOOP_RECVS /*|
-		 IB_MAD_SNOOP_RMPP_RECVS*/));
-}
-
-static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
-				struct ib_mad_snoop_private *mad_snoop_priv)
-{
-	struct ib_mad_snoop_private **new_snoop_table;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	/* Check for empty slot in array. */
-	for (i = 0; i < qp_info->snoop_table_size; i++)
-		if (!qp_info->snoop_table[i])
-			break;
-
-	if (i == qp_info->snoop_table_size) {
-		/* Grow table. */
-		new_snoop_table = krealloc(qp_info->snoop_table,
-					   sizeof mad_snoop_priv *
-					   (qp_info->snoop_table_size + 1),
-					   GFP_ATOMIC);
-		if (!new_snoop_table) {
-			i = -ENOMEM;
-			goto out;
-		}
-
-		qp_info->snoop_table = new_snoop_table;
-		qp_info->snoop_table_size++;
-	}
-	qp_info->snoop_table[i] = mad_snoop_priv;
-	atomic_inc(&qp_info->snoop_count);
-out:
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-	return i;
-}
-
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
-					   u8 port_num,
-					   enum ib_qp_type qp_type,
-					   int mad_snoop_flags,
-					   ib_mad_snoop_handler snoop_handler,
-					   ib_mad_recv_handler recv_handler,
-					   void *context)
-{
-	struct ib_mad_port_private *port_priv;
-	struct ib_mad_agent *ret;
-	struct ib_mad_snoop_private *mad_snoop_priv;
-	int qpn;
-	int err;
-
-	/* Validate parameters */
-	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
-	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
-		ret = ERR_PTR(-EINVAL);
-		goto error1;
-	}
-	qpn = get_spl_qp_index(qp_type);
-	if (qpn == -1) {
-		ret = ERR_PTR(-EINVAL);
-		goto error1;
-	}
-	port_priv = ib_get_mad_port(device, port_num);
-	if (!port_priv) {
-		ret = ERR_PTR(-ENODEV);
-		goto error1;
-	}
-	/* Allocate structures */
-	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
-	if (!mad_snoop_priv) {
-		ret = ERR_PTR(-ENOMEM);
-		goto error1;
-	}
-
-	/* Now, fill in the various structures */
-	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
-	mad_snoop_priv->agent.device = device;
-	mad_snoop_priv->agent.recv_handler = recv_handler;
-	mad_snoop_priv->agent.snoop_handler = snoop_handler;
-	mad_snoop_priv->agent.context = context;
-	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
-	mad_snoop_priv->agent.port_num = port_num;
-	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
-	init_completion(&mad_snoop_priv->comp);
-
-	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
-	if (err) {
-		ret = ERR_PTR(err);
-		goto error2;
-	}
-
-	mad_snoop_priv->snoop_index = register_snoop_agent(
-						&port_priv->qp_info[qpn],
-						mad_snoop_priv);
-	if (mad_snoop_priv->snoop_index < 0) {
-		ret = ERR_PTR(mad_snoop_priv->snoop_index);
-		goto error3;
-	}
-
-	atomic_set(&mad_snoop_priv->refcount, 1);
-	return &mad_snoop_priv->agent;
-error3:
-	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-error2:
-	kfree(mad_snoop_priv);
-error1:
-	return ret;
-}
-EXPORT_SYMBOL(ib_register_mad_snoop);
-
 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 {
-	if (atomic_dec_and_test(&mad_agent_priv->refcount))
+	if (refcount_dec_and_test(&mad_agent_priv->refcount))
 		complete(&mad_agent_priv->comp);
-}
-
-static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
-{
-	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-		complete(&mad_snoop_priv->comp);
 }
 
 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
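
The refcount_t conversion throughout this patch is mechanical, but the deref_mad_agent()/completion pairing deserves spelling out: dropping the last reference signals a completion that the unregister path waits on. A hedged, self-contained sketch of that lifecycle (struct my_obj and the helpers are invented for illustration):

#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct my_obj {
	refcount_t refcount;
	struct completion comp;
};

static void my_obj_get(struct my_obj *obj)
{
	/* refcount_t saturates and WARNs on overflow, unlike plain atomic_t */
	refcount_inc(&obj->refcount);
}

static void my_obj_put(struct my_obj *obj)
{
	/* Last reference gone: wake whoever is tearing the object down */
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->comp);
}

static void my_obj_destroy(struct my_obj *obj)
{
	my_obj_put(obj);			/* drop the creator's reference */
	wait_for_completion(&obj->comp);	/* wait out in-flight users */
	kfree(obj);
}
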
@@ -598,6 +493,7 @@
 	struct ib_mad_port_private *port_priv;
 
 	/* Note that we could still be handling received MADs */
+	trace_ib_mad_unregister_agent(mad_agent_priv);
 
 	/*
 	 * Canceling all sends results in dropping received response
@@ -610,9 +506,7 @@
 	spin_lock_irq(&port_priv->reg_lock);
 	remove_mad_reg_req(mad_agent_priv);
 	spin_unlock_irq(&port_priv->reg_lock);
-	idr_lock(&ib_mad_clients);
-	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
-	idr_unlock(&ib_mad_clients);
+	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 
 	flush_workqueue(port_priv->wq);
 
@@ -626,25 +520,6 @@
 	kfree_rcu(mad_agent_priv, rcu);
 }
 
-static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
-{
-	struct ib_mad_qp_info *qp_info;
-	unsigned long flags;
-
-	qp_info = mad_snoop_priv->qp_info;
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
-	atomic_dec(&qp_info->snoop_count);
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-
-	deref_snoop_agent(mad_snoop_priv);
-	wait_for_completion(&mad_snoop_priv->comp);
-
-	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-
-	kfree(mad_snoop_priv);
-}
-
 /*
  * ib_unregister_mad_agent - Unregisters a client from using MAD services
  *
@@ -653,20 +528,11 @@
 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
-	struct ib_mad_snoop_private *mad_snoop_priv;
 
-	/* If the TID is zero, the agent can only snoop. */
-	if (mad_agent->hi_tid) {
-		mad_agent_priv = container_of(mad_agent,
-					      struct ib_mad_agent_private,
-					      agent);
-		unregister_mad_agent(mad_agent_priv);
-	} else {
-		mad_snoop_priv = container_of(mad_agent,
-					      struct ib_mad_snoop_private,
-					      agent);
-		unregister_mad_snoop(mad_snoop_priv);
-	}
+	mad_agent_priv = container_of(mad_agent,
+				      struct ib_mad_agent_private,
+				      agent);
+	unregister_mad_agent(mad_agent_priv);
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);
 
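
With the snoop branch gone, ib_unregister_mad_agent() reduces to the usual container_of() idiom: the caller holds a pointer to the embedded public struct and the core recovers its private wrapper. A minimal sketch under invented names (struct pub, struct priv, to_priv):

#include <linux/kernel.h>	/* container_of(); <linux/container_of.h> on newer trees */

struct pub {
	int handle;
};

struct priv {
	unsigned long internal_state;
	struct pub agent;	/* public part embedded in the private one */
};

static struct priv *to_priv(struct pub *p)
{
	/* Computes priv's address by subtracting offsetof(struct priv, agent) */
	return container_of(p, struct priv, agent);
}
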
@@ -680,57 +546,6 @@
 	list_del(&mad_list->list);
 	mad_queue->count--;
 	spin_unlock_irqrestore(&mad_queue->lock, flags);
-}
-
-static void snoop_send(struct ib_mad_qp_info *qp_info,
-		       struct ib_mad_send_buf *send_buf,
-		       struct ib_mad_send_wc *mad_send_wc,
-		       int mad_snoop_flags)
-{
-	struct ib_mad_snoop_private *mad_snoop_priv;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	for (i = 0; i < qp_info->snoop_table_size; i++) {
-		mad_snoop_priv = qp_info->snoop_table[i];
-		if (!mad_snoop_priv ||
-		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
-			continue;
-
-		atomic_inc(&mad_snoop_priv->refcount);
-		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
-						    send_buf, mad_send_wc);
-		deref_snoop_agent(mad_snoop_priv);
-		spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	}
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
-static void snoop_recv(struct ib_mad_qp_info *qp_info,
-		       struct ib_mad_recv_wc *mad_recv_wc,
-		       int mad_snoop_flags)
-{
-	struct ib_mad_snoop_private *mad_snoop_priv;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	for (i = 0; i < qp_info->snoop_table_size; i++) {
-		mad_snoop_priv = qp_info->snoop_table[i];
-		if (!mad_snoop_priv ||
-		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
-			continue;
-
-		atomic_inc(&mad_snoop_priv->refcount);
-		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
-						   mad_recv_wc);
-		deref_snoop_agent(mad_snoop_priv);
-		spin_lock_irqsave(&qp_info->snoop_lock, flags);
-	}
-	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 }
 
 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
@@ -817,6 +632,8 @@
 	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
 		u32 opa_drslid;
 
+		trace_ib_mad_handle_out_opa_smi(opa_smp);
+
 		if ((opa_get_smp_direction(opa_smp)
 		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
 		     OPA_LID_PERMISSIVE &&
@@ -842,6 +659,8 @@
 		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
 			goto out;
 	} else {
+		trace_ib_mad_handle_out_ib_smi(smp);
+
 		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 		     IB_LID_PERMISSIVE &&
 		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
@@ -884,10 +703,10 @@
 	}
 
 	/* No GRH for DR SMP */
-	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
-				  (const struct ib_mad_hdr *)smp, mad_size,
-				  (struct ib_mad_hdr *)mad_priv->mad,
-				  &mad_size, &out_mad_pkey_index);
+	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
+				      (const struct ib_mad *)smp,
+				      (struct ib_mad *)mad_priv->mad, &mad_size,
+				      &out_mad_pkey_index);
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
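
Two things change at this call site: driver methods now live in the ib_device->ops structure, and process_mad() takes fully typed struct ib_mad pointers, with the separate input-size argument dropped. A hedged sketch of the hook's shape from a driver's point of view (my_process_mad is invented; check include/rdma/ib_verbs.h of the matching kernel for the authoritative prototype):

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

static int my_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			  const struct ib_mad *in_mad, struct ib_mad *out_mad,
			  size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	/* Consume the MAD without generating a reply */
	return IB_MAD_RESULT_SUCCESS;
}
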
@@ -899,7 +718,7 @@
 		 * Reference MAD agent until receive
 		 * side of local completion handled
 		 */
-		refcount_inc(&mad_agent_priv->refcount);
+		refcount_inc(&mad_agent_priv->refcount);
 	} else
 		kfree(mad_priv);
 	break;
@@ -939,7 +758,7 @@
 		local->return_wc_byte_len = mad_size;
 	}
 	/* Reference MAD agent until send side of local completion handled */
-	atomic_inc(&mad_agent_priv->refcount);
+	refcount_inc(&mad_agent_priv->refcount);
 	/* Queue local completion to local list */
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
@@ -1097,7 +916,7 @@
 	}
 
 	mad_send_wr->send_buf.mad_agent = mad_agent;
-	atomic_inc(&mad_agent_priv->refcount);
+	refcount_inc(&mad_agent_priv->refcount);
 	return &mad_send_wr->send_buf;
 }
 EXPORT_SYMBOL(ib_create_send_mad);
@@ -1219,6 +1038,7 @@
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
+		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
 		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
 				   NULL);
 		list = &qp_info->send_queue.list;
@@ -1311,7 +1131,7 @@
 		mad_send_wr->status = IB_WC_SUCCESS;
 
 		/* Reference MAD agent until send completes */
-		atomic_inc(&mad_agent_priv->refcount);
+		refcount_inc(&mad_agent_priv->refcount);
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		list_add_tail(&mad_send_wr->agent_list,
 			      &mad_agent_priv->send_list);
@@ -1328,7 +1148,7 @@
 			spin_lock_irqsave(&mad_agent_priv->lock, flags);
 			list_del(&mad_send_wr->agent_list);
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-			atomic_dec(&mad_agent_priv->refcount);
+			deref_mad_agent(mad_agent_priv);
 			goto error;
 		}
 	}
@@ -1367,25 +1187,6 @@
 	}
 }
 EXPORT_SYMBOL(ib_free_recv_mad);
-
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
-					u8 rmpp_version,
-					ib_mad_send_handler send_handler,
-					ib_mad_recv_handler recv_handler,
-					void *context)
-{
-	return ERR_PTR(-EINVAL);	/* XXX: for now */
-}
-EXPORT_SYMBOL(ib_redirect_mad_qp);
-
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
-		      struct ib_wc *wc)
-{
-	dev_err(&mad_agent->device->dev,
-		"ib_process_mad_wc() not implemented yet\n");
-	return 0;
-}
-EXPORT_SYMBOL(ib_process_mad_wc);
 
 static int method_in_use(struct ib_mad_mgmt_method_table **method,
 			 struct ib_mad_reg_req *mad_reg_req)
@@ -1752,8 +1553,8 @@
 		 */
 		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
 		rcu_read_lock();
-		mad_agent = idr_find(&ib_mad_clients, hi_tid);
-		if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
+		mad_agent = xa_load(&ib_mad_clients, hi_tid);
+		if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
			mad_agent = NULL;
 		rcu_read_unlock();
 	} else {
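
The lookup pairs xa_load() under rcu_read_lock() with refcount_inc_not_zero(): an agent whose count already hit zero is mid-teardown, so it is treated as not found rather than resurrected. The same pattern in isolation, reusing the illustrative struct my_obj from the sketch above (find_my_obj is likewise invented):

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/xarray.h>

static struct my_obj *find_my_obj(struct xarray *xa, unsigned long id)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = xa_load(xa, id);
	/*
	 * If the refcount is already zero the object is being freed (via
	 * kfree_rcu, so the memory is still safe to inspect here) and must
	 * not be handed out.
	 */
	if (obj && !refcount_inc_not_zero(&obj->refcount))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}
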
@@ -1805,7 +1606,7 @@
 		}
 	}
 	if (mad_agent)
-		atomic_inc(&mad_agent->refcount);
+		refcount_inc(&mad_agent->refcount);
 out:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 }
@@ -2030,7 +1831,7 @@
 			mad_agent_priv->agent.recv_handler(
 					&mad_agent_priv->agent, NULL,
 					mad_recv_wc);
-			atomic_dec(&mad_agent_priv->refcount);
+			deref_mad_agent(mad_agent_priv);
 		} else {
 			/* not user rmpp, revert to normal behavior and
 			 * drop the mad */
@@ -2047,7 +1848,7 @@
 					&mad_agent_priv->agent,
 					&mad_send_wr->send_buf,
 					mad_recv_wc);
-			atomic_dec(&mad_agent_priv->refcount);
+			deref_mad_agent(mad_agent_priv);
 
 			mad_send_wc.status = IB_WC_SUCCESS;
 			mad_send_wc.vendor_err = 0;
@@ -2072,6 +1873,8 @@
 {
 	enum smi_forward_action retsmi;
 	struct ib_smp *smp = (struct ib_smp *)recv->mad;
+
+	trace_ib_mad_handle_ib_smi(smp);
 
 	if (smi_handle_dr_smp_recv(smp,
 				   rdma_cap_ib_switch(port_priv->device),
@@ -2157,6 +1960,8 @@
 {
 	enum smi_forward_action retsmi;
 	struct opa_smp *smp = (struct opa_smp *)recv->mad;
+
+	trace_ib_mad_handle_opa_smi(smp);
 
 	if (opa_smi_handle_dr_smp_recv(smp,
 				       rdma_cap_ib_switch(port_priv->device),
@@ -2275,12 +2080,12 @@
 	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
 	recv->header.recv_wc.recv_buf.grh = &recv->grh;
 
-	if (atomic_read(&qp_info->snoop_count))
-		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
-
 	/* Validate MAD */
 	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
 		goto out;
+
+	trace_ib_mad_recv_done_handler(qp_info, wc,
+				       (struct ib_mad_hdr *)recv->mad);
 
 	mad_size = recv->mad_size;
 	response = alloc_mad_private(mad_size, GFP_KERNEL);
@@ -2301,14 +2106,12 @@
 	}
 
 	/* Give driver "right of first refusal" on incoming MAD */
-	if (port_priv->device->process_mad) {
-		ret = port_priv->device->process_mad(port_priv->device, 0,
-						     port_priv->port_num,
-						     wc, &recv->grh,
-						     (const struct ib_mad_hdr *)recv->mad,
-						     recv->mad_size,
-						     (struct ib_mad_hdr *)response->mad,
-						     &mad_size, &resp_mad_pkey_index);
+	if (port_priv->device->ops.process_mad) {
+		ret = port_priv->device->ops.process_mad(
+			port_priv->device, 0, port_priv->port_num, wc,
+			&recv->grh, (const struct ib_mad *)recv->mad,
+			(struct ib_mad *)response->mad, &mad_size,
+			&resp_mad_pkey_index);
 
 		if (opa)
 			wc->pkey_index = resp_mad_pkey_index;
@@ -2330,6 +2133,7 @@
 
 	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
 	if (mad_agent) {
+		trace_ib_mad_recv_done_agent(mad_agent);
 		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
 		/*
 		 * recv is freed up in error cases in ib_mad_complete_recv
@@ -2410,7 +2214,7 @@
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
-			  int timeout_ms)
+			  unsigned long timeout_ms)
 {
 	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
 	wait_for_response(mad_send_wr);
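
Widening timeout_ms from int to unsigned long matches msecs_to_jiffies(), whose parameter is an unsigned long millisecond count, so large timeouts are no longer narrowed at the call boundary. A trivial illustration (my_timeout_to_jiffies is an invented name):

#include <linux/jiffies.h>

static unsigned long my_timeout_to_jiffies(unsigned long timeout_ms)
{
	/* Converts milliseconds to the kernel's tick unit without truncation */
	return msecs_to_jiffies(timeout_ms);
}
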
@@ -2494,6 +2298,9 @@
 	send_queue = mad_list->mad_queue;
 	qp_info = send_queue->qp_info;
 
+	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
+	trace_ib_mad_send_done_handler(mad_send_wr, wc);
+
 retry:
 	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
 			    mad_send_wr->header_mapping,
@@ -2519,12 +2326,10 @@
 	mad_send_wc.send_buf = &mad_send_wr->send_buf;
 	mad_send_wc.status = wc->status;
 	mad_send_wc.vendor_err = wc->vendor_err;
-	if (atomic_read(&qp_info->snoop_count))
-		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
-			   IB_MAD_SNOOP_SEND_COMPLETIONS);
 	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
 
 	if (queued_send_wr) {
+		trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
 				   NULL);
 		if (ret) {
@@ -2572,6 +2377,7 @@
 	if (mad_send_wr->retry) {
 		/* Repost send */
 		mad_send_wr->retry = 0;
+		trace_ib_mad_error_handler(mad_send_wr, qp_info);
 		ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
 				   NULL);
 		if (!ret)
@@ -2632,7 +2438,7 @@
 		list_del(&mad_send_wr->agent_list);
 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
 						   &mad_send_wc);
-		atomic_dec(&mad_agent_priv->refcount);
+		deref_mad_agent(mad_agent_priv);
 	}
 }
 
@@ -2761,15 +2567,11 @@
 		local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
 		local->mad_priv->header.recv_wc.recv_buf.mad =
 					(struct ib_mad *)local->mad_priv->mad;
-		if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
-			snoop_recv(recv_mad_agent->qp_info,
-				   &local->mad_priv->header.recv_wc,
-				   IB_MAD_SNOOP_RECVS);
 		recv_mad_agent->agent.recv_handler(
 				&recv_mad_agent->agent,
 				&local->mad_send_wr->send_buf,
 				&local->mad_priv->header.recv_wc);
 		spin_lock_irqsave(&recv_mad_agent->lock, flags);
-		atomic_dec(&recv_mad_agent->refcount);
+		deref_mad_agent(recv_mad_agent);
 		spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
 	}
 
@@ -2779,15 +2581,11 @@
 	mad_send_wc.status = IB_WC_SUCCESS;
 	mad_send_wc.vendor_err = 0;
 	mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
-	if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
-		snoop_send(mad_agent_priv->qp_info,
-			   &local->mad_send_wr->send_buf,
-			   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
 	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
 					   &mad_send_wc);
 
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
-	atomic_dec(&mad_agent_priv->refcount);
+	deref_mad_agent(mad_agent_priv);
 	if (free_mad)
 		kfree(local->mad_priv);
 	kfree(local);
@@ -2873,7 +2671,7 @@
 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
 						   &mad_send_wc);
 
-		atomic_dec(&mad_agent_priv->refcount);
+		deref_mad_agent(mad_agent_priv);
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
@@ -3099,10 +2897,6 @@
 	init_mad_queue(qp_info, &qp_info->send_queue);
 	init_mad_queue(qp_info, &qp_info->recv_queue);
 	INIT_LIST_HEAD(&qp_info->overflow_list);
-	spin_lock_init(&qp_info->snoop_lock);
-	qp_info->snoop_table = NULL;
-	qp_info->snoop_table_size = 0;
-	atomic_set(&qp_info->snoop_count, 0);
 }
 
 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -3146,7 +2940,6 @@
 		return;
 
 	ib_destroy_qp(qp_info->qp);
-	kfree(qp_info->snoop_table);
 }
 
 /*
@@ -3284,9 +3077,11 @@
 	return 0;
 }
 
-static void ib_mad_init_device(struct ib_device *device)
+static int ib_mad_init_device(struct ib_device *device)
 {
 	int start, i;
+	unsigned int count = 0;
+	int ret;
 
 	start = rdma_start_port(device);
 
@@ -3294,17 +3089,23 @@
 		if (!rdma_cap_ib_mad(device, i))
 			continue;
 
-		if (ib_mad_port_open(device, i)) {
+		ret = ib_mad_port_open(device, i);
+		if (ret) {
 			dev_err(&device->dev, "Couldn't open port %d\n", i);
 			goto error;
 		}
-		if (ib_agent_port_open(device, i)) {
+		ret = ib_agent_port_open(device, i);
+		if (ret) {
 			dev_err(&device->dev,
 				"Couldn't open port %d for agents\n", i);
 			goto error_agent;
 		}
+		count++;
 	}
-	return;
+	if (!count)
+		return -EOPNOTSUPP;
+
+	return 0;
 
 error_agent:
 	if (ib_mad_port_close(device, i))
@@ -3321,11 +3122,12 @@
 		if (ib_mad_port_close(device, i))
 			dev_err(&device->dev, "Couldn't close port %d\n", i);
 	}
+	return ret;
 }
 
 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
 {
-	int i;
+	unsigned int i;
 
-	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+	rdma_for_each_port (device, i) {
 		if (!rdma_cap_ib_mad(device, i))
 			continue;
 
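
rdma_for_each_port() replaces the open-coded rdma_start_port()..rdma_end_port() loop and expects an unsigned iterator, which is why i changes type above. A hedged usage sketch (count_mad_ports is an invented example):

#include <rdma/ib_verbs.h>

static unsigned int count_mad_ports(struct ib_device *device)
{
	unsigned int i, n = 0;	/* iterator must be unsigned */

	rdma_for_each_port (device, i) {
		/* Visits every port number of "device" in order */
		if (rdma_cap_ib_mad(device, i))
			n++;
	}
	return n;
}
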
@@ -3354,9 +3156,6 @@
 	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
 
 	INIT_LIST_HEAD(&ib_mad_port_list);
-
-	/* Client ID 0 is used for snoop-only clients */
-	idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
 
 	if (ib_register_client(&mad_client)) {
 		pr_err("Couldn't register ib_mad client\n");
---|