```diff
@@ -36,15 +36,18 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/cgroup_rdma.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/opa_addr.h>
 #include <rdma/ib_mad.h>
 #include <rdma/restrack.h>
 #include "mad_priv.h"
+#include "restrack.h"
 
 /* Total number of ports combined across all struct ib_devices's */
-#define RDMA_MAX_PORTS 1024
+#define RDMA_MAX_PORTS 8192
 
 struct pkey_index_qp_list {
 	struct list_head pkey_index_list;
```
```diff
@@ -54,45 +57,40 @@
 	struct list_head qp_list;
 };
 
-#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
-int cma_configfs_init(void);
-void cma_configfs_exit(void);
-#else
-static inline int cma_configfs_init(void)
+/**
+ * struct rdma_dev_net - rdma net namespace metadata for a net
+ * @nl_sock: Pointer to netlink socket
+ * @net: Pointer to owner net namespace
+ * @id: xarray id to identify the net namespace.
+ */
+struct rdma_dev_net {
+	struct sock *nl_sock;
+	possible_net_t net;
+	u32 id;
+};
+
+extern const struct attribute_group ib_dev_attr_group;
+extern bool ib_devices_shared_netns;
+extern unsigned int rdma_dev_net_id;
+
+static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
 {
-	return 0;
+	return net_generic(net, rdma_dev_net_id);
 }
 
-static inline void cma_configfs_exit(void)
-{
-}
-#endif
-struct cma_device;
-void cma_ref_dev(struct cma_device *cma_dev);
-void cma_deref_dev(struct cma_device *cma_dev);
-typedef bool (*cma_device_filter)(struct ib_device *, void *);
-struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
-					     void *cookie);
-int cma_get_default_gid_type(struct cma_device *cma_dev,
-			     unsigned int port);
-int cma_set_default_gid_type(struct cma_device *cma_dev,
-			     unsigned int port,
-			     enum ib_gid_type default_gid_type);
-int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
-int cma_set_default_roce_tos(struct cma_device *a_dev, unsigned int port,
-			     u8 default_roce_tos);
-struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);
-
-int ib_device_register_sysfs(struct ib_device *device,
-			     int (*port_callback)(struct ib_device *,
-						  u8, struct kobject *));
+int ib_device_register_sysfs(struct ib_device *device);
 void ib_device_unregister_sysfs(struct ib_device *device);
+int ib_device_rename(struct ib_device *ibdev, const char *name);
+int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim);
 
 typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
 				     struct net_device *idev, void *cookie);
 
 typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
 				   struct net_device *idev, void *cookie);
+
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+					unsigned int port);
 
 void ib_enum_roce_netdev(struct ib_device *ib_dev,
 			 roce_netdev_filter filter,
```
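The hunk above drops the old cma_configfs declarations and introduces per-network-namespace state: `struct rdma_dev_net` is the metadata the RDMA core keeps for each `struct net`, and `rdma_net_to_dev_net()` resolves it through `net_generic()` using the `rdma_dev_net_id` slot. As a rough illustration of how such a per-net structure is normally reserved and found, here is a minimal sketch of the generic pernet pattern; the `example_*` identifiers are hypothetical, and the RDMA core's real registration lives in its device code, not in this header.

```c
/*
 * Minimal sketch of the generic pernet pattern (not the RDMA core's actual
 * registration code).  example_net_id and example_net_ops are hypothetical.
 */
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int example_net_id;

static __net_init int example_net_init(struct net *net)
{
	/* net_generic() returns the per-net storage reserved via .size below */
	struct rdma_dev_net *rnet = net_generic(net, example_net_id);

	write_pnet(&rnet->net, net);
	return 0;
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.id   = &example_net_id,
	.size = sizeof(struct rdma_dev_net),
};

/* Registered once at init time, e.g. register_pernet_device(&example_net_ops). */
```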
```diff
@@ -111,6 +109,15 @@
 
 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
 		     struct netlink_callback *cb);
+
+struct ib_client_nl_info {
+	struct sk_buff *nl_msg;
+	struct device *cdev;
+	unsigned int port;
+	u64 abi;
+};
+int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
+			  struct ib_client_nl_info *res);
 
 enum ib_cache_gid_default_mode {
 	IB_CACHE_GID_DEFAULT_MODE_SET,
```
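`struct ib_client_nl_info`, added above, is the cookie that `ib_get_client_nl_info()` fills in on behalf of the netlink layer when userspace asks which character device and ABI version a client exposes. A hedged sketch of the client side follows; it assumes the `get_nl_info` hook on `struct ib_client` used by in-tree consumers of this interface, and every `example_*` name and value is made up.

```c
/*
 * Hypothetical client-side hook filling struct ib_client_nl_info; the
 * .get_nl_info member and the values below are assumptions for illustration.
 */
static int example_get_nl_info(struct ib_device *ibdev, void *client_data,
			       struct ib_client_nl_info *res)
{
	res->abi = 1;		/* hypothetical ABI version reported to userspace */
	res->cdev = NULL;	/* would point at the client's struct device */
	return 0;
}

static struct ib_client example_client = {
	.name = "example",
	.get_nl_info = example_get_nl_info,
};
```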
```diff
@@ -143,9 +150,10 @@
 int ib_cache_setup_one(struct ib_device *device);
 void ib_cache_cleanup_one(struct ib_device *device);
 void ib_cache_release_one(struct ib_device *device);
+void ib_dispatch_event_clients(struct ib_event *event);
 
 #ifdef CONFIG_CGROUP_RDMA
-int ib_device_register_rdmacg(struct ib_device *device);
+void ib_device_register_rdmacg(struct ib_device *device);
 void ib_device_unregister_rdmacg(struct ib_device *device);
 
 int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
```
```diff
@@ -156,21 +164,26 @@
 			 struct ib_device *device,
 			 enum rdmacg_resource_type resource_index);
 #else
-static inline int ib_device_register_rdmacg(struct ib_device *device)
-{ return 0; }
+static inline void ib_device_register_rdmacg(struct ib_device *device)
+{
+}
 
 static inline void ib_device_unregister_rdmacg(struct ib_device *device)
-{ }
+{
+}
 
 static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
 				       struct ib_device *device,
 				       enum rdmacg_resource_type resource_index)
-{ return 0; }
+{
+	return 0;
+}
 
 static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
 				      struct ib_device *device,
 				      enum rdmacg_resource_type resource_index)
-{ }
+{
+}
 #endif
 
 static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
```
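The `ib_rdmacg_*` helpers above (and their no-op stubs when `CONFIG_CGROUP_RDMA` is off) are intended to be used as a charge/uncharge pair around hardware-object creation. A minimal caller-side sketch, with a hypothetical `example_alloc_hw_object()` standing in for the real allocation:

```c
/* Hypothetical hardware allocation, used only to show the pairing. */
static int example_alloc_hw_object(struct ib_device *dev);

static int example_create_hw_object(struct ib_device *dev,
				    struct ib_rdmacg_object *cg_obj)
{
	int ret;

	ret = ib_rdmacg_try_charge(cg_obj, dev, RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		return ret;	/* over the configured cgroup limit */

	ret = example_alloc_hw_object(dev);
	if (ret)
		ib_rdmacg_uncharge(cg_obj, dev, RDMACG_RESOURCE_HCA_OBJECT);

	return ret;
}
```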
```diff
@@ -188,7 +201,7 @@
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
-int rdma_nl_init(void);
+void rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
```
```diff
@@ -206,7 +219,7 @@
 			      u64 *sn_pfx);
 
 #ifdef CONFIG_SECURITY_INFINIBAND
-void ib_security_destroy_port_pkey_list(struct ib_device *device);
+void ib_security_release_port_pkey_list(struct ib_device *device);
 
 void ib_security_cache_change(struct ib_device *device,
 			      u8 port_num,
```
```diff
@@ -227,8 +240,9 @@
 			     enum ib_qp_type qp_type);
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
+void ib_mad_agent_security_change(void);
 #else
-static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
+static inline void ib_security_release_port_pkey_list(struct ib_device *device)
 {
 }
 
```
```diff
@@ -243,10 +257,10 @@
 				   int qp_attr_mask,
 				   struct ib_udata *udata)
 {
-	return qp->device->modify_qp(qp->real_qp,
-				     qp_attr,
-				     qp_attr_mask,
-				     udata);
+	return qp->device->ops.modify_qp(qp->real_qp,
+					 qp_attr,
+					 qp_attr_mask,
+					 udata);
 }
 
 static inline int ib_create_qp_security(struct ib_qp *qp,
```
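The change from `qp->device->modify_qp()` to `qp->device->ops.modify_qp()` reflects the verbs callbacks moving into a separate `struct ib_device_ops` that a driver installs, typically via `ib_set_device_ops()`. A sketch of that driver-side wiring, with hypothetical `example_*` entry points:

```c
/* Hypothetical driver entry points matching the ops prototypes. */
static struct ib_qp *example_create_qp(struct ib_pd *pd,
				       struct ib_qp_init_attr *attr,
				       struct ib_udata *udata);
static int example_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata);

static const struct ib_device_ops example_dev_ops = {
	.create_qp = example_create_qp,
	.modify_qp = example_modify_qp,
};

static void example_setup_device(struct ib_device *ibdev)
{
	/* install the ops table once; core code then calls dev->ops.* */
	ib_set_device_ops(ibdev, &example_dev_ops);
}
```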
```diff
@@ -292,9 +306,14 @@
 {
 	return 0;
 }
+
+static inline void ib_mad_agent_security_change(void)
+{
+}
 #endif
 
-struct ib_device *ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(const struct net *net, u32 index);
+
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
```
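`ib_device_get_by_index()` is now namespace-aware: callers pass the requesting `struct net` along with the device index. A sketch of the expected calling pattern, assuming the usual convention that a successful lookup is balanced by `ib_device_put()`; the handler itself is hypothetical:

```c
/* Hypothetical netlink-style handler showing the lookup/put pairing. */
static int example_handler(struct sk_buff *skb, u32 index)
{
	struct ib_device *device;

	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/* ... act on the device while the reference is held ... */

	ib_device_put(device);
	return 0;
}
```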
```diff
@@ -303,31 +322,48 @@
 					  struct ib_pd *pd,
 					  struct ib_qp_init_attr *attr,
 					  struct ib_udata *udata,
-					  struct ib_uobject *uobj)
+					  struct ib_uqp_object *uobj)
 {
+	enum ib_qp_type qp_type = attr->qp_type;
 	struct ib_qp *qp;
+	bool is_xrc;
 
-	if (!dev->create_qp)
+	if (!dev->ops.create_qp)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	qp = dev->create_qp(pd, attr, udata);
+	qp = dev->ops.create_qp(pd, attr, udata);
 	if (IS_ERR(qp))
 		return qp;
 
 	qp->device = dev;
 	qp->pd = pd;
 	qp->uobject = uobj;
+	qp->real_qp = qp;
+
+	qp->qp_type = attr->qp_type;
+	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+	qp->send_cq = attr->send_cq;
+	qp->recv_cq = attr->recv_cq;
+	qp->srq = attr->srq;
+	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+	qp->event_handler = attr->event_handler;
+
+	atomic_set(&qp->usecnt, 0);
+	spin_lock_init(&qp->mr_lock);
+	INIT_LIST_HEAD(&qp->rdma_mrs);
+	INIT_LIST_HEAD(&qp->sig_mrs);
+
+	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
 	/*
 	 * We don't track XRC QPs for now, because they don't have PD
 	 * and more importantly they are created internaly by driver,
 	 * see mlx5 create_dev_resources() as an example.
 	 */
-	if (attr->qp_type < IB_QPT_XRC_INI) {
-		qp->res.type = RDMA_RESTRACK_QP;
+	is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
+	if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
+		rdma_restrack_parent_name(&qp->res, &pd->res);
 		rdma_restrack_add(&qp->res);
-	} else
-		qp->res.valid = false;
-
+	}
 	return qp;
 }
 
```
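The rewritten `_ib_create_qp()` above now initializes the common QP fields itself and switches resource tracking to the newer restrack helpers: allocate the entry with `rdma_restrack_new()`, inherit the name from the parent PD, then publish it. A compressed sketch of that lifecycle, including the `rdma_restrack_del()` counterpart that belongs on the destroy path (not part of this hunk); the `example_*` wrappers are illustrative only:

```c
/* Illustrative wrappers only; the real calls sit inside _ib_create_qp(). */
static void example_track_qp(struct ib_qp *qp, struct ib_pd *pd)
{
	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);	/* allocate the entry */
	rdma_restrack_parent_name(&qp->res, &pd->res);	/* inherit PD's name */
	rdma_restrack_add(&qp->res);			/* publish for tracking */
}

static void example_untrack_qp(struct ib_qp *qp)
{
	rdma_restrack_del(&qp->res);	/* counterpart on the destroy path */
}
```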
```diff
@@ -338,7 +374,44 @@
 
 int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 				 const union ib_gid *dgid,
-				 u8 *dmac, const struct net_device *ndev,
+				 u8 *dmac, const struct ib_gid_attr *sgid_attr,
 				 int *hoplimit);
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+			   const struct net_device *dev);
+
+struct sa_path_rec;
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+				 const struct ib_gid_attr *attr);
+
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
+
+void ib_free_port_attrs(struct ib_core_device *coredev);
+int ib_setup_port_attrs(struct ib_core_device *coredev);
+
+int rdma_compatdev_set(u8 enable);
+
+int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+				 struct kobject *kobj, struct kobj_type *ktype,
+				 const char *name);
+void ib_port_unregister_module_stat(struct kobject *kobj);
+
+int ib_device_set_netns_put(struct sk_buff *skb,
+			    struct ib_device *dev, u32 ns_fd);
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet);
+void rdma_nl_net_exit(struct rdma_dev_net *rnet);
+
+struct rdma_umap_priv {
+	struct vm_area_struct *vma;
+	struct list_head list;
+	struct rdma_user_mmap_entry *entry;
+};
+
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+			 struct vm_area_struct *vma,
+			 struct rdma_user_mmap_entry *entry);
+
+void ib_cq_pool_init(struct ib_device *dev);
+void ib_cq_pool_destroy(struct ib_device *dev);
 
 #endif /* _CORE_PRIV_H */
```
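`ib_cq_pool_init()`/`ib_cq_pool_destroy()` above only create and tear down the per-device shared CQ pool; consumers are expected to borrow and return CQs through the pool's get/put pair. A hedged consumer-side sketch, assuming the `ib_cq_pool_get()`/`ib_cq_pool_put()` API from the same series; the CQE count and completion-vector hint are arbitrary:

```c
/* Hedged consumer sketch; API names assumed from the shared CQ pool series. */
static int example_use_shared_cq(struct ib_device *dev)
{
	struct ib_cq *cq;

	/* ask for room for 128 CQEs, completion-vector hint 0 */
	cq = ib_cq_pool_get(dev, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... post work whose completions land on this CQ ... */

	ib_cq_pool_put(cq, 128);
	return 0;
}
```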