forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/infiniband/hw/mlx4/main.c
@@ -263,6 +263,8 @@
 	int hw_update = 0;
 	int i;
 	struct gid_entry *gids = NULL;
+	u16 vlan_id = 0xffff;
+	u8 mac[ETH_ALEN];
 
 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
 		return -EINVAL;
@@ -273,12 +275,16 @@
 	if (!context)
 		return -EINVAL;
 
+	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+	if (ret)
+		return ret;
 	port_gid_table = &iboe->gids[attr->port_num - 1];
 	spin_lock_bh(&iboe->lock);
 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
 		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
-		    port_gid_table->gids[i].gid_type == attr->gid_type) {
+		    port_gid_table->gids[i].gid_type == attr->gid_type &&
+		    port_gid_table->gids[i].vlan_id == vlan_id) {
 			found = i;
 			break;
 		}
@@ -298,6 +304,7 @@
 		memcpy(&port_gid_table->gids[free].gid,
		       &attr->gid, sizeof(attr->gid));
 		port_gid_table->gids[free].gid_type = attr->gid_type;
+		port_gid_table->gids[free].vlan_id = vlan_id;
 		port_gid_table->gids[free].ctx->real_index = free;
 		port_gid_table->gids[free].ctx->refcount = 1;
 		hw_update = 1;
@@ -427,9 +434,6 @@
 	return real_index;
 }
 
-#define field_avail(type, fld, sz) (offsetof(type, fld) + \
-				    sizeof(((type *)0)->fld) <= (sz))
-
 static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
@@ -440,7 +444,7 @@
 	int err;
 	int have_ib_ports;
 	struct mlx4_uverbs_ex_query_device cmd;
-	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+	struct mlx4_uverbs_ex_query_device_resp resp = {};
 	struct mlx4_clock_params clock_params;
 
 	if (uhw->inlen) {
@@ -554,7 +558,6 @@
 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
-	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 	props->max_ah = INT_MAX;
@@ -592,7 +595,7 @@
			sizeof(struct mlx4_wqe_data_seg);
 	}
 
-	if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
 		if (props->rss_caps.supported_qpts) {
 			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
@@ -616,7 +619,7 @@
			sizeof(resp.rss_caps);
 	}
 
-	if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
 		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		      IB_LINK_LAYER_ETHERNET) ||
@@ -743,7 +746,8 @@
 
 static u8 state_to_phys_state(enum ib_port_state state)
 {
-	return state == IB_PORT_ACTIVE ? 5 : 3;
+	return state == IB_PORT_ACTIVE ?
+		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
@@ -777,7 +781,8 @@
 	props->ip_gids = true;
 	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
-	props->pkey_tbl_len = 1;
+	if (mdev->dev->caps.pkey_table_len[port])
+		props->pkey_tbl_len = 1;
 	props->max_mtu = IB_MTU_4096;
 	props->max_vl_num = 2;
 	props->state = IB_PORT_DOWN;
....@@ -1085,19 +1090,21 @@
10851090 return err;
10861091 }
10871092
1088
-static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
1089
- struct ib_udata *udata)
1093
+static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
1094
+ struct ib_udata *udata)
10901095 {
1096
+ struct ib_device *ibdev = uctx->device;
10911097 struct mlx4_ib_dev *dev = to_mdev(ibdev);
1092
- struct mlx4_ib_ucontext *context;
1098
+ struct mlx4_ib_ucontext *context = to_mucontext(uctx);
10931099 struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
10941100 struct mlx4_ib_alloc_ucontext_resp resp;
10951101 int err;
10961102
10971103 if (!dev->ib_active)
1098
- return ERR_PTR(-EAGAIN);
1104
+ return -EAGAIN;
10991105
1100
- if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
1106
+ if (ibdev->ops.uverbs_abi_ver ==
1107
+ MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
11011108 resp_v3.qp_tab_size = dev->dev->caps.num_qps;
11021109 resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
11031110 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -1109,15 +1116,9 @@
 		resp.cqe_size = dev->dev->caps.cqe_size;
 	}
 
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
-	if (err) {
-		kfree(context);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;
 
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
@@ -1125,254 +1126,139 @@
 	INIT_LIST_HEAD(&context->wqn_ranges_list);
 	mutex_init(&context->wqn_ranges_mutex);
 
-	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
+	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
 		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
 	else
 		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
 
 	if (err) {
 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
-		kfree(context);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
 
-	return &context->ibucontext;
+	return err;
 }
 
-static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
 
 	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
-	kfree(context);
-
-	return 0;
 }
-
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
-	/* vma_open is called when a new VMA is created on top of our VMA.
-	 * This is done through either mremap flow or split_vma (usually due
-	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
-	 * vma, as this VMA is strongly hardware related. Therefore we set the
-	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
-	 * calling us again and trying to do incorrect actions. We assume that
-	 * the original vma size is exactly a single page that there will be no
-	 * "splitting" operations on.
-	 */
-	area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
-	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
-	/* It's guaranteed that all VMAs opened on a FD are closed before the
-	 * file itself is closed, therefore no sync is needed with the regular
-	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
-	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
-	 * The close operation is usually called under mm->mmap_sem except when
-	 * process is exiting. The exiting case is handled explicitly as part
-	 * of mlx4_ib_disassociate_ucontext.
-	 */
-	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
-				area->vm_private_data;
-
-	/* set the vma context pointer to null in the mlx4_ib driver's private
-	 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
-	 */
-	mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
-	.open = mlx4_ib_vma_open,
-	.close = mlx4_ib_vma_close
-};
 
 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
-	int i;
-	struct vm_area_struct *vma;
-	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
-	/* need to protect from a race on closing the vma as part of
-	 * mlx4_ib_vma_close().
-	 */
-	for (i = 0; i < HW_BAR_COUNT; i++) {
-		vma = context->hw_bar_info[i].vma;
-		if (!vma)
-			continue;
-
-		zap_vma_ptes(context->hw_bar_info[i].vma,
-			     context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
-		context->hw_bar_info[i].vma->vm_flags &=
-			~(VM_SHARED | VM_MAYSHARE);
-		/* context going to be destroyed, should not access ops any more */
-		context->hw_bar_info[i].vma->vm_ops = NULL;
-	}
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
-				 struct mlx4_ib_vma_private_data *vma_private_data)
-{
-	vma_private_data->vma = vma;
-	vma->vm_private_data = vma_private_data;
-	vma->vm_ops = &mlx4_ib_vm_ops;
 }
 
 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct mlx4_ib_dev *dev = to_mdev(context->device);
-	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
 
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-		return -EINVAL;
+	switch (vma->vm_pgoff) {
+	case 0:
+		return rdma_user_mmap_io(context, vma,
+					 to_mucontext(context)->uar.pfn,
+					 PAGE_SIZE,
+					 pgprot_noncached(vma->vm_page_prot),
+					 NULL);
 
-	if (vma->vm_pgoff == 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
+	case 1:
+		if (dev->dev->caps.bf_reg_size == 0)
 			return -EINVAL;
+		return rdma_user_mmap_io(
+			context, vma,
+			to_mucontext(context)->uar.pfn +
+				dev->dev->caps.num_uars,
+			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+			NULL);
 
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
-
-	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
-			return -EINVAL;
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn +
-				       dev->dev->caps.num_uars,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
-	} else if (vma->vm_pgoff == 3) {
+	case 3: {
 		struct mlx4_clock_params params;
 		int ret;
 
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
-			return -EINVAL;
-
 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
 		if (ret)
 			return ret;
 
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (pci_resource_start(dev->dev->persist->pdev,
-							   params.bar) +
-					params.offset)
-				       >> PAGE_SHIFT,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
+		return rdma_user_mmap_io(
+			context, vma,
+			(pci_resource_start(dev->dev->persist->pdev,
					    params.bar) +
			 params.offset) >>
				PAGE_SHIFT,
+			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+			NULL);
+	}
 
-		mlx4_ib_set_vma_data(vma,
-				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
-	} else {
+	default:
 		return -EINVAL;
 	}
-
-	return 0;
 }
 
-static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
-				      struct ib_ucontext *context,
-				      struct ib_udata *udata)
+static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
-	struct mlx4_ib_pd *pd;
+	struct mlx4_ib_pd *pd = to_mpd(ibpd);
+	struct ib_device *ibdev = ibpd->device;
 	int err;
 
-	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return ERR_PTR(-ENOMEM);
-
 	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
-	if (err) {
-		kfree(pd);
-		return ERR_PTR(err);
+	if (err)
+		return err;
+
+	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
+		return -EFAULT;
 	}
-
-	if (context)
-		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
-			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
-			kfree(pd);
-			return ERR_PTR(-EFAULT);
-		}
-	return &pd->ibpd;
-}
-
-static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
-{
-	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
-	kfree(pd);
-
 	return 0;
 }
 
-static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
-					  struct ib_ucontext *context,
-					  struct ib_udata *udata)
+static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
-	struct mlx4_ib_xrcd *xrcd;
+	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
+	return 0;
+}
+
+static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
+	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
 	struct ib_cq_init_attr cq_attr = {};
 	int err;
 
-	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
-		return ERR_PTR(-ENOSYS);
+	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+		return -EOPNOTSUPP;
 
-	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
-	if (!xrcd)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
+	err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
 	if (err)
-		goto err1;
+		return err;
 
-	xrcd->pd = ib_alloc_pd(ibdev, 0);
+	xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
 	if (IS_ERR(xrcd->pd)) {
 		err = PTR_ERR(xrcd->pd);
 		goto err2;
 	}
 
 	cq_attr.cqe = 1;
-	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
+	xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
 	if (IS_ERR(xrcd->cq)) {
 		err = PTR_ERR(xrcd->cq);
 		goto err3;
 	}
 
-	return &xrcd->ibxrcd;
+	return 0;
 
 err3:
 	ib_dealloc_pd(xrcd->pd);
 err2:
-	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
-err1:
-	kfree(xrcd);
-	return ERR_PTR(err);
+	mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
+	return err;
 }
 
-static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
-	kfree(xrcd);
-
 	return 0;
 }
 
@@ -1646,20 +1532,8 @@
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
 	int default_flow;
 
-	static const u16 __mlx4_domain[] = {
-		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
-		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
-		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
-		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
-	};
-
 	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
 		pr_err("Invalid priority value %d\n", flow_attr->priority);
-		return -EINVAL;
-	}
-
-	if (domain >= IB_FLOW_DOMAIN_NUM) {
-		pr_err("Invalid domain value %d\n", domain);
 		return -EINVAL;
 	}
 
@@ -1671,8 +1545,7 @@
 		return PTR_ERR(mailbox);
 	ctrl = mailbox->buf;
 
-	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
-				 flow_attr->priority);
+	ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
 	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
 	ctrl->port = flow_attr->port;
 	ctrl->qpn = cpu_to_be32(qp->qp_num);
@@ -1814,8 +1687,8 @@
 }
 
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
-				    struct ib_flow_attr *flow_attr,
-				    int domain, struct ib_udata *udata)
+					   struct ib_flow_attr *flow_attr,
+					   struct ib_udata *udata)
 {
 	int err = 0, i = 0, j = 0;
 	struct mlx4_ib_flow *mflow;
@@ -1881,8 +1754,8 @@
 	}
 
 	while (i < ARRAY_SIZE(type) && type[i]) {
-		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-					    &mflow->reg_id[i].id);
+		err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
+					    type[i], &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		if (is_bonded) {
@@ -1891,7 +1764,7 @@
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
-						    domain, type[j],
+						    MLX4_DOMAIN_UVERBS, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
@@ -2143,39 +2016,44 @@
 	return err;
 }
 
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
-			char *buf)
+static ssize_t hca_type_show(struct device *device,
+			     struct device_attribute *attr, char *buf)
 {
 	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
 	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
 }
+static DEVICE_ATTR_RO(hca_type);
 
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
-			char *buf)
+static ssize_t hw_rev_show(struct device *device,
+			   struct device_attribute *attr, char *buf)
 {
 	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
 	return sprintf(buf, "%x\n", dev->dev->rev_id);
 }
+static DEVICE_ATTR_RO(hw_rev);
 
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
-			  char *buf)
+static ssize_t board_id_show(struct device *device,
+			     struct device_attribute *attr, char *buf)
 {
 	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
+
 	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
 }
+static DEVICE_ATTR_RO(board_id);
 
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mlx4_class_attributes[] = {
+	&dev_attr_hw_rev.attr,
+	&dev_attr_hca_type.attr,
+	&dev_attr_board_id.attr,
+	NULL
+};
 
-static struct device_attribute *mlx4_class_attributes[] = {
-	&dev_attr_hw_rev,
-	&dev_attr_hca_type,
-	&dev_attr_board_id
+static const struct attribute_group mlx4_attr_group = {
+	.attrs = mlx4_class_attributes,
 };
 
 struct diag_counter {
@@ -2320,6 +2198,11 @@
 	}
 }
 
+static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
+	.alloc_hw_stats = mlx4_ib_alloc_hw_stats,
+	.get_hw_stats = mlx4_ib_get_hw_stats,
+};
+
 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
 {
 	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
@@ -2346,8 +2229,7 @@
				diag[i].offset, i);
 	}
 
-	ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
-	ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
+	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
 
 	return 0;
 
@@ -2451,6 +2333,32 @@
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;
+
+		if (dev == iboe->netdevs[port - 1] &&
+		    (event == NETDEV_UP || event == NETDEV_DOWN)) {
+			enum ib_port_state port_state;
+			struct ib_event ibev = { };
+
+			if (ib_get_cached_port_state(&ibdev->ib_dev, port,
+						     &port_state))
+				continue;
+
+			if (event == NETDEV_UP &&
+			    (port_state != IB_PORT_ACTIVE ||
+			     iboe->last_port_state[port - 1] != IB_PORT_DOWN))
+				continue;
+			if (event == NETDEV_DOWN &&
+			    (port_state != IB_PORT_DOWN ||
+			     iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
+				continue;
+			iboe->last_port_state[port - 1] = port_state;
+
+			ibev.device = &ibdev->ib_dev;
+			ibev.element.port_num = port;
+			ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
+							  IB_EVENT_PORT_ERR;
+			ib_dispatch_event(&ibev);
+		}
 
 	}
 	spin_unlock_bh(&iboe->lock);
@@ -2599,6 +2507,98 @@
		       (int) dev->dev->caps.fw_ver & 0xffff);
 }
 
+static const struct ib_device_ops mlx4_ib_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_MLX4,
+	.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
+
+	.add_gid = mlx4_ib_add_gid,
+	.alloc_mr = mlx4_ib_alloc_mr,
+	.alloc_pd = mlx4_ib_alloc_pd,
+	.alloc_ucontext = mlx4_ib_alloc_ucontext,
+	.attach_mcast = mlx4_ib_mcg_attach,
+	.create_ah = mlx4_ib_create_ah,
+	.create_cq = mlx4_ib_create_cq,
+	.create_qp = mlx4_ib_create_qp,
+	.create_srq = mlx4_ib_create_srq,
+	.dealloc_pd = mlx4_ib_dealloc_pd,
+	.dealloc_ucontext = mlx4_ib_dealloc_ucontext,
+	.del_gid = mlx4_ib_del_gid,
+	.dereg_mr = mlx4_ib_dereg_mr,
+	.destroy_ah = mlx4_ib_destroy_ah,
+	.destroy_cq = mlx4_ib_destroy_cq,
+	.destroy_qp = mlx4_ib_destroy_qp,
+	.destroy_srq = mlx4_ib_destroy_srq,
+	.detach_mcast = mlx4_ib_mcg_detach,
+	.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
+	.drain_rq = mlx4_ib_drain_rq,
+	.drain_sq = mlx4_ib_drain_sq,
+	.get_dev_fw_str = get_fw_ver_str,
+	.get_dma_mr = mlx4_ib_get_dma_mr,
+	.get_link_layer = mlx4_ib_port_link_layer,
+	.get_netdev = mlx4_ib_get_netdev,
+	.get_port_immutable = mlx4_port_immutable,
+	.map_mr_sg = mlx4_ib_map_mr_sg,
+	.mmap = mlx4_ib_mmap,
+	.modify_cq = mlx4_ib_modify_cq,
+	.modify_device = mlx4_ib_modify_device,
+	.modify_port = mlx4_ib_modify_port,
+	.modify_qp = mlx4_ib_modify_qp,
+	.modify_srq = mlx4_ib_modify_srq,
+	.poll_cq = mlx4_ib_poll_cq,
+	.post_recv = mlx4_ib_post_recv,
+	.post_send = mlx4_ib_post_send,
+	.post_srq_recv = mlx4_ib_post_srq_recv,
+	.process_mad = mlx4_ib_process_mad,
+	.query_ah = mlx4_ib_query_ah,
+	.query_device = mlx4_ib_query_device,
+	.query_gid = mlx4_ib_query_gid,
+	.query_pkey = mlx4_ib_query_pkey,
+	.query_port = mlx4_ib_query_port,
+	.query_qp = mlx4_ib_query_qp,
+	.query_srq = mlx4_ib_query_srq,
+	.reg_user_mr = mlx4_ib_reg_user_mr,
+	.req_notify_cq = mlx4_ib_arm_cq,
+	.rereg_user_mr = mlx4_ib_rereg_user_mr,
+	.resize_cq = mlx4_ib_resize_cq,
+
+	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
+	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
+	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
+	.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
+	.create_wq = mlx4_ib_create_wq,
+	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
+	.destroy_wq = mlx4_ib_destroy_wq,
+	.modify_wq = mlx4_ib_modify_wq,
+
+	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
+			   ib_rwq_ind_tbl),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
+	.alloc_mw = mlx4_ib_alloc_mw,
+	.dealloc_mw = mlx4_ib_dealloc_mw,
+
+	INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
+	.alloc_xrcd = mlx4_ib_alloc_xrcd,
+	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
+
+	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
+	.create_flow = mlx4_ib_create_flow,
+	.destroy_flow = mlx4_ib_destroy_flow,
+};
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
@@ -2622,7 +2622,7 @@
 	if (num_ports == 0)
		return NULL;
 
-	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
+	ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
 	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
@@ -2646,8 +2646,6 @@
 	ibdev->dev = dev;
 	ibdev->bond_next_port = 0;
 
-	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
-	ibdev->ib_dev.owner = THIS_MODULE;
 	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
 	ibdev->num_ports = num_ports;
@@ -2655,14 +2653,6 @@
		1 : ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
-	ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
-	ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
-	ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
-
-	if (dev->caps.userspace_caps)
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
-	else
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
 
 	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -2690,115 +2680,41 @@
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
 
-	ibdev->ib_dev.query_device = mlx4_ib_query_device;
-	ibdev->ib_dev.query_port = mlx4_ib_query_port;
-	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
-	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
-	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
-	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
-	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
-	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
-	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
-	ibdev->ib_dev.mmap = mlx4_ib_mmap;
-	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
-	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
-	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
-	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
-	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
-	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
-	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
-	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
-	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
-	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
-	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
-	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
-	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
-	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
-	ibdev->ib_dev.drain_sq = mlx4_ib_drain_sq;
-	ibdev->ib_dev.drain_rq = mlx4_ib_drain_rq;
-	ibdev->ib_dev.post_send = mlx4_ib_post_send;
-	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
-	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
-	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
-	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
-	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
-	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
-	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
-	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
-	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
-	ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
-	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
-	ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
-	ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
-	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
-	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
-	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
-	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
-	ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
-	ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
-
+	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
 	ibdev->ib_dev.uverbs_ex_cmd_mask |=
-		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
 	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
	      IB_LINK_LAYER_ETHERNET) ||
	     (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
-	      IB_LINK_LAYER_ETHERNET))) {
-		ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
-		ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
-		ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
-		ibdev->ib_dev.create_rwq_ind_table =
-			mlx4_ib_create_rwq_ind_table;
-		ibdev->ib_dev.destroy_rwq_ind_table =
-			mlx4_ib_destroy_rwq_ind_table;
-		ibdev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
-	}
-
-	if (!mlx4_is_slave(ibdev->dev)) {
-		ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
-		ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
-		ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
-		ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
-	}
+	      IB_LINK_LAYER_ETHERNET)))
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
-		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
-		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
-
 		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
 	}
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
-		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
-		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
 		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
 	}
 
 	if (check_flow_steering_support(dev)) {
 		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
-		ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
-		ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
-
-		ibdev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
 	}
 
-	ibdev->ib_dev.uverbs_ex_cmd_mask |=
-		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+	if (!dev->caps.userspace_caps)
+		ibdev->ib_dev.ops.uverbs_abi_ver =
			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
 
 	mlx4_ib_alloc_eqs(dev, ibdev);
 
@@ -2811,6 +2727,7 @@
 	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
+		iboe->last_port_state[i] = IB_PORT_DOWN;
 	}
 
 	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
@@ -2908,8 +2825,9 @@
 	if (mlx4_ib_alloc_diag_counters(ibdev))
		goto err_steer_free_bitmap;
 
-	ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
-	if (ib_register_device(&ibdev->ib_dev, NULL))
+	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
+	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
+			       &dev->persist->pdev->dev))
		goto err_diag_counters;
 
 	if (mlx4_ib_mad_init(ibdev))
@@ -2929,12 +2847,6 @@
 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
		if (err)
-			goto err_notif;
-	}
-
-	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
-		if (device_create_file(&ibdev->ib_dev.dev,
-				       mlx4_class_attributes[j]))
			goto err_notif;
 	}
 
@@ -3057,10 +2969,8 @@
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
 
-		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
-					    IB_FLOW_DOMAIN_NIC,
-					    MLX4_FS_REGULAR,
-					    &mqp->reg_id);
+		err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
+					    MLX4_FS_REGULAR, &mqp->reg_id);
 	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
 	}