@@ -263,6 +263,8 @@
 	int hw_update = 0;
 	int i;
 	struct gid_entry *gids = NULL;
+	u16 vlan_id = 0xffff;
+	u8 mac[ETH_ALEN];
 
 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
 		return -EINVAL;
@@ -273,12 +275,16 @@
 	if (!context)
 		return -EINVAL;
 
+	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+	if (ret)
+		return ret;
 	port_gid_table = &iboe->gids[attr->port_num - 1];
 	spin_lock_bh(&iboe->lock);
 	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
 		if (!memcmp(&port_gid_table->gids[i].gid,
 			    &attr->gid, sizeof(attr->gid)) &&
-		    port_gid_table->gids[i].gid_type == attr->gid_type) {
+		    port_gid_table->gids[i].gid_type == attr->gid_type &&
+		    port_gid_table->gids[i].vlan_id == vlan_id) {
 			found = i;
 			break;
 		}
@@ -298,6 +304,7 @@
 			memcpy(&port_gid_table->gids[free].gid,
 			       &attr->gid, sizeof(attr->gid));
 			port_gid_table->gids[free].gid_type = attr->gid_type;
+			port_gid_table->gids[free].vlan_id = vlan_id;
 			port_gid_table->gids[free].ctx->real_index = free;
 			port_gid_table->gids[free].ctx->refcount = 1;
 			hw_update = 1;
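The lookup above now keys GID-cache entries on the triple (gid, gid_type, vlan_id) instead of just (gid, gid_type), with rdma_read_gid_l2_fields() supplying the L2 attributes and 0xffff standing in for "no VLAN". A minimal sketch of the extended match, assuming struct gid_entry in mlx4_ib.h has grown the vlan_id member this hunk stores (the helper name is hypothetical; the comparison mirrors the loop above):

	static bool gid_entry_matches(const struct gid_entry *e,
				      const struct ib_gid_attr *attr, u16 vlan_id)
	{
		/* same three-way comparison as the lookup loop above */
		return !memcmp(&e->gid, &attr->gid, sizeof(attr->gid)) &&
		       e->gid_type == attr->gid_type &&
		       e->vlan_id == vlan_id;
	}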
@@ -427,9 +434,6 @@
 	return real_index;
 }
 
-#define field_avail(type, fld, sz) (offsetof(type, fld) + \
-	sizeof(((type *)0)->fld) <= (sz))
-
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props,
 				struct ib_udata *uhw)
@@ -440,7 +444,7 @@
 	int err;
 	int have_ib_ports;
 	struct mlx4_uverbs_ex_query_device cmd;
-	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+	struct mlx4_uverbs_ex_query_device_resp resp = {};
 	struct mlx4_clock_params clock_params;
 
 	if (uhw->inlen) {
@@ -554,7 +558,6 @@
 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
-	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 	props->max_ah = INT_MAX;
@@ -592,7 +595,7 @@
 			sizeof(struct mlx4_wqe_data_seg);
 	}
 
-	if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
 		if (props->rss_caps.supported_qpts) {
 			resp.rss_caps.rx_hash_function =
 				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
@@ -616,7 +619,7 @@
 			sizeof(resp.rss_caps);
 	}
 
-	if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
 		if (dev->dev->caps.max_gso_sz &&
 		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
 		      IB_LINK_LAYER_ETHERNET) ||
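field_avail() was a driver-local spelling of a check the kernel already provides: offsetofend() yields the byte offset just past a struct member, so comparing it with uhw->outlen asks "did userspace leave room for this response field?". The two forms are equivalent:

	/* old, driver-local macro (removed above): */
	field_avail(typeof(resp), rss_caps, uhw->outlen)
	/* new, shared helper; identical result, one less macro to maintain: */
	offsetofend(typeof(resp), rss_caps) <= uhw->outlen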
@@ -743,7 +746,8 @@
 
 static u8 state_to_phys_state(enum ib_port_state state)
 {
-	return state == IB_PORT_ACTIVE ? 5 : 3;
+	return state == IB_PORT_ACTIVE ?
+		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
@@ -777,7 +781,8 @@
 	props->ip_gids = true;
 	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
-	props->pkey_tbl_len = 1;
+	if (mdev->dev->caps.pkey_table_len[port])
+		props->pkey_tbl_len = 1;
 	props->max_mtu = IB_MTU_4096;
 	props->max_vl_num = 2;
 	props->state = IB_PORT_DOWN;
@@ -1085,19 +1090,21 @@
 	return err;
 }
 
-static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
-						  struct ib_udata *udata)
+static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
+				  struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-	struct mlx4_ib_ucontext *context;
+	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
 	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
 	struct mlx4_ib_alloc_ucontext_resp resp;
 	int err;
 
 	if (!dev->ib_active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 
-	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
+	if (ibdev->ops.uverbs_abi_ver ==
+	    MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
 		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
 		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
 		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -1109,15 +1116,9 @@
 		resp.cqe_size = dev->dev->caps.cqe_size;
 	}
 
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
-	if (err) {
-		kfree(context);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;
 
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
@@ -1125,26 +1126,22 @@
 	INIT_LIST_HEAD(&context->wqn_ranges_list);
 	mutex_init(&context->wqn_ranges_mutex);
 
-	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
+	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
 		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
 	else
 		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
 
 	if (err) {
 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
-		kfree(context);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
 
-	return &context->ibucontext;
+	return err;
 }
 
-static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
 
 	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
-	kfree(context);
-
-	return 0;
 }
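This is the core-allocation pattern for ucontexts: the IB core kzalloc()s the container, sized by the INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext) entry in the ops table further down, calls the driver hook, and frees the memory itself on any error, so the driver neither allocates nor kfree()s and reports failure as a plain errno. The resulting contract, as a hedged sketch (the hook signature is the real one, the body is illustrative):

	static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
					  struct ib_udata *udata)
	{
		struct mlx4_ib_ucontext *context = to_mucontext(uctx);

		/* fill in the pre-allocated context; on failure simply
		 * return -Exxx and let the core free the memory */
		return 0;
	}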
@@ -1151,72 +1148,4 @@
-
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
-	/* vma_open is called when a new VMA is created on top of our VMA.
-	 * This is done through either mremap flow or split_vma (usually due
-	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
-	 * vma, as this VMA is strongly hardware related. Therefore we set the
-	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
-	 * calling us again and trying to do incorrect actions. We assume that
-	 * the original vma size is exactly a single page that there will be no
-	 * "splitting" operations on.
-	 */
-	area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
-	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
-	/* It's guaranteed that all VMAs opened on a FD are closed before the
-	 * file itself is closed, therefore no sync is needed with the regular
-	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
-	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
-	 * The close operation is usually called under mm->mmap_sem except when
-	 * process is exiting. The exiting case is handled explicitly as part
-	 * of mlx4_ib_disassociate_ucontext.
-	 */
-	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
-				area->vm_private_data;
-
-	/* set the vma context pointer to null in the mlx4_ib driver's private
-	 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
-	 */
-	mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
-	.open = mlx4_ib_vma_open,
-	.close = mlx4_ib_vma_close
-};
 
 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
-	int i;
-	struct vm_area_struct *vma;
-	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
-	/* need to protect from a race on closing the vma as part of
-	 * mlx4_ib_vma_close().
-	 */
-	for (i = 0; i < HW_BAR_COUNT; i++) {
-		vma = context->hw_bar_info[i].vma;
-		if (!vma)
-			continue;
-
-		zap_vma_ptes(context->hw_bar_info[i].vma,
-			     context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
-		context->hw_bar_info[i].vma->vm_flags &=
-			~(VM_SHARED | VM_MAYSHARE);
-		/* context going to be destroyed, should not access ops any more */
-		context->hw_bar_info[i].vma->vm_ops = NULL;
-	}
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
-				 struct mlx4_ib_vma_private_data *vma_private_data)
-{
-	vma_private_data->vma = vma;
-	vma->vm_private_data = vma_private_data;
-	vma->vm_ops = &mlx4_ib_vm_ops;
 }
@@ -1223,68 +1152,45 @@
 
 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct mlx4_ib_dev *dev = to_mdev(context->device);
-	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
 
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-		return -EINVAL;
+	switch (vma->vm_pgoff) {
+	case 0:
+		return rdma_user_mmap_io(context, vma,
+					 to_mucontext(context)->uar.pfn,
+					 PAGE_SIZE,
+					 pgprot_noncached(vma->vm_page_prot),
+					 NULL);
 
-	if (vma->vm_pgoff == 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
+	case 1:
+		if (dev->dev->caps.bf_reg_size == 0)
 			return -EINVAL;
+		return rdma_user_mmap_io(
+			context, vma,
+			to_mucontext(context)->uar.pfn +
+				dev->dev->caps.num_uars,
+			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+			NULL);
 
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
-
-	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
-			return -EINVAL;
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn +
-				       dev->dev->caps.num_uars,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
-	} else if (vma->vm_pgoff == 3) {
+	case 3: {
 		struct mlx4_clock_params params;
 		int ret;
 
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
-			return -EINVAL;
-
 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
 		if (ret)
 			return ret;
 
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (pci_resource_start(dev->dev->persist->pdev,
-							   params.bar) +
-					params.offset)
-				       >> PAGE_SHIFT,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
+		return rdma_user_mmap_io(
+			context, vma,
+			(pci_resource_start(dev->dev->persist->pdev,
+					    params.bar) +
			 params.offset) >>
+				PAGE_SHIFT,
+			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+			NULL);
+	}
 
-		mlx4_ib_set_vma_data(vma,
-				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
-	} else {
+	default:
 		return -EINVAL;
 	}
-
-	return 0;
 }
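rdma_user_mmap_io() absorbs everything the deleted lines did by hand: it checks that the VMA is exactly the requested size, applies the pgprot, performs the io_remap_pfn_range(), and records the mapping in the core so that ucontext disassociation can zap the PTEs centrally. That is why mlx4_ib_disassociate_ucontext() is now empty and the per-BAR "double mmap" guards and vm_operations bookkeeping are gone. One converted case, for reference:

	case 0:	/* UAR doorbell page */
		return rdma_user_mmap_io(context, vma,
					 to_mucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);	/* no private mmap entry */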
@@ -1291,34 +1197,17 @@
 
-static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
-				      struct ib_ucontext *context,
-				      struct ib_udata *udata)
+static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
-	struct mlx4_ib_pd *pd;
+	struct mlx4_ib_pd *pd = to_mpd(ibpd);
+	struct ib_device *ibdev = ibpd->device;
 	int err;
 
-	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return ERR_PTR(-ENOMEM);
-
 	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
-	if (err) {
-		kfree(pd);
-		return ERR_PTR(err);
+	if (err)
+		return err;
+
+	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
+		return -EFAULT;
 	}
-
-	if (context)
-		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
-			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
-			kfree(pd);
-			return ERR_PTR(-EFAULT);
-		}
-	return &pd->ibpd;
-}
-
-static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
-{
-	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
-	kfree(pd);
-
 	return 0;
 }
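PDs get the same core-allocation treatment as the ucontext, via INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd), and a non-NULL udata now distinguishes user from kernel callers in place of the old context-pointer test. Callers are unaffected; the in-kernel path is still, for example:

	/* the core allocates the mlx4_ib_pd container, then calls
	 * mlx4_ib_alloc_pd() on it; errors come back as ERR_PTR */
	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);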
@@ -1325,54 +1214,51 @@
 
-static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
-					  struct ib_ucontext *context,
-					  struct ib_udata *udata)
+static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
-	struct mlx4_ib_xrcd *xrcd;
+	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
+	return 0;
+}
+
+static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
+	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
 	struct ib_cq_init_attr cq_attr = {};
 	int err;
 
-	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
-		return ERR_PTR(-ENOSYS);
+	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+		return -EOPNOTSUPP;
 
-	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
-	if (!xrcd)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
+	err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
 	if (err)
-		goto err1;
+		return err;
 
-	xrcd->pd = ib_alloc_pd(ibdev, 0);
+	xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
 	if (IS_ERR(xrcd->pd)) {
 		err = PTR_ERR(xrcd->pd);
 		goto err2;
 	}
 
 	cq_attr.cqe = 1;
-	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
+	xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
 	if (IS_ERR(xrcd->cq)) {
 		err = PTR_ERR(xrcd->cq);
 		goto err3;
 	}
 
-	return &xrcd->ibxrcd;
+	return 0;
 
 err3:
 	ib_dealloc_pd(xrcd->pd);
 err2:
-	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
-err1:
-	kfree(xrcd);
-	return ERR_PTR(err);
+	mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
+	return err;
 }
 
-static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
-	kfree(xrcd);
-
 	return 0;
 }
 
@@ -1646,20 +1532,8 @@
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
 	int default_flow;
 
-	static const u16 __mlx4_domain[] = {
-		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
-		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
-		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
-		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
-	};
-
 	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
 		pr_err("Invalid priority value %d\n", flow_attr->priority);
-		return -EINVAL;
-	}
-
-	if (domain >= IB_FLOW_DOMAIN_NUM) {
-		pr_err("Invalid domain value %d\n", domain);
 		return -EINVAL;
 	}
 
@@ -1671,8 +1545,7 @@
 		return PTR_ERR(mailbox);
 	ctrl = mailbox->buf;
 
-	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
-				 flow_attr->priority);
+	ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
 	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
 	ctrl->port = flow_attr->port;
 	ctrl->qpn = cpu_to_be32(qp->qp_num);
@@ -1814,8 +1687,8 @@
 }
 
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
-					   struct ib_flow_attr *flow_attr,
-					   int domain, struct ib_udata *udata)
+					   struct ib_flow_attr *flow_attr,
+					   struct ib_udata *udata)
 {
 	int err = 0, i = 0, j = 0;
 	struct mlx4_ib_flow *mflow;
@@ -1881,8 +1754,8 @@
 	}
 
 	while (i < ARRAY_SIZE(type) && type[i]) {
-		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-					    &mflow->reg_id[i].id);
+		err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
+					    type[i], &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		if (is_bonded) {
@@ -1891,7 +1764,7 @@
 			 */
 			flow_attr->port = 2;
 			err = __mlx4_ib_create_flow(qp, flow_attr,
-						    domain, type[j],
+						    MLX4_DOMAIN_UVERBS, type[j],
 						    &mflow->reg_id[j].mirror);
 			flow_attr->port = 1;
 			if (err)
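With the domain argument gone from the uverbs path, the __mlx4_domain[] translation table and its range check are dead code: flows created through uverbs are by definition MLX4_DOMAIN_UVERBS, and in-kernel callers (see the final hunk) pass a MLX4_DOMAIN_* constant directly, so the priority word is composed without a lookup:

	/* before: cpu_to_be16(__mlx4_domain[domain] | flow_attr->priority)
	 * after, domain already being a MLX4_DOMAIN_* value: */
	ctrl->prio = cpu_to_be16(domain | flow_attr->priority);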
@@ -2143,39 +2016,44 @@
 	return err;
 }
 
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
-			char *buf)
+static ssize_t hca_type_show(struct device *device,
+			     struct device_attribute *attr, char *buf)
 {
 	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
 	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
 }
+static DEVICE_ATTR_RO(hca_type);
 
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
-			char *buf)
+static ssize_t hw_rev_show(struct device *device,
+			   struct device_attribute *attr, char *buf)
 {
 	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
 	return sprintf(buf, "%x\n", dev->dev->rev_id);
 }
+static DEVICE_ATTR_RO(hw_rev);
 
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
-			  char *buf)
+static ssize_t board_id_show(struct device *device,
+			     struct device_attribute *attr, char *buf)
 {
 	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
+
 	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
 		       dev->dev->board_id);
 }
+static DEVICE_ATTR_RO(board_id);
 
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mlx4_class_attributes[] = {
+	&dev_attr_hw_rev.attr,
+	&dev_attr_hca_type.attr,
+	&dev_attr_board_id.attr,
+	NULL
+};
 
-static struct device_attribute *mlx4_class_attributes[] = {
-	&dev_attr_hw_rev,
-	&dev_attr_hca_type,
-	&dev_attr_board_id
+static const struct attribute_group mlx4_attr_group = {
+	.attrs = mlx4_class_attributes,
 };
 
 struct diag_counter {
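The show functions are renamed to the <name>_show pattern that DEVICE_ATTR_RO() assumes, and the three attributes become one attribute_group handed to the core via rdma_set_device_sysfs_group() later in this patch, so the files are created before the device is announced to userspace instead of by device_create_file() calls after registration. Roughly (a hedged paraphrase of the macro), DEVICE_ATTR_RO(hca_type) expands to:

	struct device_attribute dev_attr_hca_type =
		__ATTR(hca_type, 0444, hca_type_show, NULL);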
@@ -2320,6 +2198,11 @@
 	}
 }
 
+static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
+	.alloc_hw_stats = mlx4_ib_alloc_hw_stats,
+	.get_hw_stats = mlx4_ib_get_hw_stats,
+};
+
 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
 {
 	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
@@ -2346,8 +2229,7 @@
 					diag[i].offset, i);
 	}
 
-	ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
-	ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
+	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
 
 	return 0;
 
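ib_set_device_ops() merges a const table into the device's ops, so optional features (here, hardware stats) are enabled by applying an extra table instead of poking function pointers one at a time. A hedged sketch of the core's merge step, which is macro-generated in drivers/infiniband/core/device.c:

	/* approximate shape of the per-hook merge: */
	#define SET_DEVICE_OP(dev_ops, name)			\
		do {						\
			if (ops->name && !(dev_ops)->name)	\
				(dev_ops)->name = ops->name;	\
		} while (0)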
@@ -2451,6 +2333,32 @@
 		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
 		     event == NETDEV_UP || event == NETDEV_CHANGE))
 			update_qps_port = port;
+
+		if (dev == iboe->netdevs[port - 1] &&
+		    (event == NETDEV_UP || event == NETDEV_DOWN)) {
+			enum ib_port_state port_state;
+			struct ib_event ibev = { };
+
+			if (ib_get_cached_port_state(&ibdev->ib_dev, port,
+						     &port_state))
+				continue;
+
+			if (event == NETDEV_UP &&
+			    (port_state != IB_PORT_ACTIVE ||
+			     iboe->last_port_state[port - 1] != IB_PORT_DOWN))
+				continue;
+			if (event == NETDEV_DOWN &&
+			    (port_state != IB_PORT_DOWN ||
+			     iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
+				continue;
+			iboe->last_port_state[port - 1] = port_state;
+
+			ibev.device = &ibdev->ib_dev;
+			ibev.element.port_num = port;
+			ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
+							  IB_EVENT_PORT_ERR;
+			ib_dispatch_event(&ibev);
+		}
 
 	}
 	spin_unlock_bh(&iboe->lock);
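The new block converts netdev link events into IB port events, but only on genuine edges: the cached port state must match the event and must differ from the last state reported for that port, so consumers see each transition exactly once. Rewriting the two continue chains above in positive form (a hedged paraphrase, not code from the patch):

	bool report_up = event == NETDEV_UP &&
			 port_state == IB_PORT_ACTIVE &&
			 iboe->last_port_state[port - 1] == IB_PORT_DOWN;
	bool report_down = event == NETDEV_DOWN &&
			   port_state == IB_PORT_DOWN &&
			   iboe->last_port_state[port - 1] == IB_PORT_ACTIVE;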
@@ -2599,6 +2507,98 @@
 		 (int) dev->dev->caps.fw_ver & 0xffff);
 }
 
+static const struct ib_device_ops mlx4_ib_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_MLX4,
+	.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
+
+	.add_gid = mlx4_ib_add_gid,
+	.alloc_mr = mlx4_ib_alloc_mr,
+	.alloc_pd = mlx4_ib_alloc_pd,
+	.alloc_ucontext = mlx4_ib_alloc_ucontext,
+	.attach_mcast = mlx4_ib_mcg_attach,
+	.create_ah = mlx4_ib_create_ah,
+	.create_cq = mlx4_ib_create_cq,
+	.create_qp = mlx4_ib_create_qp,
+	.create_srq = mlx4_ib_create_srq,
+	.dealloc_pd = mlx4_ib_dealloc_pd,
+	.dealloc_ucontext = mlx4_ib_dealloc_ucontext,
+	.del_gid = mlx4_ib_del_gid,
+	.dereg_mr = mlx4_ib_dereg_mr,
+	.destroy_ah = mlx4_ib_destroy_ah,
+	.destroy_cq = mlx4_ib_destroy_cq,
+	.destroy_qp = mlx4_ib_destroy_qp,
+	.destroy_srq = mlx4_ib_destroy_srq,
+	.detach_mcast = mlx4_ib_mcg_detach,
+	.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
+	.drain_rq = mlx4_ib_drain_rq,
+	.drain_sq = mlx4_ib_drain_sq,
+	.get_dev_fw_str = get_fw_ver_str,
+	.get_dma_mr = mlx4_ib_get_dma_mr,
+	.get_link_layer = mlx4_ib_port_link_layer,
+	.get_netdev = mlx4_ib_get_netdev,
+	.get_port_immutable = mlx4_port_immutable,
+	.map_mr_sg = mlx4_ib_map_mr_sg,
+	.mmap = mlx4_ib_mmap,
+	.modify_cq = mlx4_ib_modify_cq,
+	.modify_device = mlx4_ib_modify_device,
+	.modify_port = mlx4_ib_modify_port,
+	.modify_qp = mlx4_ib_modify_qp,
+	.modify_srq = mlx4_ib_modify_srq,
+	.poll_cq = mlx4_ib_poll_cq,
+	.post_recv = mlx4_ib_post_recv,
+	.post_send = mlx4_ib_post_send,
+	.post_srq_recv = mlx4_ib_post_srq_recv,
+	.process_mad = mlx4_ib_process_mad,
+	.query_ah = mlx4_ib_query_ah,
+	.query_device = mlx4_ib_query_device,
+	.query_gid = mlx4_ib_query_gid,
+	.query_pkey = mlx4_ib_query_pkey,
+	.query_port = mlx4_ib_query_port,
+	.query_qp = mlx4_ib_query_qp,
+	.query_srq = mlx4_ib_query_srq,
+	.reg_user_mr = mlx4_ib_reg_user_mr,
+	.req_notify_cq = mlx4_ib_arm_cq,
+	.rereg_user_mr = mlx4_ib_rereg_user_mr,
+	.resize_cq = mlx4_ib_resize_cq,
+
+	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
+	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
+	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
+	.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
+	.create_wq = mlx4_ib_create_wq,
+	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
+	.destroy_wq = mlx4_ib_destroy_wq,
+	.modify_wq = mlx4_ib_modify_wq,
+
+	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
+			   ib_rwq_ind_tbl),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
+	.alloc_mw = mlx4_ib_alloc_mw,
+	.dealloc_mw = mlx4_ib_dealloc_mw,
+
+	INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
+	.alloc_xrcd = mlx4_ib_alloc_xrcd,
+	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
+
+	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
+};
+
+static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
+	.create_flow = mlx4_ib_create_flow,
+	.destroy_flow = mlx4_ib_destroy_flow,
+};
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
@@ -2622,7 +2622,7 @@
 	if (num_ports == 0)
 		return NULL;
 
-	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
+	ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
 	if (!ibdev) {
 		dev_err(&dev->persist->pdev->dev,
 			"Device struct alloc failed\n");
@@ -2646,8 +2646,6 @@
 	ibdev->dev = dev;
 	ibdev->bond_next_port = 0;
 
-	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
-	ibdev->ib_dev.owner = THIS_MODULE;
 	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
 	ibdev->num_ports = num_ports;
@@ -2655,14 +2653,6 @@
 		1 : ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
-	ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
-	ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
-	ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
-
-	if (dev->caps.userspace_caps)
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
-	else
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
 
 	ibdev->ib_dev.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -2690,115 +2680,41 @@
 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
 
-	ibdev->ib_dev.query_device = mlx4_ib_query_device;
-	ibdev->ib_dev.query_port = mlx4_ib_query_port;
-	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
-	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
-	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
-	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
-	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
-	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
-	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
-	ibdev->ib_dev.mmap = mlx4_ib_mmap;
-	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
-	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
-	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
-	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
-	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
-	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
-	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
-	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
-	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
-	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
-	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
-	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
-	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
-	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
-	ibdev->ib_dev.drain_sq = mlx4_ib_drain_sq;
-	ibdev->ib_dev.drain_rq = mlx4_ib_drain_rq;
-	ibdev->ib_dev.post_send = mlx4_ib_post_send;
-	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
-	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
-	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
-	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
-	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
-	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
-	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
-	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
-	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
-	ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
-	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
-	ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
-	ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
-	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
-	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
-	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
-	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
-	ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
-	ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
-
+	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
 	ibdev->ib_dev.uverbs_ex_cmd_mask |=
-		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
 	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
 	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
 	      IB_LINK_LAYER_ETHERNET) ||
 	     (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
-	      IB_LINK_LAYER_ETHERNET))) {
-		ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
-		ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
-		ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
-		ibdev->ib_dev.create_rwq_ind_table =
-			mlx4_ib_create_rwq_ind_table;
-		ibdev->ib_dev.destroy_rwq_ind_table =
-			mlx4_ib_destroy_rwq_ind_table;
-		ibdev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
-	}
-
-	if (!mlx4_is_slave(ibdev->dev)) {
-		ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
-		ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
-		ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
-		ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
-	}
+	      IB_LINK_LAYER_ETHERNET)))
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
 	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
-		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
-		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
-
 		ibdev->ib_dev.uverbs_cmd_mask |=
 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
 	}
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
-		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
-		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
 		ibdev->ib_dev.uverbs_cmd_mask |=
 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
 	}
 
 	if (check_flow_steering_support(dev)) {
 		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
-		ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
-		ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
-
-		ibdev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
 	}
 
-	ibdev->ib_dev.uverbs_ex_cmd_mask |=
-		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+	if (!dev->caps.userspace_caps)
+		ibdev->ib_dev.ops.uverbs_abi_ver =
			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
 
 	mlx4_ib_alloc_eqs(dev, ibdev);
 
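The payoff of the ops tables defined before mlx4_ib_add(): roughly fifty runtime assignments collapse into a single ib_set_device_ops() call, conditional features (WQs, MWs, XRC, flow steering) stack as additional tables, and the alloc_fmr/map_phys_fmr/unmap_fmr/dealloc_fmr hooks are dropped entirely, consistent with FMRs being phased out of the RDMA core. Scalars that must differ per device are simply overridden after the merge:

	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
	/* ... feature tables applied conditionally ... */
	if (!dev->caps.userspace_caps)
		ibdev->ib_dev.ops.uverbs_abi_ver =
			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;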
@@ -2811,6 +2727,7 @@
 	for (i = 0; i < ibdev->num_ports; ++i) {
 		mutex_init(&ibdev->counters_table[i].mutex);
 		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
+		iboe->last_port_state[i] = IB_PORT_DOWN;
 	}
 
 	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
@@ -2908,8 +2825,9 @@
 	if (mlx4_ib_alloc_diag_counters(ibdev))
 		goto err_steer_free_bitmap;
 
-	ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
-	if (ib_register_device(&ibdev->ib_dev, NULL))
+	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
+	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
+			       &dev->persist->pdev->dev))
 		goto err_diag_counters;
 
 	if (mlx4_ib_mad_init(ibdev))
@@ -2929,12 +2847,6 @@
 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
 		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
 		if (err)
-			goto err_notif;
-	}
-
-	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
-		if (device_create_file(&ibdev->ib_dev.dev,
-				       mlx4_class_attributes[j]))
 			goto err_notif;
 	}
 
@@ -3057,10 +2969,8 @@
 		/* Add an empty rule for IB L2 */
 		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
 
-		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
-					    IB_FLOW_DOMAIN_NIC,
-					    MLX4_FS_REGULAR,
-					    &mqp->reg_id);
+		err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
+					    MLX4_FS_REGULAR, &mqp->reg_id);
 	} else {
 		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
 	}