forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-09 244b2c5ca8b14627e4a17755e5922221e121c771
kernel/drivers/infiniband/core/sa_query.c
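This diff pulls a batch of mainline RDMA core cleanups into sa_query.c: the query-ID IDR and its external spinlock become a single allocating XArray, open-coded sizeof((struct foo *)0)->field expressions become sizeof_field(), the port[0] trailing array becomes a C99 flexible array member allocated with struct_size(), the netlink path-record request path is restructured so the send happens under ib_nl_request_lock, nla_parse() callers follow the strict-validation split, address-handle create/destroy gain sleepability flags, timeouts widen to unsigned long, and ib_sa_add_one() reports errors instead of failing silently.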
@@ -40,7 +40,7 @@
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/kref.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 #include <linux/workqueue.h>
 #include <uapi/linux/if_ether.h>
 #include <rdma/ib_pack.h>
@@ -101,7 +101,7 @@
 struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
-	struct ib_sa_port port[0];
+	struct ib_sa_port port[];
 };

 struct ib_sa_query {
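port[0] is the old GCC zero-length-array idiom for a trailing variable-sized array; port[] is the standard C99 flexible array member, which compilers and fortify checks understand well enough to catch out-of-bounds accesses. The matching allocation switches to struct_size() in the ib_sa_add_one() hunk below.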
@@ -174,7 +174,7 @@
 };


-static void ib_sa_add_one(struct ib_device *device);
+static int ib_sa_add_one(struct ib_device *device);
 static void ib_sa_remove_one(struct ib_device *device, void *client_data);

 static struct ib_client sa_client = {
@@ -183,15 +183,14 @@
	.remove = ib_sa_remove_one
 };

-static DEFINE_SPINLOCK(idr_lock);
-static DEFINE_IDR(query_idr);
+static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

 static DEFINE_SPINLOCK(tid_lock);
 static u32 tid;

 #define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
-	.struct_size_bytes = sizeof((struct sa_path_rec *)0)->field,	\
+	.struct_size_bytes = sizeof_field(struct sa_path_rec, field),	\
	.field_name = "sa_path_rec:" #field

 static const struct ib_field path_rec_table[] = {
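Two notes on this hunk. DEFINE_XARRAY_FLAGS replaces the lock/IDR pair because an XArray carries its own spinlock: XA_FLAGS_ALLOC enables ID allocation through xa_alloc() and friends, and XA_FLAGS_LOCK_IRQ tells lockdep that the internal lock is taken with interrupts disabled. The sizeof_field() conversion is purely cosmetic; include/linux/stddef.h defines it as the same deref-inside-sizeof trick these macros open-coded:

	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

	/* e.g. sizeof_field(struct sa_path_rec, dgid) == sizeof(union ib_gid) */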
@@ -293,7 +292,7 @@
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes = \
-		sizeof((struct sa_path_rec *)0)->field, \
+		sizeof_field(struct sa_path_rec, field), \
	.field_name = "sa_path_rec:" #field

 static const struct ib_field opa_path_rec_table[] = {
@@ -421,7 +420,7 @@

 #define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
-	.struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
+	.struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field),	\
	.field_name = "sa_mcmember_rec:" #field

 static const struct ib_field mcmember_rec_table[] = {
@@ -505,7 +504,7 @@

 #define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
-	.struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
+	.struct_size_bytes = sizeof_field(struct ib_sa_service_rec, field),	\
	.field_name = "sa_service_rec:" #field

 static const struct ib_field service_rec_table[] = {
@@ -553,7 +552,7 @@

 #define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
-	.struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field,	\
+	.struct_size_bytes = sizeof_field(struct ib_class_port_info, field),	\
	.field_name = "ib_class_port_info:" #field

 static const struct ib_field ib_classport_info_rec_table[] = {
@@ -631,7 +630,7 @@
	.struct_offset_bytes =\
		offsetof(struct opa_class_port_info, field),	\
	.struct_size_bytes = \
-		sizeof((struct opa_class_port_info *)0)->field,	\
+		sizeof_field(struct opa_class_port_info, field),	\
	.field_name = "opa_class_port_info:" #field

 static const struct ib_field opa_classport_info_rec_table[] = {
@@ -711,7 +710,7 @@

 #define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
-	.struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
+	.struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
	.field_name = "sa_guidinfo_rec:" #field

 static const struct ib_field guidinfo_rec_table[] = {
@@ -761,8 +760,9 @@

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
-	memcpy(header->device_name, query->port->agent->device->name,
-	       LS_DEVICE_NAME_MAX);
+	strscpy_pad(header->device_name,
+		    dev_name(&query->port->agent->device->dev),
+		    LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
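Two changes in one hunk: the device name is now read through dev_name() on the ib_device's embedded struct device rather than a name field on ib_device itself, and the fixed-length memcpy() becomes strscpy_pad(), which guarantees NUL termination and zero-fills the remainder of the LS_DEVICE_NAME_MAX buffer so no uninitialized bytes leak into the netlink message.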
@@ -830,14 +830,20 @@
	return len;
 }

-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
-	int ret = 0;
	struct ib_sa_mad *mad;
	int len;
+	unsigned long flags;
+	unsigned long delay;
+	gfp_t gfp_flag;
+	int ret;
+
+	INIT_LIST_HEAD(&query->list);
+	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -862,44 +868,25 @@
	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

-	ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
-	if (!ret)
-		ret = len;
-	else
-		ret = 0;
+	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+		GFP_NOWAIT;

-	return ret;
-}
-
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-	unsigned long flags;
-	unsigned long delay;
-	int ret;
-
-	INIT_LIST_HEAD(&query->list);
-	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
-
-	/* Put the request on the list first.*/
	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
+
+	if (ret)
+		goto out;
+
+	/* Put the request on the list.*/
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

-	ret = ib_nl_send_msg(query, gfp_mask);
-	if (ret <= 0) {
-		ret = -EIO;
-		/* Remove the request */
-		spin_lock_irqsave(&ib_nl_request_lock, flags);
-		list_del(&query->list);
-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-	} else {
-		ret = 0;
-	}
+out:
+	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
 }
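This is the most substantive change in the file. Previously the request was listed on ib_nl_request_list, the lock was dropped, and only then was the message sent via ib_nl_send_msg(), so the response handler or the timeout worker could find (and free) a request whose send subsequently failed; the failure path then had to re-take the lock to unlink it. Folding the send into ib_nl_make_request() and calling rdma_nl_multicast() under ib_nl_request_lock means the request is only listed once the send has succeeded; this matches the upstream use-after-free fix for ib_nl_make_request(). Two knock-on effects are visible: rdma_nl_multicast() now takes the net namespace (&init_net) as its first argument, and since the send now runs under a spinlock, the caller's gfp_mask is clamped to GFP_ATOMIC or GFP_NOWAIT.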
@@ -1037,8 +1024,8 @@
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

-	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
-			nlmsg_len(nlh), ib_nl_policy, NULL);
+	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+				   nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;
@@ -1089,8 +1076,8 @@
	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

-	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
-			nlmsg_len(nlh), ib_nl_policy, NULL);
+	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+				   nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

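nla_parse() grew strict validation in v5.2 and was split into strict and deprecated variants; nla_parse_deprecated() keeps the old liberal behavior (unknown attribute types are skipped rather than rejected), so both call sites here are a rename with no behavior change.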
@@ -1156,7 +1143,7 @@
 {
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

-	rdma_destroy_ah(sm_ah->ah);
+	rdma_destroy_ah(sm_ah->ah, 0);
	kfree(sm_ah);
 }

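rdma_destroy_ah(), like rdma_create_ah() further down, now takes a flags argument stating whether the caller can sleep. Passing 0 here means free_sm_ah() may be invoked from atomic context, so the driver must not sleep; the create side passes RDMA_CREATE_AH_SLEEPABLE because update_sm_ah() runs from a workqueue.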
@@ -1189,14 +1176,14 @@
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

-	spin_lock_irqsave(&idr_lock, flags);
-	if (idr_find(&query_idr, id) != query) {
-		spin_unlock_irqrestore(&idr_lock, flags);
+	xa_lock_irqsave(&queries, flags);
+	if (xa_load(&queries, id) != query) {
+		xa_unlock_irqrestore(&queries, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
-	spin_unlock_irqrestore(&idr_lock, flags);
+	xa_unlock_irqrestore(&queries, flags);

	/*
	 * If the query is still on the netlink request list, schedule
@@ -1225,45 +1212,6 @@
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
-}
-
-static int roce_resolve_route_from_path(struct sa_path_rec *rec,
-					const struct ib_gid_attr *attr)
-{
-	struct rdma_dev_addr dev_addr = {};
-	union {
-		struct sockaddr_in _sockaddr_in;
-		struct sockaddr_in6 _sockaddr_in6;
-	} sgid_addr, dgid_addr;
-	int ret;
-
-	if (rec->roce.route_resolved)
-		return 0;
-	if (!attr || !attr->ndev)
-		return -EINVAL;
-
-	dev_addr.bound_dev_if = attr->ndev->ifindex;
-	/* TODO: Use net from the ib_gid_attr once it is added to it,
-	 * until than, limit itself to init_net.
-	 */
-	dev_addr.net = &init_net;
-
-	rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
-	rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
-
-	/* validate the route */
-	ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
-				    (struct sockaddr *)&dgid_addr, &dev_addr);
-	if (ret)
-		return ret;
-
-	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
-	     dev_addr.network == RDMA_NETWORK_IPV6) &&
-	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
-		return -EINVAL;
-
-	rec->roce.route_resolved = true;
-	return 0;
 }

 static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
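roce_resolve_route_from_path() is deleted from this file rather than from the kernel: in the corresponding upstream rework it appears to move next to the rest of the address-resolution code (drivers/infiniband/core/addr.c), where it can take the net namespace from the GID attribute instead of the init_net fallback flagged by the TODO above.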
@@ -1295,7 +1243,7 @@
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialization from path record.
- * @sgid_attr: SGID attribute to consider during initialization.
+ * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for IB link layer it optionally contains a reference to SGID attribute
@@ -1408,23 +1356,17 @@
	spin_unlock_irqrestore(&tid_lock, flags);
 }

-static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
+		    gfp_t gfp_mask)
 {
-	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

-	if (preload)
-		idr_preload(gfp_mask);
-	spin_lock_irqsave(&idr_lock, flags);
-
-	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
-
-	spin_unlock_irqrestore(&idr_lock, flags);
-	if (preload)
-		idr_preload_end();
-	if (id < 0)
-		return id;
+	xa_lock_irqsave(&queries, flags);
+	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
+	xa_unlock_irqrestore(&queries, flags);
+	if (ret < 0)
+		return ret;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
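The idr_preload() dance disappears because __xa_alloc() handles its own node allocation: when the gfp mask allows blocking, it drops and retakes the xa_lock around the allocation and retries. A minimal self-contained sketch of the allocate/erase lifecycle send_mad() now uses, with hypothetical obj_* names:

	#include <linux/xarray.h>

	static DEFINE_XARRAY_FLAGS(objs, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

	/* Store obj under the lowest free 32-bit ID; returns the ID or -errno. */
	static int obj_track(void *obj, gfp_t gfp)
	{
		unsigned long flags;
		u32 id;
		int ret;

		xa_lock_irqsave(&objs, flags);
		ret = __xa_alloc(&objs, &id, obj, xa_limit_32b, gfp);
		xa_unlock_irqrestore(&objs, flags);
		return ret ? ret : (int)id;
	}

	static void obj_untrack(u32 id)
	{
		unsigned long flags;

		xa_lock_irqsave(&objs, flags);
		__xa_erase(&objs, id);
		xa_unlock_irqrestore(&objs, flags);
	}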
@@ -1432,7 +1374,7 @@

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
-		if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
+		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
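The dropped negation is not a logic change: rdma_nl_chk_listeners() used to return 0 when the netlink group had listeners and was later converted to return bool (true when listeners exist), so the call site flips with it.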
@@ -1441,9 +1383,9 @@

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
-		spin_lock_irqsave(&idr_lock, flags);
-		idr_remove(&query_idr, id);
-		spin_unlock_irqrestore(&idr_lock, flags);
+		xa_lock_irqsave(&queries, flags);
+		__xa_erase(&queries, id);
+		xa_unlock_irqrestore(&queries, flags);
	}

	/*
@@ -1467,16 +1409,12 @@
 EXPORT_SYMBOL(ib_sa_pack_path);

 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
-					 struct ib_device *device,
+					 struct ib_sa_device *sa_dev,
					 u8 port_num)
 {
-	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;
-
-	if (!sa_dev)
-		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
@@ -1505,8 +1443,8 @@
 * query is possible.
 */
 static int opa_pr_query_possible(struct ib_sa_client *client,
-				 struct ib_device *device,
-				 u8 port_num,
+				 struct ib_sa_device *sa_dev,
+				 struct ib_device *device, u8 port_num,
				 struct sa_path_rec *rec)
 {
	struct ib_port_attr port_attr;
@@ -1514,7 +1452,7 @@
	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

-	if (ib_sa_opa_pathrecord_support(client, device, port_num))
+	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
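The opa_pr_query_possible() / ib_sa_opa_pathrecord_support() plumbing passes the already-resolved struct ib_sa_device down from the caller instead of re-deriving it via ib_get_client_data(), which also makes the !sa_dev check redundant; as in ib_sa_remove_one() near the end of the file, the client core no longer invokes these paths with NULL client data.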
@@ -1598,7 +1536,7 @@
		       struct ib_device *device, u8 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
-		       int timeout_ms, gfp_t gfp_mask,
+		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					void *context),
@@ -1629,7 +1567,8 @@

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
-		status = opa_pr_query_possible(client, device, port_num, rec);
+		status = opa_pr_query_possible(client, sa_dev, device, port_num,
+					       rec);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
@@ -1752,7 +1691,7 @@
		struct ib_device *device, u8 port_num, u8 method,
		struct ib_sa_service_rec *rec,
		ib_sa_comp_mask comp_mask,
-		int timeout_ms, gfp_t gfp_mask,
+		unsigned long timeout_ms, gfp_t gfp_mask,
		void (*callback)(int status,
				 struct ib_sa_service_rec *resp,
				 void *context),
@@ -1849,7 +1788,7 @@
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
-			     int timeout_ms, gfp_t gfp_mask,
+			     unsigned long timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
@@ -1940,7 +1879,7 @@
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
-			      int timeout_ms, gfp_t gfp_mask,
+			      unsigned long timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
@@ -2107,7 +2046,7 @@
 }

 static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
-					  int timeout_ms,
+					  unsigned long timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
@@ -2235,9 +2174,9 @@
		break;
	}

-	spin_lock_irqsave(&idr_lock, flags);
-	idr_remove(&query_idr, query->id);
-	spin_unlock_irqrestore(&idr_lock, flags);
+	xa_lock_irqsave(&queries, flags);
+	__xa_erase(&queries, query->id);
+	xa_unlock_irqrestore(&queries, flags);

	free_mad(query);
	if (query->client)
@@ -2323,7 +2262,8 @@
			     cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

-	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
+	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
+				    RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
@@ -2379,20 +2319,19 @@
	}
 }

-static void ib_sa_add_one(struct ib_device *device)
+static int ib_sa_add_one(struct ib_device *device)
 {
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;
+	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

-	sa_dev = kzalloc(sizeof *sa_dev +
-			 (e - s + 1) * sizeof (struct ib_sa_port),
-			 GFP_KERNEL);
+	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
	if (!sa_dev)
-		return;
+		return -ENOMEM;

	sa_dev->start_port = s;
	sa_dev->end_port = e;
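struct_size() (include/linux/overflow.h) computes the same total as the removed expression, but with checked arithmetic, and it pairs naturally with the port[] flexible array member introduced earlier:

	/* Conceptually: */
	size_t bytes = sizeof(struct ib_sa_device) + n * sizeof(struct ib_sa_port);
	/*
	 * ...except struct_size() saturates to SIZE_MAX if the math overflows,
	 * so kzalloc() fails cleanly instead of returning an undersized buffer.
	 */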
@@ -2412,8 +2351,10 @@
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
-		if (IS_ERR(sa_dev->port[i].agent))
+		if (IS_ERR(sa_dev->port[i].agent)) {
+			ret = PTR_ERR(sa_dev->port[i].agent);
			goto err;
+		}

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
@@ -2422,8 +2363,10 @@
		count++;
	}

-	if (!count)
+	if (!count) {
+		ret = -EOPNOTSUPP;
		goto free;
+	}

	ib_set_client_data(device, &sa_client, sa_dev);

@@ -2442,7 +2385,7 @@
			update_sm_ah(&sa_dev->port[i].update_task);
	}

-	return;
+	return 0;

 err:
	while (--i >= 0) {
@@ -2451,16 +2394,13 @@
	}
 free:
	kfree(sa_dev);
-	return;
+	return ret;
 }

 static void ib_sa_remove_one(struct ib_device *device, void *client_data)
 {
	struct ib_sa_device *sa_dev = client_data;
	int i;
-
-	if (!sa_dev)
-		return;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);
@@ -2523,5 +2463,5 @@
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
-	idr_destroy(&query_idr);
+	WARN_ON(!xa_empty(&queries));
 }
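An empty XArray owns no memory, so there is nothing to tear down at module exit; WARN_ON(!xa_empty(&queries)) instead turns a leaked query into a loud warning, where idr_destroy() would have freed the bookkeeping silently.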