2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/net/smc/smc_core.c
@@ -13,6 +13,9 @@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/reboot.h>
+#include <linux/mutex.h>
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <rdma/ib_verbs.h>
@@ -31,7 +34,6 @@
 #define SMC_LGR_NUM_INCR	256
 #define SMC_LGR_FREE_DELAY_SERV	(600 * HZ)
 #define SMC_LGR_FREE_DELAY_CLNT	(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
-#define SMC_LGR_FREE_DELAY_FAST	(8 * HZ)

 static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
 	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
@@ -39,8 +41,27 @@
 	.num = 0,
 };

+static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
+static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
+
 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
 			 struct smc_buf_desc *buf_desc);
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
+
+static void smc_link_down_work(struct work_struct *work);
+
+/* return head of link group list and its lock for a given link group */
+static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
+						  spinlock_t **lgr_lock)
+{
+	if (lgr->is_smcd) {
+		*lgr_lock = &lgr->smcd->lgr_lock;
+		return &lgr->smcd->lgr_list;
+	}
+
+	*lgr_lock = &smc_lgr_list.lock;
+	return &smc_lgr_list.list;
+}

 static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
 {
@@ -48,14 +69,12 @@
 	 * creation. For client use a somewhat higher removal delay time,
 	 * otherwise there is a risk of out-of-sync link groups.
 	 */
-	mod_delayed_work(system_wq, &lgr->free_work,
-			 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
-			 SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV);
-}
-
-void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
-{
-	mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST);
+	if (!lgr->freeing) {
+		mod_delayed_work(system_wq, &lgr->free_work,
+				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
+				 SMC_LGR_FREE_DELAY_CLNT :
+				 SMC_LGR_FREE_DELAY_SERV);
+	}
 }

 /* Register connection's alert token in our lookup structure.
@@ -85,16 +104,60 @@
 	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
 }

+/* assign an SMC-R link to the connection */
+static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
+{
+	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
+					       SMC_LNK_ACTIVE;
+	int i, j;
+
+	/* do link balancing */
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		struct smc_link *lnk = &conn->lgr->lnk[i];
+
+		if (lnk->state != expected || lnk->link_is_asym)
+			continue;
+		if (conn->lgr->role == SMC_CLNT) {
+			conn->lnk = lnk; /* temporary, SMC server assigns link*/
+			break;
+		}
+		if (conn->lgr->conns_num % 2) {
+			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
+				struct smc_link *lnk2;
+
+				lnk2 = &conn->lgr->lnk[j];
+				if (lnk2->state == expected &&
+				    !lnk2->link_is_asym) {
+					conn->lnk = lnk2;
+					break;
+				}
+			}
+		}
+		if (!conn->lnk)
+			conn->lnk = lnk;
+		break;
+	}
+	if (!conn->lnk)
+		return SMC_CLC_DECL_NOACTLINK;
+	return 0;
+}
+
 /* Register connection in link group by assigning an alert token
  * registered in a search tree.
  * Requires @conns_lock
  * Note that '0' is a reserved value and not assigned.
  */
-static void smc_lgr_register_conn(struct smc_connection *conn)
+static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
 {
 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 	static atomic_t nexttoken = ATOMIC_INIT(0);
+	int rc;

+	if (!conn->lgr->is_smcd) {
+		rc = smcr_lgr_conn_assign_link(conn, first);
+		if (rc)
+			return rc;
+	}
 	/* find a new alert_token_local value not yet used by some connection
 	 * in this link group
 	 */
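The balancing loop in smcr_lgr_conn_assign_link() spreads connections over the active links of a link group: even-numbered connections take the first eligible link, odd-numbered ones prefer the next eligible link after it, and the first link stays the fallback. A standalone sketch of just that selection logic, with hypothetical demo types rather than the kernel structures:

#include <stdio.h>

#define LINKS_MAX 3

struct demo_link { int active; int id; };

static int pick_link(struct demo_link *lnk, int conns_num)
{
	for (int i = 0; i < LINKS_MAX; i++) {
		if (!lnk[i].active)
			continue;
		if (conns_num % 2) {	/* odd connection: spread the load */
			for (int j = i + 1; j < LINKS_MAX; j++)
				if (lnk[j].active)
					return lnk[j].id;
		}
		return lnk[i].id;	/* even connection, or no 2nd link */
	}
	return -1;			/* no active link at all */
}

int main(void)
{
	struct demo_link lnk[LINKS_MAX] = { {1, 0}, {0, 1}, {1, 2} };

	for (int n = 0; n < 4; n++)	/* prints links 0, 2, 0, 2 */
		printf("conn %d -> link %d\n", n, pick_link(lnk, n));
	return 0;
}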
@@ -106,6 +169,7 @@
 	}
 	smc_lgr_add_alert_token(conn);
 	conn->lgr->conns_num++;
+	return 0;
 }

 /* Unregister connection and reset the alert token of the given connection<
@@ -118,7 +182,6 @@
 	rb_erase(&conn->alert_node, &lgr->conns_all);
 	lgr->conns_num--;
 	conn->alert_token_local = 0;
-	conn->lgr = NULL;
 	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
 }

@@ -135,143 +198,152 @@
 		__smc_lgr_unregister_conn(conn);
 	}
 	write_unlock_bh(&lgr->conns_lock);
+	conn->lgr = NULL;
 }

-/* Send delete link, either as client to request the initiation
- * of the DELETE LINK sequence from server; or as server to
- * initiate the delete processing. See smc_llc_rx_delete_link().
- */
-static int smc_link_send_delete(struct smc_link *lnk)
+void smc_lgr_cleanup_early(struct smc_connection *conn)
 {
-	if (lnk->state == SMC_LNK_ACTIVE &&
-	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
-		smc_llc_link_deleting(lnk);
-		return 0;
-	}
-	return -ENOTCONN;
+	struct smc_link_group *lgr = conn->lgr;
+	spinlock_t *lgr_lock;
+
+	if (!lgr)
+		return;
+
+	smc_conn_free(conn);
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
+	/* do not use this link group for new connections */
+	if (!list_empty(&lgr->list))
+		list_del_init(&lgr->list);
+	spin_unlock_bh(lgr_lock);
+	__smc_lgr_terminate(lgr, true);
 }
+
+static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
+{
+	int i;
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		struct smc_link *lnk = &lgr->lnk[i];
+
+		if (smc_link_sendable(lnk))
+			lnk->state = SMC_LNK_INACTIVE;
+	}
+	wake_up_all(&lgr->llc_msg_waiter);
+	wake_up_all(&lgr->llc_flow_waiter);
+}
+
+static void smc_lgr_free(struct smc_link_group *lgr);

 static void smc_lgr_free_work(struct work_struct *work)
 {
 	struct smc_link_group *lgr = container_of(to_delayed_work(work),
 						  struct smc_link_group,
 						  free_work);
+	spinlock_t *lgr_lock;
 	bool conns;

-	spin_lock_bh(&smc_lgr_list.lock);
-	if (list_empty(&lgr->list))
-		goto free;
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
+	if (lgr->freeing) {
+		spin_unlock_bh(lgr_lock);
+		return;
+	}
 	read_lock_bh(&lgr->conns_lock);
 	conns = RB_EMPTY_ROOT(&lgr->conns_all);
 	read_unlock_bh(&lgr->conns_lock);
 	if (!conns) { /* number of lgr connections is no longer zero */
-		spin_unlock_bh(&smc_lgr_list.lock);
+		spin_unlock_bh(lgr_lock);
 		return;
 	}
 	list_del_init(&lgr->list); /* remove from smc_lgr_list */
-free:
-	spin_unlock_bh(&smc_lgr_list.lock);
+	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
+	spin_unlock_bh(lgr_lock);
+	cancel_delayed_work(&lgr->free_work);

-	if (!lgr->is_smcd && !lgr->terminating) {
-		/* try to send del link msg, on error free lgr immediately */
-		if (!smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK])) {
-			/* reschedule in case we never receive a response */
-			smc_lgr_schedule_free_work(lgr);
-			return;
-		}
-	}
-
-	if (!delayed_work_pending(&lgr->free_work)) {
-		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
-
-		if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
-			smc_llc_link_inactive(lnk);
-		smc_lgr_free(lgr);
-	}
+	if (!lgr->is_smcd && !lgr->terminating)
+		smc_llc_send_link_delete_all(lgr, true,
+					     SMC_LLC_DEL_PROG_INIT_TERM);
+	if (lgr->is_smcd && !lgr->terminating)
+		smc_ism_signal_shutdown(lgr);
+	if (!lgr->is_smcd)
+		smcr_lgr_link_deactivate_all(lgr);
+	smc_lgr_free(lgr);
 }

-/* create a new SMC link group */
-static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
-			  struct smc_ib_device *smcibdev, u8 ibport,
-			  char *peer_systemid, unsigned short vlan_id,
-			  struct smcd_dev *smcismdev, u64 peer_gid)
+static void smc_lgr_terminate_work(struct work_struct *work)
 {
-	struct smc_link_group *lgr;
-	struct smc_link *lnk;
-	u8 rndvec[3];
-	int rc = 0;
+	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
+						  terminate_work);
+
+	__smc_lgr_terminate(lgr, true);
+}
+
+/* return next unique link id for the lgr */
+static u8 smcr_next_link_id(struct smc_link_group *lgr)
+{
+	u8 link_id;
 	int i;

-	if (is_smcd && vlan_id) {
-		rc = smc_ism_get_vlan(smcismdev, vlan_id);
+	while (1) {
+again:
+		link_id = ++lgr->next_link_id;
+		if (!link_id)	/* skip zero as link_id */
+			link_id = ++lgr->next_link_id;
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			if (smc_link_usable(&lgr->lnk[i]) &&
+			    lgr->lnk[i].link_id == link_id)
+				goto again;
+		}
+		break;
+	}
+	return link_id;
+}
+
+int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
+		   u8 link_idx, struct smc_init_info *ini)
+{
+	u8 rndvec[3];
+	int rc;
+
+	get_device(&ini->ib_dev->ibdev->dev);
+	atomic_inc(&ini->ib_dev->lnk_cnt);
+	lnk->link_id = smcr_next_link_id(lgr);
+	lnk->lgr = lgr;
+	lnk->link_idx = link_idx;
+	lnk->smcibdev = ini->ib_dev;
+	lnk->ibport = ini->ib_port;
+	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
+	smc_llc_link_set_uid(lnk);
+	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
+	if (!ini->ib_dev->initialized) {
+		rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
 		if (rc)
 			goto out;
 	}
-
-	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
-	if (!lgr) {
-		rc = -ENOMEM;
+	get_random_bytes(rndvec, sizeof(rndvec));
+	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
+		(rndvec[2] << 16);
+	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
+				  ini->vlan_id, lnk->gid, &lnk->sgid_index);
+	if (rc)
 		goto out;
-	}
-	lgr->is_smcd = is_smcd;
-	lgr->sync_err = 0;
-	lgr->vlan_id = vlan_id;
-	rwlock_init(&lgr->sndbufs_lock);
-	rwlock_init(&lgr->rmbs_lock);
-	rwlock_init(&lgr->conns_lock);
-	for (i = 0; i < SMC_RMBE_SIZES; i++) {
-		INIT_LIST_HEAD(&lgr->sndbufs[i]);
-		INIT_LIST_HEAD(&lgr->rmbs[i]);
-	}
-	smc_lgr_list.num += SMC_LGR_NUM_INCR;
-	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
-	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
-	lgr->conns_all = RB_ROOT;
-	if (is_smcd) {
-		/* SMC-D specific settings */
-		lgr->peer_gid = peer_gid;
-		lgr->smcd = smcismdev;
-	} else {
-		/* SMC-R specific settings */
-		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
-		memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
-
-		lnk = &lgr->lnk[SMC_SINGLE_LINK];
-		/* initialize link */
-		lnk->state = SMC_LNK_ACTIVATING;
-		lnk->link_id = SMC_SINGLE_LINK;
-		lnk->smcibdev = smcibdev;
-		lnk->ibport = ibport;
-		lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
-		if (!smcibdev->initialized)
-			smc_ib_setup_per_ibdev(smcibdev);
-		get_random_bytes(rndvec, sizeof(rndvec));
-		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
-			(rndvec[2] << 16);
-		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
-					  vlan_id, lnk->gid, &lnk->sgid_index);
-		if (rc)
-			goto free_lgr;
-		rc = smc_llc_link_init(lnk);
-		if (rc)
-			goto free_lgr;
-		rc = smc_wr_alloc_link_mem(lnk);
-		if (rc)
-			goto clear_llc_lnk;
-		rc = smc_ib_create_protection_domain(lnk);
-		if (rc)
-			goto free_link_mem;
-		rc = smc_ib_create_queue_pair(lnk);
-		if (rc)
-			goto dealloc_pd;
-		rc = smc_wr_create_link(lnk);
-		if (rc)
-			goto destroy_qp;
-	}
-	smc->conn.lgr = lgr;
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_add(&lgr->list, &smc_lgr_list.list);
-	spin_unlock_bh(&smc_lgr_list.lock);
+	rc = smc_llc_link_init(lnk);
+	if (rc)
+		goto out;
+	rc = smc_wr_alloc_link_mem(lnk);
+	if (rc)
+		goto clear_llc_lnk;
+	rc = smc_ib_create_protection_domain(lnk);
+	if (rc)
+		goto free_link_mem;
+	rc = smc_ib_create_queue_pair(lnk);
+	if (rc)
+		goto dealloc_pd;
+	rc = smc_wr_create_link(lnk);
+	if (rc)
+		goto destroy_qp;
+	lnk->state = SMC_LNK_ACTIVATING;
 	return 0;

 destroy_qp:
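smcr_next_link_id() above is a wrapping allocator: a u8 counter that skips zero (reserved) and retries while the candidate collides with an id already held by a usable link in the group. The same logic in standalone, compilable form — id_in_use() is a hypothetical stand-in for the SMC_LINKS_PER_LGR_MAX scan:

#include <stdio.h>

typedef unsigned char u8;

static u8 next_id;
static int id_in_use(u8 id) { return id == 7; }	/* pretend 7 is taken */

static u8 next_link_id(void)
{
	u8 id;

	for (;;) {
		id = ++next_id;		/* u8 wraps 255 -> 0 naturally */
		if (!id)		/* zero is reserved, skip it */
			id = ++next_id;
		if (!id_in_use(id))
			return id;
	}
}

int main(void)
{
	next_id = 5;
	for (int i = 0; i < 4; i++)
		printf("%u\n", next_link_id());	/* 6, 8, 9, 10 (7 skipped) */
	return 0;
}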
@@ -281,11 +353,285 @@
 free_link_mem:
 	smc_wr_free_link_mem(lnk);
 clear_llc_lnk:
-	smc_llc_link_clear(lnk);
+	smc_llc_link_clear(lnk, false);
+out:
+	put_device(&ini->ib_dev->ibdev->dev);
+	memset(lnk, 0, sizeof(struct smc_link));
+	lnk->state = SMC_LNK_UNUSED;
+	if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
+		wake_up(&ini->ib_dev->lnks_deleted);
+	return rc;
+}
+
+/* create a new SMC link group */
+static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
+{
+	struct smc_link_group *lgr;
+	struct list_head *lgr_list;
+	struct smc_link *lnk;
+	spinlock_t *lgr_lock;
+	u8 link_idx;
+	int rc = 0;
+	int i;
+
+	if (ini->is_smcd && ini->vlan_id) {
+		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
+				     ini->vlan_id)) {
+			rc = SMC_CLC_DECL_ISMVLANERR;
+			goto out;
+		}
+	}
+
+	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
+	if (!lgr) {
+		rc = SMC_CLC_DECL_MEM;
+		goto ism_put_vlan;
+	}
+	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
+				     SMC_LGR_ID_SIZE, &lgr->id);
+	if (!lgr->tx_wq) {
+		rc = -ENOMEM;
+		goto free_lgr;
+	}
+	lgr->is_smcd = ini->is_smcd;
+	lgr->sync_err = 0;
+	lgr->terminating = 0;
+	lgr->freeing = 0;
+	lgr->vlan_id = ini->vlan_id;
+	mutex_init(&lgr->sndbufs_lock);
+	mutex_init(&lgr->rmbs_lock);
+	rwlock_init(&lgr->conns_lock);
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		INIT_LIST_HEAD(&lgr->sndbufs[i]);
+		INIT_LIST_HEAD(&lgr->rmbs[i]);
+	}
+	lgr->next_link_id = 0;
+	smc_lgr_list.num += SMC_LGR_NUM_INCR;
+	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
+	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
+	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
+	lgr->conns_all = RB_ROOT;
+	if (ini->is_smcd) {
+		/* SMC-D specific settings */
+		get_device(&ini->ism_dev[ini->ism_selected]->dev);
+		lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
+		lgr->smcd = ini->ism_dev[ini->ism_selected];
+		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
+		lgr_lock = &lgr->smcd->lgr_lock;
+		lgr->smc_version = ini->smcd_version;
+		lgr->peer_shutdown = 0;
+		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
+	} else {
+		/* SMC-R specific settings */
+		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
+		       SMC_SYSTEMID_LEN);
+		memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
+		       SMC_MAX_PNETID_LEN);
+		smc_llc_lgr_init(lgr, smc);
+
+		link_idx = SMC_SINGLE_LINK;
+		lnk = &lgr->lnk[link_idx];
+		rc = smcr_link_init(lgr, lnk, link_idx, ini);
+		if (rc)
+			goto free_wq;
+		lgr_list = &smc_lgr_list.list;
+		lgr_lock = &smc_lgr_list.lock;
+		atomic_inc(&lgr_cnt);
+	}
+	smc->conn.lgr = lgr;
+	spin_lock_bh(lgr_lock);
+	list_add_tail(&lgr->list, lgr_list);
+	spin_unlock_bh(lgr_lock);
+	return 0;
+
+free_wq:
+	destroy_workqueue(lgr->tx_wq);
 free_lgr:
 	kfree(lgr);
+ism_put_vlan:
+	if (ini->is_smcd && ini->vlan_id)
+		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
 out:
+	if (rc < 0) {
+		if (rc == -ENOMEM)
+			rc = SMC_CLC_DECL_MEM;
+		else
+			rc = SMC_CLC_DECL_INTERR;
+	}
 	return rc;
+}
+
+static int smc_write_space(struct smc_connection *conn)
+{
+	int buffer_len = conn->peer_rmbe_size;
+	union smc_host_cursor prod;
+	union smc_host_cursor cons;
+	int space;
+
+	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
+	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
+	/* determine rx_buf space */
+	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
+	return space;
+}
+
+static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
+			     struct smc_wr_buf *wr_buf)
+{
+	struct smc_connection *conn = &smc->conn;
+	union smc_host_cursor cons, fin;
+	int rc = 0;
+	int diff;
+
+	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
+	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
+	/* set prod cursor to old state, enforce tx_rdma_writes() */
+	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
+	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
+
+	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
+		/* cons cursor advanced more than fin, and prod was set
+		 * fin above, so now prod is smaller than cons. Fix that.
+		 */
+		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
+		smc_curs_add(conn->sndbuf_desc->len,
+			     &conn->tx_curs_sent, diff);
+		smc_curs_add(conn->sndbuf_desc->len,
+			     &conn->tx_curs_fin, diff);
+
+		smp_mb__before_atomic();
+		atomic_add(diff, &conn->sndbuf_space);
+		smp_mb__after_atomic();
+
+		smc_curs_add(conn->peer_rmbe_size,
+			     &conn->local_tx_ctrl.prod, diff);
+		smc_curs_add(conn->peer_rmbe_size,
+			     &conn->local_tx_ctrl_fin, diff);
+	}
+	/* recalculate, value is used by tx_rdma_writes() */
+	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));
+
+	if (smc->sk.sk_state != SMC_INIT &&
+	    smc->sk.sk_state != SMC_CLOSED) {
+		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
+		if (!rc) {
+			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
+			smc->sk.sk_data_ready(&smc->sk);
+		}
+	} else {
+		smc_wr_tx_put_slot(conn->lnk,
+				   (struct smc_wr_tx_pend_priv *)pend);
+	}
+	return rc;
+}
+
+struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
+				  struct smc_link *from_lnk, bool is_dev_err)
+{
+	struct smc_link *to_lnk = NULL;
+	struct smc_cdc_tx_pend *pend;
+	struct smc_connection *conn;
+	struct smc_wr_buf *wr_buf;
+	struct smc_sock *smc;
+	struct rb_node *node;
+	int i, rc = 0;
+
+	/* link is inactive, wake up tx waiters */
+	smc_wr_wakeup_tx_wait(from_lnk);
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
+			continue;
+		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
+		    from_lnk->ibport == lgr->lnk[i].ibport) {
+			continue;
+		}
+		to_lnk = &lgr->lnk[i];
+		break;
+	}
+	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
+		smc_lgr_terminate_sched(lgr);
+		return NULL;
+	}
+again:
+	read_lock_bh(&lgr->conns_lock);
+	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
+		conn = rb_entry(node, struct smc_connection, alert_node);
+		if (conn->lnk != from_lnk)
+			continue;
+		smc = container_of(conn, struct smc_sock, conn);
+		/* conn->lnk not yet set in SMC_INIT state */
+		if (smc->sk.sk_state == SMC_INIT)
+			continue;
+		if (smc->sk.sk_state == SMC_CLOSED ||
+		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
+		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
+		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
+		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
+		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
+		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
+		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
+		    smc->sk.sk_state == SMC_PROCESSABORT) {
+			spin_lock_bh(&conn->send_lock);
+			conn->lnk = to_lnk;
+			spin_unlock_bh(&conn->send_lock);
+			continue;
+		}
+		sock_hold(&smc->sk);
+		read_unlock_bh(&lgr->conns_lock);
+		/* pre-fetch buffer outside of send_lock, might sleep */
+		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
+		if (rc)
+			goto err_out;
+		/* avoid race with smcr_tx_sndbuf_nonempty() */
+		spin_lock_bh(&conn->send_lock);
+		conn->lnk = to_lnk;
+		rc = smc_switch_cursor(smc, pend, wr_buf);
+		spin_unlock_bh(&conn->send_lock);
+		sock_put(&smc->sk);
+		if (rc)
+			goto err_out;
+		goto again;
+	}
+	read_unlock_bh(&lgr->conns_lock);
+	smc_wr_tx_link_put(to_lnk);
+	return to_lnk;
+
+err_out:
+	smcr_link_down_cond_sched(to_lnk);
+	smc_wr_tx_link_put(to_lnk);
+	return NULL;
+}
+
+static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
+			   struct smc_link_group *lgr)
+{
+	int rc;
+
+	if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
+		/* unregister rmb with peer */
+		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
+		if (!rc) {
+			/* protect against smc_llc_cli_rkey_exchange() */
+			mutex_lock(&lgr->llc_conf_mutex);
+			smc_llc_do_delete_rkey(lgr, rmb_desc);
+			rmb_desc->is_conf_rkey = false;
+			mutex_unlock(&lgr->llc_conf_mutex);
+			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
+		}
+	}
+
+	if (rmb_desc->is_reg_err) {
+		/* buf registration failed, reuse not possible */
+		mutex_lock(&lgr->rmbs_lock);
+		list_del(&rmb_desc->list);
+		mutex_unlock(&lgr->rmbs_lock);
+
+		smc_buf_free(lgr, true, rmb_desc);
+	} else {
+		rmb_desc->used = 0;
+	}
 }

 static void smc_buf_unuse(struct smc_connection *conn,
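smc_write_space() and smc_switch_cursor() above rest on modular cursor arithmetic over the peer's RMB: bytes in flight are (prod - cons) mod size, and writable space is the remainder of the buffer. A minimal sketch with plain integers — the kernel operates on wrapping union smc_host_cursor values, but the arithmetic is the same:

#include <stdio.h>

static int curs_diff(int size, int cons, int prod)
{
	return (prod - cons + size) % size;	/* bytes in flight */
}

static int write_space(int size, int cons, int prod)
{
	return size - curs_diff(size, cons, prod);
}

int main(void)
{
	/* 64KB peer RMB, producer already wrapped past the consumer */
	printf("%d\n", write_space(65536, 60000, 1000));	/* 59000 */
	return 0;
}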
@@ -293,19 +639,10 @@
 {
 	if (conn->sndbuf_desc)
 		conn->sndbuf_desc->used = 0;
-	if (conn->rmb_desc) {
-		if (!conn->rmb_desc->regerr) {
-			conn->rmb_desc->reused = 1;
-			conn->rmb_desc->used = 0;
-		} else {
-			/* buf registration failed, reuse not possible */
-			write_lock_bh(&lgr->rmbs_lock);
-			list_del(&conn->rmb_desc->list);
-			write_unlock_bh(&lgr->rmbs_lock);
-
-			smc_buf_free(lgr, true, conn->rmb_desc);
-		}
-	}
+	if (conn->rmb_desc && lgr->is_smcd)
+		conn->rmb_desc->used = 0;
+	else if (conn->rmb_desc)
+		smcr_buf_unuse(conn->rmb_desc, lgr);
 }

 /* remove a finished connection from its link group */
311648 /* remove a finished connection from its link group */
....@@ -316,45 +653,108 @@
316653 if (!lgr)
317654 return;
318655 if (lgr->is_smcd) {
319
- smc_ism_unset_conn(conn);
656
+ if (!list_empty(&lgr->list))
657
+ smc_ism_unset_conn(conn);
320658 tasklet_kill(&conn->rx_tsklet);
321659 } else {
322
- smc_cdc_tx_dismiss_slots(conn);
660
+ smc_cdc_wait_pend_tx_wr(conn);
661
+ if (current_work() != &conn->abort_work)
662
+ cancel_work_sync(&conn->abort_work);
323663 }
324
- smc_lgr_unregister_conn(conn); /* unsets conn->lgr */
325
- smc_buf_unuse(conn, lgr); /* allow buffer reuse */
664
+ if (!list_empty(&lgr->list)) {
665
+ smc_buf_unuse(conn, lgr); /* allow buffer reuse */
666
+ smc_lgr_unregister_conn(conn);
667
+ }
326668
327669 if (!lgr->conns_num)
328670 smc_lgr_schedule_free_work(lgr);
329671 }
330672
331
-static void smc_link_clear(struct smc_link *lnk)
673
+/* unregister a link from a buf_desc */
674
+static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
675
+ struct smc_link *lnk)
332676 {
677
+ if (is_rmb)
678
+ buf_desc->is_reg_mr[lnk->link_idx] = false;
679
+ if (!buf_desc->is_map_ib[lnk->link_idx])
680
+ return;
681
+ if (is_rmb) {
682
+ if (buf_desc->mr_rx[lnk->link_idx]) {
683
+ smc_ib_put_memory_region(
684
+ buf_desc->mr_rx[lnk->link_idx]);
685
+ buf_desc->mr_rx[lnk->link_idx] = NULL;
686
+ }
687
+ smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
688
+ } else {
689
+ smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
690
+ }
691
+ sg_free_table(&buf_desc->sgt[lnk->link_idx]);
692
+ buf_desc->is_map_ib[lnk->link_idx] = false;
693
+}
694
+
695
+/* unmap all buffers of lgr for a deleted link */
696
+static void smcr_buf_unmap_lgr(struct smc_link *lnk)
697
+{
698
+ struct smc_link_group *lgr = lnk->lgr;
699
+ struct smc_buf_desc *buf_desc, *bf;
700
+ int i;
701
+
702
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
703
+ mutex_lock(&lgr->rmbs_lock);
704
+ list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
705
+ smcr_buf_unmap_link(buf_desc, true, lnk);
706
+ mutex_unlock(&lgr->rmbs_lock);
707
+ mutex_lock(&lgr->sndbufs_lock);
708
+ list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
709
+ list)
710
+ smcr_buf_unmap_link(buf_desc, false, lnk);
711
+ mutex_unlock(&lgr->sndbufs_lock);
712
+ }
713
+}
714
+
715
+static void smcr_rtoken_clear_link(struct smc_link *lnk)
716
+{
717
+ struct smc_link_group *lgr = lnk->lgr;
718
+ int i;
719
+
720
+ for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
721
+ lgr->rtokens[i][lnk->link_idx].rkey = 0;
722
+ lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
723
+ }
724
+}
725
+
726
+/* must be called under lgr->llc_conf_mutex lock */
727
+void smcr_link_clear(struct smc_link *lnk, bool log)
728
+{
729
+ struct smc_ib_device *smcibdev;
730
+
731
+ if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
732
+ return;
333733 lnk->peer_qpn = 0;
334
- smc_llc_link_clear(lnk);
335
- smc_ib_modify_qp_reset(lnk);
734
+ smc_llc_link_clear(lnk, log);
735
+ smcr_buf_unmap_lgr(lnk);
736
+ smcr_rtoken_clear_link(lnk);
737
+ smc_ib_modify_qp_error(lnk);
336738 smc_wr_free_link(lnk);
337739 smc_ib_destroy_queue_pair(lnk);
338740 smc_ib_dealloc_protection_domain(lnk);
339741 smc_wr_free_link_mem(lnk);
742
+ put_device(&lnk->smcibdev->ibdev->dev);
743
+ smcibdev = lnk->smcibdev;
744
+ memset(lnk, 0, sizeof(struct smc_link));
745
+ lnk->state = SMC_LNK_UNUSED;
746
+ if (!atomic_dec_return(&smcibdev->lnk_cnt))
747
+ wake_up(&smcibdev->lnks_deleted);
340748 }
341749
342750 static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
343751 struct smc_buf_desc *buf_desc)
344752 {
345
- struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
753
+ int i;
346754
347
- if (is_rmb) {
348
- if (buf_desc->mr_rx[SMC_SINGLE_LINK])
349
- smc_ib_put_memory_region(
350
- buf_desc->mr_rx[SMC_SINGLE_LINK]);
351
- smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
352
- DMA_FROM_DEVICE);
353
- } else {
354
- smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
355
- DMA_TO_DEVICE);
356
- }
357
- sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
755
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
756
+ smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);
757
+
358758 if (buf_desc->pages)
359759 __free_pages(buf_desc->pages, buf_desc->order);
360760 kfree(buf_desc);
@@ -410,27 +810,100 @@
 }

 /* remove a link group */
-void smc_lgr_free(struct smc_link_group *lgr)
+static void smc_lgr_free(struct smc_link_group *lgr)
 {
+	int i;
+
+	if (!lgr->is_smcd) {
+		mutex_lock(&lgr->llc_conf_mutex);
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
+				smcr_link_clear(&lgr->lnk[i], false);
+		}
+		mutex_unlock(&lgr->llc_conf_mutex);
+		smc_llc_lgr_clear(lgr);
+	}
+
 	smc_lgr_free_bufs(lgr);
-	if (lgr->is_smcd)
+	destroy_workqueue(lgr->tx_wq);
+	if (lgr->is_smcd) {
 		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
-	else
-		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
+		put_device(&lgr->smcd->dev);
+		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
+			wake_up(&lgr->smcd->lgrs_deleted);
+	} else {
+		if (!atomic_dec_return(&lgr_cnt))
+			wake_up(&lgrs_deleted);
+	}
 	kfree(lgr);
 }

-void smc_lgr_forget(struct smc_link_group *lgr)
+static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
 {
-	spin_lock_bh(&smc_lgr_list.lock);
-	/* do not use this link group for new connections */
-	if (!list_empty(&lgr->list))
-		list_del_init(&lgr->list);
-	spin_unlock_bh(&smc_lgr_list.lock);
+	int i;
+
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		struct smc_buf_desc *buf_desc;
+
+		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
+			buf_desc->len += sizeof(struct smcd_cdc_msg);
+			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+		}
+	}
 }

-/* terminate linkgroup abnormally */
-static void __smc_lgr_terminate(struct smc_link_group *lgr)
+static void smc_sk_wake_ups(struct smc_sock *smc)
+{
+	smc->sk.sk_write_space(&smc->sk);
+	smc->sk.sk_data_ready(&smc->sk);
+	smc->sk.sk_state_change(&smc->sk);
+}
+
+/* kill a connection */
+static void smc_conn_kill(struct smc_connection *conn, bool soft)
+{
+	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+
+	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
+		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+	else
+		smc_close_abort(conn);
+	conn->killed = 1;
+	smc->sk.sk_err = ECONNABORTED;
+	smc_sk_wake_ups(smc);
+	if (conn->lgr->is_smcd) {
+		smc_ism_unset_conn(conn);
+		if (soft)
+			tasklet_kill(&conn->rx_tsklet);
+		else
+			tasklet_unlock_wait(&conn->rx_tsklet);
+	} else {
+		smc_cdc_wait_pend_tx_wr(conn);
+	}
+	smc_lgr_unregister_conn(conn);
+	smc_close_active_abort(smc);
+}
+
+static void smc_lgr_cleanup(struct smc_link_group *lgr)
+{
+	if (lgr->is_smcd) {
+		smc_ism_signal_shutdown(lgr);
+		smcd_unregister_all_dmbs(lgr);
+	} else {
+		u32 rsn = lgr->llc_termination_rsn;
+
+		if (!rsn)
+			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
+		smc_llc_send_link_delete_all(lgr, false, rsn);
+		smcr_lgr_link_deactivate_all(lgr);
+	}
+}
+
+/* terminate link group
+ * @soft: true if link group shutdown can take its time
+ *        false if immediate link group shutdown is required
+ */
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
 {
 	struct smc_connection *conn;
 	struct smc_sock *smc;
@@ -438,90 +911,328 @@

 	if (lgr->terminating)
 		return;	/* lgr already terminating */
+	/* cancel free_work sync, will terminate when lgr->freeing is set */
+	cancel_delayed_work(&lgr->free_work);
 	lgr->terminating = 1;
-	if (!list_empty(&lgr->list)) /* forget lgr */
-		list_del_init(&lgr->list);
-	if (!lgr->is_smcd)
-		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

-	write_lock_bh(&lgr->conns_lock);
+	/* kill remaining link group connections */
+	read_lock_bh(&lgr->conns_lock);
 	node = rb_first(&lgr->conns_all);
 	while (node) {
+		read_unlock_bh(&lgr->conns_lock);
 		conn = rb_entry(node, struct smc_connection, alert_node);
 		smc = container_of(conn, struct smc_sock, conn);
-		sock_hold(&smc->sk); /* sock_put in close work */
-		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
-		__smc_lgr_unregister_conn(conn);
-		write_unlock_bh(&lgr->conns_lock);
-		if (!schedule_work(&conn->close_work))
-			sock_put(&smc->sk);
-		write_lock_bh(&lgr->conns_lock);
+		sock_hold(&smc->sk); /* sock_put below */
+		lock_sock(&smc->sk);
+		smc_conn_kill(conn, soft);
+		release_sock(&smc->sk);
+		sock_put(&smc->sk); /* sock_hold above */
+		read_lock_bh(&lgr->conns_lock);
 		node = rb_first(&lgr->conns_all);
 	}
-	write_unlock_bh(&lgr->conns_lock);
-	if (!lgr->is_smcd)
-		wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
-	smc_lgr_schedule_free_work(lgr);
+	read_unlock_bh(&lgr->conns_lock);
+	smc_lgr_cleanup(lgr);
+	smc_lgr_free(lgr);
 }

-void smc_lgr_terminate(struct smc_link_group *lgr)
+/* unlink link group and schedule termination */
+void smc_lgr_terminate_sched(struct smc_link_group *lgr)
 {
-	spin_lock_bh(&smc_lgr_list.lock);
-	__smc_lgr_terminate(lgr);
-	spin_unlock_bh(&smc_lgr_list.lock);
-}
+	spinlock_t *lgr_lock;

-/* Called when IB port is terminated */
-void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
-{
-	struct smc_link_group *lgr, *l;
-
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
-		if (!lgr->is_smcd &&
-		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
-		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
-			__smc_lgr_terminate(lgr);
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
+	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
+		spin_unlock_bh(lgr_lock);
+		return;	/* lgr already terminating */
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	list_del_init(&lgr->list);
+	lgr->freeing = 1;
+	spin_unlock_bh(lgr_lock);
+	schedule_work(&lgr->terminate_work);
 }

-/* Called when SMC-D device is terminated or peer is lost */
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+/* Called when peer lgr shutdown (regularly or abnormally) is received */
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 {
 	struct smc_link_group *lgr, *l;
 	LIST_HEAD(lgr_free_list);

 	/* run common cleanup function and build free list */
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
-		if (lgr->is_smcd && lgr->smcd == dev &&
-		    (!peer_gid || lgr->peer_gid == peer_gid) &&
-		    !list_empty(&lgr->list)) {
-			__smc_lgr_terminate(lgr);
+	spin_lock_bh(&dev->lgr_lock);
+	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
+		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
+		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
+			if (peer_gid) /* peer triggered termination */
+				lgr->peer_shutdown = 1;
 			list_move(&lgr->list, &lgr_free_list);
+			lgr->freeing = 1;
 		}
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(&dev->lgr_lock);

 	/* cancel the regular free workers and actually free lgrs */
 	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
 		list_del_init(&lgr->list);
-		cancel_delayed_work_sync(&lgr->free_work);
-		smc_lgr_free(lgr);
+		schedule_work(&lgr->terminate_work);
 	}
 }

-/* Determine vlan of internal TCP socket.
- * @vlan_id: address to store the determined vlan id into
+/* Called when an SMCD device is removed or the smc module is unloaded */
+void smc_smcd_terminate_all(struct smcd_dev *smcd)
+{
+	struct smc_link_group *lgr, *lg;
+	LIST_HEAD(lgr_free_list);
+
+	spin_lock_bh(&smcd->lgr_lock);
+	list_splice_init(&smcd->lgr_list, &lgr_free_list);
+	list_for_each_entry(lgr, &lgr_free_list, list)
+		lgr->freeing = 1;
+	spin_unlock_bh(&smcd->lgr_lock);
+
+	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+		list_del_init(&lgr->list);
+		__smc_lgr_terminate(lgr, false);
+	}
+
+	if (atomic_read(&smcd->lgr_cnt))
+		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
+}
+
+/* Called when an SMCR device is removed or the smc module is unloaded.
+ * If smcibdev is given, all SMCR link groups using this device are terminated.
+ * If smcibdev is NULL, all SMCR link groups are terminated.
  */
-int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+{
+	struct smc_link_group *lgr, *lg;
+	LIST_HEAD(lgr_free_list);
+	int i;
+
+	spin_lock_bh(&smc_lgr_list.lock);
+	if (!smcibdev) {
+		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
+		list_for_each_entry(lgr, &lgr_free_list, list)
+			lgr->freeing = 1;
+	} else {
+		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
+			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+				if (lgr->lnk[i].smcibdev == smcibdev)
+					smcr_link_down_cond_sched(&lgr->lnk[i]);
+			}
+		}
+	}
+	spin_unlock_bh(&smc_lgr_list.lock);
+
+	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+		list_del_init(&lgr->list);
+		smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
+		__smc_lgr_terminate(lgr, false);
+	}
+
+	if (smcibdev) {
+		if (atomic_read(&smcibdev->lnk_cnt))
+			wait_event(smcibdev->lnks_deleted,
+				   !atomic_read(&smcibdev->lnk_cnt));
+	} else {
+		if (atomic_read(&lgr_cnt))
+			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
+	}
+}
+
+/* set new lgr type and clear all asymmetric link tagging */
+void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
+{
+	char *lgr_type = "";
+	int i;
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
+		if (smc_link_usable(&lgr->lnk[i]))
+			lgr->lnk[i].link_is_asym = false;
+	if (lgr->type == new_type)
+		return;
+	lgr->type = new_type;
+
+	switch (lgr->type) {
+	case SMC_LGR_NONE:
+		lgr_type = "NONE";
+		break;
+	case SMC_LGR_SINGLE:
+		lgr_type = "SINGLE";
+		break;
+	case SMC_LGR_SYMMETRIC:
+		lgr_type = "SYMMETRIC";
+		break;
+	case SMC_LGR_ASYMMETRIC_PEER:
+		lgr_type = "ASYMMETRIC_PEER";
+		break;
+	case SMC_LGR_ASYMMETRIC_LOCAL:
+		lgr_type = "ASYMMETRIC_LOCAL";
+		break;
+	}
+	pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
+			    "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
+			    lgr_type, lgr->pnet_id);
+}
+
+/* set new lgr type and tag a link as asymmetric */
+void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
+			    enum smc_lgr_type new_type, int asym_lnk_idx)
+{
+	smcr_lgr_set_type(lgr, new_type);
+	lgr->lnk[asym_lnk_idx].link_is_asym = true;
+}
+
+/* abort connection, abort_work scheduled from tasklet context */
+static void smc_conn_abort_work(struct work_struct *work)
+{
+	struct smc_connection *conn = container_of(work,
+						   struct smc_connection,
+						   abort_work);
+	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+
+	lock_sock(&smc->sk);
+	smc_conn_kill(conn, true);
+	release_sock(&smc->sk);
+	sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
+}
+
+void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
+{
+	struct smc_link_group *lgr, *n;
+
+	spin_lock_bh(&smc_lgr_list.lock);
+	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+		struct smc_link *link;
+
+		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
+			    SMC_MAX_PNETID_LEN) ||
+		    lgr->type == SMC_LGR_SYMMETRIC ||
+		    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+			continue;
+
+		/* trigger local add link processing */
+		link = smc_llc_usable_link(lgr);
+		if (link)
+			smc_llc_add_link_local(link);
+	}
+	spin_unlock_bh(&smc_lgr_list.lock);
+}
+
+/* link is down - switch connections to alternate link,
+ * must be called under lgr->llc_conf_mutex lock
+ */
+static void smcr_link_down(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	struct smc_link *to_lnk;
+	int del_link_id;
+
+	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
+		return;
+
+	to_lnk = smc_switch_conns(lgr, lnk, true);
+	if (!to_lnk) { /* no backup link available */
+		smcr_link_clear(lnk, true);
+		return;
+	}
+	smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
+	del_link_id = lnk->link_id;
+
+	if (lgr->role == SMC_SERV) {
+		/* trigger local delete link processing */
+		smc_llc_srv_delete_link_local(to_lnk, del_link_id);
+	} else {
+		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
+			/* another llc task is ongoing */
+			mutex_unlock(&lgr->llc_conf_mutex);
+			wait_event_timeout(lgr->llc_flow_waiter,
+					   (list_empty(&lgr->list) ||
+					    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+					   SMC_LLC_WAIT_TIME);
+			mutex_lock(&lgr->llc_conf_mutex);
+		}
+		if (!list_empty(&lgr->list)) {
+			smc_llc_send_delete_link(to_lnk, del_link_id,
+						 SMC_LLC_REQ, true,
+						 SMC_LLC_DEL_LOST_PATH);
+			smcr_link_clear(lnk, true);
+		}
+		wake_up(&lgr->llc_flow_waiter);	/* wake up next waiter */
+	}
+}
+
+/* must be called under lgr->llc_conf_mutex lock */
+void smcr_link_down_cond(struct smc_link *lnk)
+{
+	if (smc_link_downing(&lnk->state))
+		smcr_link_down(lnk);
+}
+
+/* will get the lgr->llc_conf_mutex lock */
+void smcr_link_down_cond_sched(struct smc_link *lnk)
+{
+	if (smc_link_downing(&lnk->state))
+		schedule_work(&lnk->link_down_wrk);
+}
+
+void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
+{
+	struct smc_link_group *lgr, *n;
+	int i;
+
+	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
+			    SMC_MAX_PNETID_LEN))
+			continue; /* lgr is not affected */
+		if (list_empty(&lgr->list))
+			continue;
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			struct smc_link *lnk = &lgr->lnk[i];
+
+			if (smc_link_usable(lnk) &&
+			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
+				smcr_link_down_cond_sched(lnk);
+		}
+	}
+}
+
+static void smc_link_down_work(struct work_struct *work)
+{
+	struct smc_link *link = container_of(work, struct smc_link,
+					     link_down_wrk);
+	struct smc_link_group *lgr = link->lgr;
+
+	if (list_empty(&lgr->list))
+		return;
+	wake_up_all(&lgr->llc_msg_waiter);
+	mutex_lock(&lgr->llc_conf_mutex);
+	smcr_link_down(link);
+	mutex_unlock(&lgr->llc_conf_mutex);
+}
+
+static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
+				  struct netdev_nested_priv *priv)
+{
+	unsigned short *vlan_id = (unsigned short *)priv->data;
+
+	if (is_vlan_dev(lower_dev)) {
+		*vlan_id = vlan_dev_vlan_id(lower_dev);
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Determine vlan of internal TCP socket. */
+int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
 {
 	struct dst_entry *dst = sk_dst_get(clcsock->sk);
+	struct netdev_nested_priv priv;
 	struct net_device *ndev;
-	int i, nest_lvl, rc = 0;
+	int rc = 0;

-	*vlan_id = 0;
+	ini->vlan_id = 0;
 	if (!dst) {
 		rc = -ENOTCONN;
 		goto out;
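The lgr_cnt/lgrs_deleted, lnk_cnt/lnks_deleted and smcd->lgr_cnt/lgrs_deleted pairs used by the terminate-all paths above follow a common kernel teardown idiom: count live objects atomically and let the last decrement wake a waiter that blocks module or device removal. A kernel-style sketch of the bare pattern, with hypothetical obj_* names:

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t obj_cnt = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(objs_deleted);

static void obj_get(void)
{
	atomic_inc(&obj_cnt);
}

static void obj_put(void)
{
	if (!atomic_dec_return(&obj_cnt))	/* last object is gone */
		wake_up(&objs_deleted);
}

static void module_teardown(void)
{
	/* sleep until every outstanding object has been freed */
	if (atomic_read(&obj_cnt))
		wait_event(objs_deleted, !atomic_read(&obj_cnt));
}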
@@ -533,24 +1244,13 @@

 	ndev = dst->dev;
 	if (is_vlan_dev(ndev)) {
-		*vlan_id = vlan_dev_vlan_id(ndev);
+		ini->vlan_id = vlan_dev_vlan_id(ndev);
 		goto out_rel;
 	}

+	priv.data = (void *)&ini->vlan_id;
 	rtnl_lock();
-	nest_lvl = dev_get_nest_level(ndev);
-	for (i = 0; i < nest_lvl; i++) {
-		struct list_head *lower = &ndev->adj_list.lower;
-
-		if (list_empty(lower))
-			break;
-		lower = lower->next;
-		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
-		if (is_vlan_dev(ndev)) {
-			*vlan_id = vlan_dev_vlan_id(ndev);
-			break;
-		}
-	}
+	netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
 	rtnl_unlock();

 out_rel:
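netdev_walk_all_lower_dev() replaces the removed hand-rolled nesting loop: the core walks the entire lower-device hierarchy under RTNL and invokes the callback for each device until it returns non-zero. A hypothetical callback illustrating the contract — it merely counts lower devices and is not part of the patch:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int count_lower_walk(struct net_device *lower_dev,
			    struct netdev_nested_priv *priv)
{
	(*(int *)priv->data)++;
	return 0;			/* 0 means: keep walking */
}

static int count_lower_devs(struct net_device *ndev)
{
	struct netdev_nested_priv priv;
	int cnt = 0;

	priv.data = (void *)&cnt;
	rtnl_lock();			/* the walker requires RTNL */
	netdev_walk_all_lower_dev(ndev, count_lower_walk, &priv);
	rtnl_unlock();
	return cnt;
}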
@@ -561,15 +1261,23 @@

 static bool smcr_lgr_match(struct smc_link_group *lgr,
 			   struct smc_clc_msg_local *lcl,
-			   enum smc_lgr_role role)
+			   enum smc_lgr_role role, u32 clcqpn)
 {
-	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
-		       SMC_SYSTEMID_LEN) &&
-		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
-			SMC_GID_SIZE) &&
-		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
-			sizeof(lcl->mac)) &&
-		lgr->role == role;
+	int i;
+
+	if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
+	    lgr->role != role)
+		return false;
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		if (!smc_link_active(&lgr->lnk[i]))
+			continue;
+		if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
+		    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
+		    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
+			return true;
+	}
+	return false;
 }

 static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -579,80 +1287,92 @@
 }

 /* create a new SMC connection (and a new link group if necessary) */
-int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-		    struct smc_ib_device *smcibdev, u8 ibport,
-		    struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
-		    u64 peer_gid)
+int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
 {
 	struct smc_connection *conn = &smc->conn;
-	int local_contact = SMC_FIRST_CONTACT;
+	struct list_head *lgr_list;
 	struct smc_link_group *lgr;
-	unsigned short vlan_id;
 	enum smc_lgr_role role;
+	spinlock_t *lgr_lock;
 	int rc = 0;

+	lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
+		   &smc_lgr_list.list;
+	lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
+		   &smc_lgr_list.lock;
+	ini->first_contact_local = 1;
 	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
-	rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
-	if (rc)
-		return rc;
-
-	if ((role == SMC_CLNT) && srv_first_contact)
+	if (role == SMC_CLNT && ini->first_contact_peer)
 		/* create new link group as well */
 		goto create;

 	/* determine if an existing link group can be reused */
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
+	spin_lock_bh(lgr_lock);
+	list_for_each_entry(lgr, lgr_list, list) {
 		write_lock_bh(&lgr->conns_lock);
-		if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
-		     smcr_lgr_match(lgr, lcl, role)) &&
+		if ((ini->is_smcd ?
+		     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
+				    ini->ism_peer_gid[ini->ism_selected]) :
+		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
 		    !lgr->sync_err &&
-		    lgr->vlan_id == vlan_id &&
-		    (role == SMC_CLNT ||
-		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
+		    (ini->smcd_version == SMC_V2 ||
+		     lgr->vlan_id == ini->vlan_id) &&
+		    (role == SMC_CLNT || ini->is_smcd ||
+		     (lgr->conns_num < SMC_RMBS_PER_LGR_MAX &&
+		      !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
 			/* link group found */
-			local_contact = SMC_REUSE_CONTACT;
+			ini->first_contact_local = 0;
 			conn->lgr = lgr;
-			smc_lgr_register_conn(conn); /* add smc conn to lgr */
-			if (delayed_work_pending(&lgr->free_work))
-				cancel_delayed_work(&lgr->free_work);
+			rc = smc_lgr_register_conn(conn, false);
 			write_unlock_bh(&lgr->conns_lock);
+			if (!rc && delayed_work_pending(&lgr->free_work))
+				cancel_delayed_work(&lgr->free_work);
 			break;
 		}
 		write_unlock_bh(&lgr->conns_lock);
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(lgr_lock);
+	if (rc)
+		return rc;

-	if (role == SMC_CLNT && !srv_first_contact &&
-	    (local_contact == SMC_FIRST_CONTACT)) {
+	if (role == SMC_CLNT && !ini->first_contact_peer &&
+	    ini->first_contact_local) {
 		/* Server reuses a link group, but Client wants to start
 		 * a new one
 		 * send out_of_sync decline, reason synchr. error
 		 */
-		return -ENOLINK;
+		return SMC_CLC_DECL_SYNCERR;
 	}

 create:
-	if (local_contact == SMC_FIRST_CONTACT) {
-		rc = smc_lgr_create(smc, is_smcd, smcibdev, ibport,
-				    lcl->id_for_peer, vlan_id, smcd, peer_gid);
+	if (ini->first_contact_local) {
+		rc = smc_lgr_create(smc, ini);
 		if (rc)
 			goto out;
-		smc_lgr_register_conn(conn); /* add smc conn to lgr */
+		lgr = conn->lgr;
+		write_lock_bh(&lgr->conns_lock);
+		rc = smc_lgr_register_conn(conn, true);
+		write_unlock_bh(&lgr->conns_lock);
+		if (rc)
+			goto out;
 	}
 	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
 	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 	conn->urg_state = SMC_URG_READ;
-	if (is_smcd) {
+	init_waitqueue_head(&conn->cdc_pend_tx_wq);
+	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
+	if (ini->is_smcd) {
 		conn->rx_off = sizeof(struct smcd_cdc_msg);
 		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+	} else {
+		conn->rx_off = 0;
 	}
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&conn->acurs_lock);
 #endif

 out:
-	return rc ? rc : local_contact;
+	return rc;
 }

 /* convert the RMB size into the compressed notation - minimum 16K.
@@ -686,19 +1406,19 @@
  * buffer size; if not available, return NULL
  */
 static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
-					     rwlock_t *lock,
+					     struct mutex *lock,
 					     struct list_head *buf_list)
 {
 	struct smc_buf_desc *buf_slot;

-	read_lock_bh(lock);
+	mutex_lock(lock);
 	list_for_each_entry(buf_slot, buf_list, list) {
 		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
-			read_unlock_bh(lock);
+			mutex_unlock(lock);
 			return buf_slot;
 		}
 	}
-	read_unlock_bh(lock);
+	mutex_unlock(lock);
 	return NULL;
 }

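The cmpxchg(&buf_slot->used, 0, 1) in smc_buf_get_slot() claims a buffer atomically, which is why the list can now be protected by a sleepable mutex while the claim itself stays race-free. The same idea in portable C11, with demo types rather than the kernel helpers:

#include <stdatomic.h>
#include <stdio.h>

struct slot { atomic_int used; };

/* claim the first free slot; the compare-exchange makes the claim atomic,
 * so two threads can never grab the same buffer */
static struct slot *get_slot(struct slot *s, int n)
{
	for (int i = 0; i < n; i++) {
		int expected = 0;
		if (atomic_compare_exchange_strong(&s[i].used, &expected, 1))
			return &s[i];
	}
	return NULL;
}

int main(void)
{
	struct slot s[2] = { { 1 }, { 0 } };	/* slot 0 already in use */

	printf("claimed slot %td\n", get_slot(s, 2) - s);	/* 1 */
	return 0;
}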
@@ -708,15 +1428,138 @@
  */
 static inline int smc_rmb_wnd_update_limit(int rmbe_size)
 {
-	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
+	return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
+}
+
+/* map an rmb buf to a link */
+static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
+			     struct smc_link *lnk)
+{
+	int rc;
+
+	if (buf_desc->is_map_ib[lnk->link_idx])
+		return 0;
+
+	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
+	if (rc)
+		return rc;
+	sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
+		   buf_desc->cpu_addr, buf_desc->len);
+
+	/* map sg table to DMA address */
+	rc = smc_ib_buf_map_sg(lnk, buf_desc,
+			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	/* SMC protocol depends on mapping to one DMA address only */
+	if (rc != 1) {
+		rc = -EAGAIN;
+		goto free_table;
+	}
+
+	/* create a new memory region for the RMB */
+	if (is_rmb) {
+		rc = smc_ib_get_memory_region(lnk->roce_pd,
+					      IB_ACCESS_REMOTE_WRITE |
+					      IB_ACCESS_LOCAL_WRITE,
+					      buf_desc, lnk->link_idx);
+		if (rc)
+			goto buf_unmap;
+		smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
+	}
+	buf_desc->is_map_ib[lnk->link_idx] = true;
+	return 0;
+
+buf_unmap:
+	smc_ib_buf_unmap_sg(lnk, buf_desc,
+			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+free_table:
+	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
+	return rc;
+}
+
+/* register a new rmb on IB device,
+ * must be called under lgr->llc_conf_mutex lock
+ */
+int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
+{
+	if (list_empty(&link->lgr->list))
+		return -ENOLINK;
+	if (!rmb_desc->is_reg_mr[link->link_idx]) {
+		/* register memory region for new rmb */
+		if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
+			rmb_desc->is_reg_err = true;
+			return -EFAULT;
+		}
+		rmb_desc->is_reg_mr[link->link_idx] = true;
+	}
+	return 0;
+}
+
+static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
+			     struct list_head *lst, bool is_rmb)
+{
+	struct smc_buf_desc *buf_desc, *bf;
+	int rc = 0;
+
+	mutex_lock(lock);
+	list_for_each_entry_safe(buf_desc, bf, lst, list) {
+		if (!buf_desc->used)
+			continue;
+		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
+		if (rc)
+			goto out;
+	}
+out:
+	mutex_unlock(lock);
+	return rc;
+}
+
+/* map all used buffers of lgr for a new link */
+int smcr_buf_map_lgr(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	int i, rc = 0;
+
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
+				       &lgr->rmbs[i], true);
+		if (rc)
+			return rc;
+		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
+				       &lgr->sndbufs[i], false);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/* register all used buffers of lgr for a new link,
+ * must be called under lgr->llc_conf_mutex lock
+ */
+int smcr_buf_reg_lgr(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	struct smc_buf_desc *buf_desc, *bf;
+	int i, rc = 0;
+
+	mutex_lock(&lgr->rmbs_lock);
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
+			if (!buf_desc->used)
+				continue;
+			rc = smcr_link_reg_rmb(lnk, buf_desc);
+			if (rc)
+				goto out;
+		}
+	}
+out:
+	mutex_unlock(&lgr->rmbs_lock);
+	return rc;
 }

 static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
 						bool is_rmb, int bufsize)
 {
 	struct smc_buf_desc *buf_desc;
-	struct smc_link *lnk;
-	int rc;

 	/* try to alloc a new buffer */
 	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
@@ -733,41 +1576,36 @@
 		return ERR_PTR(-EAGAIN);
 	}
 	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
-
-	/* build the sg table from the pages */
-	lnk = &lgr->lnk[SMC_SINGLE_LINK];
-	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
-			    GFP_KERNEL);
-	if (rc) {
-		smc_buf_free(lgr, is_rmb, buf_desc);
-		return ERR_PTR(rc);
-	}
-	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
-		   buf_desc->cpu_addr, bufsize);
-
-	/* map sg table to DMA address */
-	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
-			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	/* SMC protocol depends on mapping to one DMA address only */
-	if (rc != 1) {
-		smc_buf_free(lgr, is_rmb, buf_desc);
-		return ERR_PTR(-EAGAIN);
-	}
-
-	/* create a new memory region for the RMB */
-	if (is_rmb) {
-		rc = smc_ib_get_memory_region(lnk->roce_pd,
-					      IB_ACCESS_REMOTE_WRITE |
-					      IB_ACCESS_LOCAL_WRITE,
-					      buf_desc);
-		if (rc) {
-			smc_buf_free(lgr, is_rmb, buf_desc);
-			return ERR_PTR(rc);
-		}
-	}
-
 	buf_desc->len = bufsize;
 	return buf_desc;
+}
+
+/* map buf_desc on all usable links,
+ * unused buffers stay mapped as long as the link is up
+ */
+static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
+				     struct smc_buf_desc *buf_desc, bool is_rmb)
+{
+	int i, rc = 0, cnt = 0;
+
+	/* protect against parallel link reconfiguration */
+	mutex_lock(&lgr->llc_conf_mutex);
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		struct smc_link *lnk = &lgr->lnk[i];
+
+		if (!smc_link_usable(lnk))
+			continue;
+		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		cnt++;
+	}
+out:
+	mutex_unlock(&lgr->llc_conf_mutex);
+	if (!rc && !cnt)
+		rc = -EINVAL;
+	return rc;
 }

 #define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
@@ -789,7 +1627,11 @@
         rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
         if (rc) {
                 kfree(buf_desc);
-                return ERR_PTR(-EAGAIN);
+                if (rc == -ENOMEM)
+                        return ERR_PTR(-EAGAIN);
+                if (rc == -ENOSPC)
+                        return ERR_PTR(-ENOSPC);
+                return ERR_PTR(-EIO);
         }
         buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
         /* CDC header stored in buf. So, pretend it was smaller */
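Note the differentiated failure reporting: -ENOMEM from smc_ism_register_dmb() becomes -EAGAIN (a retry with a smaller buffer may still succeed), -ENOSPC is passed through to signal that the ISM device has no free DMBs, and everything else collapses to -EIO. Together with the PTR_ERR() propagation added to __smc_buf_create() below, callers now see the actual reason instead of a blanket -ENOMEM.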
@@ -814,8 +1656,8 @@
         struct smc_link_group *lgr = conn->lgr;
         struct list_head *buf_list;
         int bufsize, bufsize_short;
+        struct mutex *lock;     /* lock buffer list */
         int sk_buf_size;
-        rwlock_t *lock;

         if (is_rmb)
                 /* use socket recv buffer size (w/o overhead) as start value */
@@ -856,14 +1698,21 @@
                         continue;

                 buf_desc->used = 1;
-                write_lock_bh(lock);
+                mutex_lock(lock);
                 list_add(&buf_desc->list, buf_list);
-                write_unlock_bh(lock);
+                mutex_unlock(lock);
                 break; /* found */
         }

         if (IS_ERR(buf_desc))
-                return -ENOMEM;
+                return PTR_ERR(buf_desc);
+
+        if (!is_smcd) {
+                if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
+                        smcr_buf_unuse(buf_desc, lgr);
+                        return -ENOMEM;
+                }
+        }

         if (is_rmb) {
                 conn->rmb_desc = buf_desc;
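smcr_buf_unuse(), used in the new unwind path above, is defined earlier in this file. A heavily hedged sketch of its assumed behavior (the is_conf_rkey flag and the LLC delete-rkey call are assumptions):

static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
                           struct smc_link_group *lgr)
{
        if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
                /* an rkey was already confirmed to the peer - revoke it */
                smc_llc_do_delete_rkey(lgr, rmb_desc);
                rmb_desc->is_conf_rkey = false;
        }
        rmb_desc->used = 0;     /* descriptor may be picked up again */
}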
@@ -884,42 +1733,44 @@

 void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
 {
-        struct smc_link_group *lgr = conn->lgr;
-
-        if (!conn->lgr || conn->lgr->is_smcd)
+        if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
                 return;
-        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-                               conn->sndbuf_desc, DMA_TO_DEVICE);
+        smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
 }

 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
 {
-        struct smc_link_group *lgr = conn->lgr;
-
-        if (!conn->lgr || conn->lgr->is_smcd)
+        if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
                 return;
-        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-                                  conn->sndbuf_desc, DMA_TO_DEVICE);
+        smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
 }

 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
 {
-        struct smc_link_group *lgr = conn->lgr;
+        int i;

         if (!conn->lgr || conn->lgr->is_smcd)
                 return;
-        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-                               conn->rmb_desc, DMA_FROM_DEVICE);
+        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+                if (!smc_link_active(&conn->lgr->lnk[i]))
+                        continue;
+                smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
+                                       DMA_FROM_DEVICE);
+        }
 }

 void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
 {
-        struct smc_link_group *lgr = conn->lgr;
+        int i;

         if (!conn->lgr || conn->lgr->is_smcd)
                 return;
-        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-                                  conn->rmb_desc, DMA_FROM_DEVICE);
+        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+                if (!smc_link_active(&conn->lgr->lnk[i]))
+                        continue;
+                smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
+                                          DMA_FROM_DEVICE);
+        }
 }

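All four sync wrappers now hand a struct smc_link to the smc_ib helpers instead of a struct smc_ib_device, so the prototypes in smc_ib.h are presumably updated along these lines (sketch; not part of this hunk):

void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
                            struct smc_buf_desc *buf_desc,
                            enum dma_data_direction data_direction);
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
                               struct smc_buf_desc *buf_desc,
                               enum dma_data_direction data_direction);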
 /* create the send and receive buffer for an SMC socket;
@@ -938,8 +1789,13 @@
                 return rc;
         /* create rmb */
         rc = __smc_buf_create(smc, is_smcd, true);
-        if (rc)
+        if (rc) {
+                mutex_lock(&smc->conn.lgr->sndbufs_lock);
+                list_del(&smc->conn.sndbuf_desc->list);
+                mutex_unlock(&smc->conn.lgr->sndbufs_lock);
                 smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+                smc->conn.sndbuf_desc = NULL;
+        }
         return rc;
 }

@@ -954,16 +1810,64 @@
         return -ENOSPC;
 }

-/* add a new rtoken from peer */
-int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
+static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
+                                   u32 rkey)
 {
+        int i;
+
+        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
+                if (test_bit(i, lgr->rtokens_used_mask) &&
+                    lgr->rtokens[i][lnk_idx].rkey == rkey)
+                        return i;
+        }
+        return -ENOENT;
+}
+
+/* set rtoken for a new link to an existing rmb */
+void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
+                    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
+{
+        int rtok_idx;
+
+        rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
+        if (rtok_idx == -ENOENT)
+                return;
+        lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
+        lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
+}
+
+/* set rtoken for a new link whose link_id is given */
+void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
+                     __be64 nw_vaddr, __be32 nw_rkey)
+{
+        u64 dma_addr = be64_to_cpu(nw_vaddr);
+        u32 rkey = ntohl(nw_rkey);
+        bool found = false;
+        int link_idx;
+
+        for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
+                if (lgr->lnk[link_idx].link_id == link_id) {
+                        found = true;
+                        break;
+                }
+        }
+        if (!found)
+                return;
+        lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
+        lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
+}
+
+/* add a new rtoken from peer */
+int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
+{
+        struct smc_link_group *lgr = smc_get_lgr(lnk);
         u64 dma_addr = be64_to_cpu(nw_vaddr);
         u32 rkey = ntohl(nw_rkey);
         int i;

         for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
-                if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
-                    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
+                if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
+                    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
                     test_bit(i, lgr->rtokens_used_mask)) {
                         /* already in list */
                         return i;
@@ -972,23 +1876,25 @@
         i = smc_rmb_reserve_rtoken_idx(lgr);
         if (i < 0)
                 return i;
-        lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
-        lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
+        lgr->rtokens[i][lnk->link_idx].rkey = rkey;
+        lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
         return i;
 }

-/* delete an rtoken */
-int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
+/* delete an rtoken from all links */
+int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
 {
+        struct smc_link_group *lgr = smc_get_lgr(lnk);
         u32 rkey = ntohl(nw_rkey);
-        int i;
+        int i, j;

         for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
-                if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
+                if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
                     test_bit(i, lgr->rtokens_used_mask)) {
-                        lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
-                        lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;
-
+                        for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
+                                lgr->rtokens[i][j].rkey = 0;
+                                lgr->rtokens[i][j].dma_addr = 0;
+                        }
                         clear_bit(i, lgr->rtokens_used_mask);
                         return 0;
                 }
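The rtoken table is a two-dimensional array indexed by rtoken slot and link index; smc_rtoken_add()/smc_rtoken_set() fill a row, and the data path reads the entry for whichever link it currently sends on. A hypothetical accessor (not in this patch) to illustrate the layout:

static inline u32 smcr_peer_rkey(struct smc_connection *conn,
                                 struct smc_link *lnk)
{
        /* one row per rtoken, one column per link in the group */
        return conn->lgr->rtokens[conn->rtoken_idx][lnk->link_idx].rkey;
}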
@@ -998,36 +1904,72 @@

 /* save rkey and dma_addr received from peer during clc handshake */
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
+                            struct smc_link *lnk,
                             struct smc_clc_msg_accept_confirm *clc)
 {
-        conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
-                                          clc->rmb_rkey);
+        conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
+                                          clc->r0.rmb_rkey);
         if (conn->rtoken_idx < 0)
                 return conn->rtoken_idx;
         return 0;
 }

+static void smc_core_going_away(void)
+{
+        struct smc_ib_device *smcibdev;
+        struct smcd_dev *smcd;
+
+        mutex_lock(&smc_ib_devices.mutex);
+        list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
+                int i;
+
+                for (i = 0; i < SMC_MAX_PORTS; i++)
+                        set_bit(i, smcibdev->ports_going_away);
+        }
+        mutex_unlock(&smc_ib_devices.mutex);
+
+        mutex_lock(&smcd_dev_list.mutex);
+        list_for_each_entry(smcd, &smcd_dev_list.list, list) {
+                smcd->going_away = 1;
+        }
+        mutex_unlock(&smcd_dev_list.mutex);
+}
+
+/* Clean up all SMC link groups */
+static void smc_lgrs_shutdown(void)
+{
+        struct smcd_dev *smcd;
+
+        smc_core_going_away();
+
+        smc_smcr_terminate_all(NULL);
+
+        mutex_lock(&smcd_dev_list.mutex);
+        list_for_each_entry(smcd, &smcd_dev_list.list, list)
+                smc_smcd_terminate_all(smcd);
+        mutex_unlock(&smcd_dev_list.mutex);
+}
+
+static int smc_core_reboot_event(struct notifier_block *this,
+                                 unsigned long event, void *ptr)
+{
+        smc_lgrs_shutdown();
+        smc_ib_unregister_client();
+        return 0;
+}
+
+static struct notifier_block smc_reboot_notifier = {
+        .notifier_call = smc_core_reboot_event,
+};
+
+int __init smc_core_init(void)
+{
+        return register_reboot_notifier(&smc_reboot_notifier);
+}
+
 /* Called (from smc_exit) when module is removed */
 void smc_core_exit(void)
 {
-        struct smc_link_group *lgr, *lg;
-        LIST_HEAD(lgr_freeing_list);
-
-        spin_lock_bh(&smc_lgr_list.lock);
-        if (!list_empty(&smc_lgr_list.list))
-                list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
-        spin_unlock_bh(&smc_lgr_list.lock);
-        list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
-                list_del_init(&lgr->list);
-                if (!lgr->is_smcd) {
-                        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
-
-                        if (lnk->state == SMC_LNK_ACTIVE)
-                                smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
-                                                         false);
-                        smc_llc_link_inactive(lnk);
-                }
-                cancel_delayed_work_sync(&lgr->free_work);
-                smc_lgr_free(lgr); /* free link group */
-        }
+        unregister_reboot_notifier(&smc_reboot_notifier);
+        smc_lgrs_shutdown();
 }
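smc_core_init() does nothing beyond registering the reboot notifier; the module entry point in af_smc.c presumably calls it during smc_init() and pairs it with smc_core_exit() on unload, roughly (sketch, assuming the usual init/unwind style there):

static int __init smc_init(void)
{
        int rc;

        rc = smc_core_init();   /* assumed call site in af_smc.c */
        if (rc)
                return rc;
        /* ... register proto, socket family, pnet table, workqueues ... */
        return 0;
}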