forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/net/smc/smc_core.c
@@ -13,6 +13,9 @@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/reboot.h>
+#include <linux/mutex.h>
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <rdma/ib_verbs.h>
@@ -31,7 +34,6 @@
 #define SMC_LGR_NUM_INCR		256
 #define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
 #define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
-#define SMC_LGR_FREE_DELAY_FAST		(8 * HZ)

 static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
 	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
@@ -39,8 +41,27 @@
 	.num = 0,
 };

+static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
+static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
+
 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
 			 struct smc_buf_desc *buf_desc);
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
+
+static void smc_link_down_work(struct work_struct *work);
+
+/* return head of link group list and its lock for a given link group */
+static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
+						  spinlock_t **lgr_lock)
+{
+	if (lgr->is_smcd) {
+		*lgr_lock = &lgr->smcd->lgr_lock;
+		return &lgr->smcd->lgr_list;
+	}
+
+	*lgr_lock = &smc_lgr_list.lock;
+	return &smc_lgr_list.list;
+}

 static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
 {
@@ -48,14 +69,12 @@
 	 * creation. For client use a somewhat higher removal delay time,
 	 * otherwise there is a risk of out-of-sync link groups.
 	 */
-	mod_delayed_work(system_wq, &lgr->free_work,
-			 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
-			 SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV);
-}
-
-void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
-{
-	mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST);
+	if (!lgr->freeing) {
+		mod_delayed_work(system_wq, &lgr->free_work,
+				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
+				 SMC_LGR_FREE_DELAY_CLNT :
+				 SMC_LGR_FREE_DELAY_SERV);
+	}
 }

 /* Register connection's alert token in our lookup structure.
@@ -85,16 +104,60 @@
 	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
 }

+/* assign an SMC-R link to the connection */
+static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
+{
+	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
+				       SMC_LNK_ACTIVE;
+	int i, j;
+
+	/* do link balancing */
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		struct smc_link *lnk = &conn->lgr->lnk[i];
+
+		if (lnk->state != expected || lnk->link_is_asym)
+			continue;
+		if (conn->lgr->role == SMC_CLNT) {
+			conn->lnk = lnk; /* temporary, SMC server assigns link*/
+			break;
+		}
+		if (conn->lgr->conns_num % 2) {
+			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
+				struct smc_link *lnk2;
+
+				lnk2 = &conn->lgr->lnk[j];
+				if (lnk2->state == expected &&
+				    !lnk2->link_is_asym) {
+					conn->lnk = lnk2;
+					break;
+				}
+			}
+		}
+		if (!conn->lnk)
+			conn->lnk = lnk;
+		break;
+	}
+	if (!conn->lnk)
+		return SMC_CLC_DECL_NOACTLINK;
+	return 0;
+}
+
 /* Register connection in link group by assigning an alert token
  * registered in a search tree.
  * Requires @conns_lock
  * Note that '0' is a reserved value and not assigned.
  */
-static void smc_lgr_register_conn(struct smc_connection *conn)
+static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
 {
 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 	static atomic_t nexttoken = ATOMIC_INIT(0);
+	int rc;

+	if (!conn->lgr->is_smcd) {
+		rc = smcr_lgr_conn_assign_link(conn, first);
+		if (rc)
+			return rc;
+	}
 	/* find a new alert_token_local value not yet used by some connection
 	 * in this link group
 	 */
@@ -106,6 +169,7 @@
 	}
 	smc_lgr_add_alert_token(conn);
 	conn->lgr->conns_num++;
+	return 0;
 }

 /* Unregister connection and reset the alert token of the given connection<
@@ -118,7 +182,6 @@
 	rb_erase(&conn->alert_node, &lgr->conns_all);
 	lgr->conns_num--;
 	conn->alert_token_local = 0;
-	conn->lgr = NULL;
 	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
 }

@@ -135,143 +198,152 @@
 		__smc_lgr_unregister_conn(conn);
 	}
 	write_unlock_bh(&lgr->conns_lock);
+	conn->lgr = NULL;
 }

-/* Send delete link, either as client to request the initiation
- * of the DELETE LINK sequence from server; or as server to
- * initiate the delete processing. See smc_llc_rx_delete_link().
- */
-static int smc_link_send_delete(struct smc_link *lnk)
+void smc_lgr_cleanup_early(struct smc_connection *conn)
 {
-	if (lnk->state == SMC_LNK_ACTIVE &&
-	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
-		smc_llc_link_deleting(lnk);
-		return 0;
-	}
-	return -ENOTCONN;
+	struct smc_link_group *lgr = conn->lgr;
+	spinlock_t *lgr_lock;
+
+	if (!lgr)
+		return;
+
+	smc_conn_free(conn);
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
+	/* do not use this link group for new connections */
+	if (!list_empty(&lgr->list))
+		list_del_init(&lgr->list);
+	spin_unlock_bh(lgr_lock);
+	__smc_lgr_terminate(lgr, true);
 }
+
+static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
+{
+	int i;
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		struct smc_link *lnk = &lgr->lnk[i];
+
+		if (smc_link_sendable(lnk))
+			lnk->state = SMC_LNK_INACTIVE;
+	}
+	wake_up_all(&lgr->llc_msg_waiter);
+	wake_up_all(&lgr->llc_flow_waiter);
+}
+
+static void smc_lgr_free(struct smc_link_group *lgr);

 static void smc_lgr_free_work(struct work_struct *work)
 {
 	struct smc_link_group *lgr = container_of(to_delayed_work(work),
 						  struct smc_link_group,
 						  free_work);
+	spinlock_t *lgr_lock;
 	bool conns;

-	spin_lock_bh(&smc_lgr_list.lock);
-	if (list_empty(&lgr->list))
-		goto free;
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
+	if (lgr->freeing) {
+		spin_unlock_bh(lgr_lock);
+		return;
+	}
 	read_lock_bh(&lgr->conns_lock);
 	conns = RB_EMPTY_ROOT(&lgr->conns_all);
 	read_unlock_bh(&lgr->conns_lock);
 	if (!conns) { /* number of lgr connections is no longer zero */
-		spin_unlock_bh(&smc_lgr_list.lock);
+		spin_unlock_bh(lgr_lock);
 		return;
 	}
 	list_del_init(&lgr->list); /* remove from smc_lgr_list */
-free:
-	spin_unlock_bh(&smc_lgr_list.lock);
+	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
+	spin_unlock_bh(lgr_lock);
+	cancel_delayed_work(&lgr->free_work);

-	if (!lgr->is_smcd && !lgr->terminating) {
-		/* try to send del link msg, on error free lgr immediately */
-		if (!smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK])) {
-			/* reschedule in case we never receive a response */
-			smc_lgr_schedule_free_work(lgr);
-			return;
-		}
-	}
-
-	if (!delayed_work_pending(&lgr->free_work)) {
-		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
-
-		if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
-			smc_llc_link_inactive(lnk);
-		smc_lgr_free(lgr);
-	}
+	if (!lgr->is_smcd && !lgr->terminating)
+		smc_llc_send_link_delete_all(lgr, true,
+					     SMC_LLC_DEL_PROG_INIT_TERM);
+	if (lgr->is_smcd && !lgr->terminating)
+		smc_ism_signal_shutdown(lgr);
+	if (!lgr->is_smcd)
+		smcr_lgr_link_deactivate_all(lgr);
+	smc_lgr_free(lgr);
 }

-/* create a new SMC link group */
-static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
-			  struct smc_ib_device *smcibdev, u8 ibport,
-			  char *peer_systemid, unsigned short vlan_id,
-			  struct smcd_dev *smcismdev, u64 peer_gid)
+static void smc_lgr_terminate_work(struct work_struct *work)
 {
-	struct smc_link_group *lgr;
-	struct smc_link *lnk;
-	u8 rndvec[3];
-	int rc = 0;
+	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
+						  terminate_work);
+
+	__smc_lgr_terminate(lgr, true);
+}
+
+/* return next unique link id for the lgr */
+static u8 smcr_next_link_id(struct smc_link_group *lgr)
+{
+	u8 link_id;
 	int i;

-	if (is_smcd && vlan_id) {
-		rc = smc_ism_get_vlan(smcismdev, vlan_id);
+	while (1) {
+again:
+		link_id = ++lgr->next_link_id;
+		if (!link_id)	/* skip zero as link_id */
+			link_id = ++lgr->next_link_id;
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			if (smc_link_usable(&lgr->lnk[i]) &&
+			    lgr->lnk[i].link_id == link_id)
+				goto again;
+		}
+		break;
+	}
+	return link_id;
+}
+
+int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
+		   u8 link_idx, struct smc_init_info *ini)
+{
+	u8 rndvec[3];
+	int rc;
+
+	get_device(&ini->ib_dev->ibdev->dev);
+	atomic_inc(&ini->ib_dev->lnk_cnt);
+	lnk->link_id = smcr_next_link_id(lgr);
+	lnk->lgr = lgr;
+	lnk->link_idx = link_idx;
+	lnk->smcibdev = ini->ib_dev;
+	lnk->ibport = ini->ib_port;
+	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
+	smc_llc_link_set_uid(lnk);
+	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
+	if (!ini->ib_dev->initialized) {
+		rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
 		if (rc)
 			goto out;
 	}
-
-	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
-	if (!lgr) {
-		rc = -ENOMEM;
+	get_random_bytes(rndvec, sizeof(rndvec));
+	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
+		(rndvec[2] << 16);
+	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
+				  ini->vlan_id, lnk->gid, &lnk->sgid_index);
+	if (rc)
 		goto out;
-	}
-	lgr->is_smcd = is_smcd;
-	lgr->sync_err = 0;
-	lgr->vlan_id = vlan_id;
-	rwlock_init(&lgr->sndbufs_lock);
-	rwlock_init(&lgr->rmbs_lock);
-	rwlock_init(&lgr->conns_lock);
-	for (i = 0; i < SMC_RMBE_SIZES; i++) {
-		INIT_LIST_HEAD(&lgr->sndbufs[i]);
-		INIT_LIST_HEAD(&lgr->rmbs[i]);
-	}
-	smc_lgr_list.num += SMC_LGR_NUM_INCR;
-	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
-	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
-	lgr->conns_all = RB_ROOT;
-	if (is_smcd) {
-		/* SMC-D specific settings */
-		lgr->peer_gid = peer_gid;
-		lgr->smcd = smcismdev;
-	} else {
-		/* SMC-R specific settings */
-		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
-		memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
-
-		lnk = &lgr->lnk[SMC_SINGLE_LINK];
-		/* initialize link */
-		lnk->state = SMC_LNK_ACTIVATING;
-		lnk->link_id = SMC_SINGLE_LINK;
-		lnk->smcibdev = smcibdev;
-		lnk->ibport = ibport;
-		lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
-		if (!smcibdev->initialized)
-			smc_ib_setup_per_ibdev(smcibdev);
-		get_random_bytes(rndvec, sizeof(rndvec));
-		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
-			(rndvec[2] << 16);
-		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
-					  vlan_id, lnk->gid, &lnk->sgid_index);
-		if (rc)
-			goto free_lgr;
-		rc = smc_llc_link_init(lnk);
-		if (rc)
-			goto free_lgr;
-		rc = smc_wr_alloc_link_mem(lnk);
-		if (rc)
-			goto clear_llc_lnk;
-		rc = smc_ib_create_protection_domain(lnk);
-		if (rc)
-			goto free_link_mem;
-		rc = smc_ib_create_queue_pair(lnk);
-		if (rc)
-			goto dealloc_pd;
-		rc = smc_wr_create_link(lnk);
-		if (rc)
-			goto destroy_qp;
-	}
-	smc->conn.lgr = lgr;
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_add(&lgr->list, &smc_lgr_list.list);
-	spin_unlock_bh(&smc_lgr_list.lock);
+	rc = smc_llc_link_init(lnk);
+	if (rc)
+		goto out;
+	rc = smc_wr_alloc_link_mem(lnk);
+	if (rc)
+		goto clear_llc_lnk;
+	rc = smc_ib_create_protection_domain(lnk);
+	if (rc)
+		goto free_link_mem;
+	rc = smc_ib_create_queue_pair(lnk);
+	if (rc)
+		goto dealloc_pd;
+	rc = smc_wr_create_link(lnk);
+	if (rc)
+		goto destroy_qp;
+	lnk->state = SMC_LNK_ACTIVATING;
 	return 0;

 destroy_qp:
@@ -281,11 +353,285 @@
 free_link_mem:
 	smc_wr_free_link_mem(lnk);
 clear_llc_lnk:
-	smc_llc_link_clear(lnk);
+	smc_llc_link_clear(lnk, false);
+out:
+	put_device(&ini->ib_dev->ibdev->dev);
+	memset(lnk, 0, sizeof(struct smc_link));
+	lnk->state = SMC_LNK_UNUSED;
+	if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
+		wake_up(&ini->ib_dev->lnks_deleted);
+	return rc;
+}
+
+/* create a new SMC link group */
+static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
+{
+	struct smc_link_group *lgr;
+	struct list_head *lgr_list;
+	struct smc_link *lnk;
+	spinlock_t *lgr_lock;
+	u8 link_idx;
+	int rc = 0;
+	int i;
+
+	if (ini->is_smcd && ini->vlan_id) {
+		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
+				     ini->vlan_id)) {
+			rc = SMC_CLC_DECL_ISMVLANERR;
+			goto out;
+		}
+	}
+
+	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
+	if (!lgr) {
+		rc = SMC_CLC_DECL_MEM;
+		goto ism_put_vlan;
+	}
+	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
+				     SMC_LGR_ID_SIZE, &lgr->id);
+	if (!lgr->tx_wq) {
+		rc = -ENOMEM;
+		goto free_lgr;
+	}
+	lgr->is_smcd = ini->is_smcd;
+	lgr->sync_err = 0;
+	lgr->terminating = 0;
+	lgr->freeing = 0;
+	lgr->vlan_id = ini->vlan_id;
+	mutex_init(&lgr->sndbufs_lock);
+	mutex_init(&lgr->rmbs_lock);
+	rwlock_init(&lgr->conns_lock);
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		INIT_LIST_HEAD(&lgr->sndbufs[i]);
+		INIT_LIST_HEAD(&lgr->rmbs[i]);
+	}
+	lgr->next_link_id = 0;
+	smc_lgr_list.num += SMC_LGR_NUM_INCR;
+	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
+	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
+	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
+	lgr->conns_all = RB_ROOT;
+	if (ini->is_smcd) {
+		/* SMC-D specific settings */
+		get_device(&ini->ism_dev[ini->ism_selected]->dev);
+		lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
+		lgr->smcd = ini->ism_dev[ini->ism_selected];
+		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
+		lgr_lock = &lgr->smcd->lgr_lock;
+		lgr->smc_version = ini->smcd_version;
+		lgr->peer_shutdown = 0;
+		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
+	} else {
+		/* SMC-R specific settings */
+		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
+		       SMC_SYSTEMID_LEN);
+		memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
+		       SMC_MAX_PNETID_LEN);
+		smc_llc_lgr_init(lgr, smc);
+
+		link_idx = SMC_SINGLE_LINK;
+		lnk = &lgr->lnk[link_idx];
+		rc = smcr_link_init(lgr, lnk, link_idx, ini);
+		if (rc)
+			goto free_wq;
+		lgr_list = &smc_lgr_list.list;
+		lgr_lock = &smc_lgr_list.lock;
+		atomic_inc(&lgr_cnt);
+	}
+	smc->conn.lgr = lgr;
+	spin_lock_bh(lgr_lock);
+	list_add_tail(&lgr->list, lgr_list);
+	spin_unlock_bh(lgr_lock);
+	return 0;
+
+free_wq:
+	destroy_workqueue(lgr->tx_wq);
 free_lgr:
 	kfree(lgr);
+ism_put_vlan:
+	if (ini->is_smcd && ini->vlan_id)
+		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
 out:
+	if (rc < 0) {
+		if (rc == -ENOMEM)
+			rc = SMC_CLC_DECL_MEM;
+		else
+			rc = SMC_CLC_DECL_INTERR;
+	}
 	return rc;
+}
+
+static int smc_write_space(struct smc_connection *conn)
+{
+	int buffer_len = conn->peer_rmbe_size;
+	union smc_host_cursor prod;
+	union smc_host_cursor cons;
+	int space;
+
+	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
+	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
+	/* determine rx_buf space */
+	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
+	return space;
+}
+
+static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
+			     struct smc_wr_buf *wr_buf)
+{
+	struct smc_connection *conn = &smc->conn;
+	union smc_host_cursor cons, fin;
+	int rc = 0;
+	int diff;
+
+	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
+	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
+	/* set prod cursor to old state, enforce tx_rdma_writes() */
+	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
+	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
+
+	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
+		/* cons cursor advanced more than fin, and prod was set
+		 * fin above, so now prod is smaller than cons. Fix that.
+		 */
+		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
+		smc_curs_add(conn->sndbuf_desc->len,
+			     &conn->tx_curs_sent, diff);
+		smc_curs_add(conn->sndbuf_desc->len,
+			     &conn->tx_curs_fin, diff);
+
+		smp_mb__before_atomic();
+		atomic_add(diff, &conn->sndbuf_space);
+		smp_mb__after_atomic();
+
+		smc_curs_add(conn->peer_rmbe_size,
+			     &conn->local_tx_ctrl.prod, diff);
+		smc_curs_add(conn->peer_rmbe_size,
+			     &conn->local_tx_ctrl_fin, diff);
+	}
+	/* recalculate, value is used by tx_rdma_writes() */
+	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));
+
+	if (smc->sk.sk_state != SMC_INIT &&
+	    smc->sk.sk_state != SMC_CLOSED) {
+		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
+		if (!rc) {
+			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
+			smc->sk.sk_data_ready(&smc->sk);
+		}
+	} else {
+		smc_wr_tx_put_slot(conn->lnk,
+				   (struct smc_wr_tx_pend_priv *)pend);
+	}
+	return rc;
+}
+
+struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
+				  struct smc_link *from_lnk, bool is_dev_err)
+{
+	struct smc_link *to_lnk = NULL;
+	struct smc_cdc_tx_pend *pend;
+	struct smc_connection *conn;
+	struct smc_wr_buf *wr_buf;
+	struct smc_sock *smc;
+	struct rb_node *node;
+	int i, rc = 0;
+
+	/* link is inactive, wake up tx waiters */
+	smc_wr_wakeup_tx_wait(from_lnk);
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
+			continue;
+		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
+		    from_lnk->ibport == lgr->lnk[i].ibport) {
+			continue;
+		}
+		to_lnk = &lgr->lnk[i];
+		break;
+	}
+	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
+		smc_lgr_terminate_sched(lgr);
+		return NULL;
+	}
+again:
+	read_lock_bh(&lgr->conns_lock);
+	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
+		conn = rb_entry(node, struct smc_connection, alert_node);
+		if (conn->lnk != from_lnk)
+			continue;
+		smc = container_of(conn, struct smc_sock, conn);
+		/* conn->lnk not yet set in SMC_INIT state */
+		if (smc->sk.sk_state == SMC_INIT)
+			continue;
+		if (smc->sk.sk_state == SMC_CLOSED ||
+		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
+		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
+		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
+		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
+		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
+		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
+		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
+		    smc->sk.sk_state == SMC_PROCESSABORT) {
+			spin_lock_bh(&conn->send_lock);
+			conn->lnk = to_lnk;
+			spin_unlock_bh(&conn->send_lock);
+			continue;
+		}
+		sock_hold(&smc->sk);
+		read_unlock_bh(&lgr->conns_lock);
+		/* pre-fetch buffer outside of send_lock, might sleep */
+		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
+		if (rc)
+			goto err_out;
+		/* avoid race with smcr_tx_sndbuf_nonempty() */
+		spin_lock_bh(&conn->send_lock);
+		conn->lnk = to_lnk;
+		rc = smc_switch_cursor(smc, pend, wr_buf);
+		spin_unlock_bh(&conn->send_lock);
+		sock_put(&smc->sk);
+		if (rc)
+			goto err_out;
+		goto again;
+	}
+	read_unlock_bh(&lgr->conns_lock);
+	smc_wr_tx_link_put(to_lnk);
+	return to_lnk;
+
+err_out:
+	smcr_link_down_cond_sched(to_lnk);
+	smc_wr_tx_link_put(to_lnk);
+	return NULL;
+}
+
+static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
+			   struct smc_link_group *lgr)
+{
+	int rc;
+
+	if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
+		/* unregister rmb with peer */
+		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
+		if (!rc) {
+			/* protect against smc_llc_cli_rkey_exchange() */
+			mutex_lock(&lgr->llc_conf_mutex);
+			smc_llc_do_delete_rkey(lgr, rmb_desc);
+			rmb_desc->is_conf_rkey = false;
+			mutex_unlock(&lgr->llc_conf_mutex);
+			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
+		}
+	}
+
+	if (rmb_desc->is_reg_err) {
+		/* buf registration failed, reuse not possible */
+		mutex_lock(&lgr->rmbs_lock);
+		list_del(&rmb_desc->list);
+		mutex_unlock(&lgr->rmbs_lock);
+
+		smc_buf_free(lgr, true, rmb_desc);
+	} else {
+		rmb_desc->used = 0;
+	}
 }

 static void smc_buf_unuse(struct smc_connection *conn,
@@ -293,19 +639,10 @@
 {
 	if (conn->sndbuf_desc)
 		conn->sndbuf_desc->used = 0;
-	if (conn->rmb_desc) {
-		if (!conn->rmb_desc->regerr) {
-			conn->rmb_desc->reused = 1;
-			conn->rmb_desc->used = 0;
-		} else {
-			/* buf registration failed, reuse not possible */
-			write_lock_bh(&lgr->rmbs_lock);
-			list_del(&conn->rmb_desc->list);
-			write_unlock_bh(&lgr->rmbs_lock);
-
-			smc_buf_free(lgr, true, conn->rmb_desc);
-		}
-	}
+	if (conn->rmb_desc && lgr->is_smcd)
+		conn->rmb_desc->used = 0;
+	else if (conn->rmb_desc)
+		smcr_buf_unuse(conn->rmb_desc, lgr);
 }

 /* remove a finished connection from its link group */
@@ -316,45 +653,108 @@
 	if (!lgr)
 		return;
 	if (lgr->is_smcd) {
-		smc_ism_unset_conn(conn);
+		if (!list_empty(&lgr->list))
+			smc_ism_unset_conn(conn);
 		tasklet_kill(&conn->rx_tsklet);
 	} else {
-		smc_cdc_tx_dismiss_slots(conn);
+		smc_cdc_wait_pend_tx_wr(conn);
+		if (current_work() != &conn->abort_work)
+			cancel_work_sync(&conn->abort_work);
 	}
-	smc_lgr_unregister_conn(conn);		/* unsets conn->lgr */
-	smc_buf_unuse(conn, lgr);		/* allow buffer reuse */
+	if (!list_empty(&lgr->list)) {
+		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
+		smc_lgr_unregister_conn(conn);
+	}

 	if (!lgr->conns_num)
 		smc_lgr_schedule_free_work(lgr);
 }

-static void smc_link_clear(struct smc_link *lnk)
+/* unregister a link from a buf_desc */
+static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
+				struct smc_link *lnk)
 {
+	if (is_rmb)
+		buf_desc->is_reg_mr[lnk->link_idx] = false;
+	if (!buf_desc->is_map_ib[lnk->link_idx])
+		return;
+	if (is_rmb) {
+		if (buf_desc->mr_rx[lnk->link_idx]) {
+			smc_ib_put_memory_region(
+					buf_desc->mr_rx[lnk->link_idx]);
+			buf_desc->mr_rx[lnk->link_idx] = NULL;
+		}
+		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
+	} else {
+		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
+	}
+	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
+	buf_desc->is_map_ib[lnk->link_idx] = false;
+}
+
+/* unmap all buffers of lgr for a deleted link */
+static void smcr_buf_unmap_lgr(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	struct smc_buf_desc *buf_desc, *bf;
+	int i;
+
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		mutex_lock(&lgr->rmbs_lock);
+		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
+			smcr_buf_unmap_link(buf_desc, true, lnk);
+		mutex_unlock(&lgr->rmbs_lock);
+		mutex_lock(&lgr->sndbufs_lock);
+		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
+					 list)
+			smcr_buf_unmap_link(buf_desc, false, lnk);
+		mutex_unlock(&lgr->sndbufs_lock);
+	}
+}
+
+static void smcr_rtoken_clear_link(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	int i;
+
+	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
+		lgr->rtokens[i][lnk->link_idx].rkey = 0;
+		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
+	}
+}
+
+/* must be called under lgr->llc_conf_mutex lock */
+void smcr_link_clear(struct smc_link *lnk, bool log)
+{
+	struct smc_ib_device *smcibdev;
+
+	if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
+		return;
 	lnk->peer_qpn = 0;
-	smc_llc_link_clear(lnk);
-	smc_ib_modify_qp_reset(lnk);
+	smc_llc_link_clear(lnk, log);
+	smcr_buf_unmap_lgr(lnk);
+	smcr_rtoken_clear_link(lnk);
+	smc_ib_modify_qp_error(lnk);
 	smc_wr_free_link(lnk);
 	smc_ib_destroy_queue_pair(lnk);
 	smc_ib_dealloc_protection_domain(lnk);
 	smc_wr_free_link_mem(lnk);
+	put_device(&lnk->smcibdev->ibdev->dev);
+	smcibdev = lnk->smcibdev;
+	memset(lnk, 0, sizeof(struct smc_link));
+	lnk->state = SMC_LNK_UNUSED;
+	if (!atomic_dec_return(&smcibdev->lnk_cnt))
+		wake_up(&smcibdev->lnks_deleted);
 }

 static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
 			  struct smc_buf_desc *buf_desc)
 {
-	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+	int i;

-	if (is_rmb) {
-		if (buf_desc->mr_rx[SMC_SINGLE_LINK])
-			smc_ib_put_memory_region(
-					buf_desc->mr_rx[SMC_SINGLE_LINK]);
-		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
-				    DMA_FROM_DEVICE);
-	} else {
-		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
-				    DMA_TO_DEVICE);
-	}
-	sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
+		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);
+
 	if (buf_desc->pages)
 		__free_pages(buf_desc->pages, buf_desc->order);
 	kfree(buf_desc);
@@ -410,27 +810,100 @@
 }

 /* remove a link group */
-void smc_lgr_free(struct smc_link_group *lgr)
+static void smc_lgr_free(struct smc_link_group *lgr)
 {
+	int i;
+
+	if (!lgr->is_smcd) {
+		mutex_lock(&lgr->llc_conf_mutex);
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
+				smcr_link_clear(&lgr->lnk[i], false);
+		}
+		mutex_unlock(&lgr->llc_conf_mutex);
+		smc_llc_lgr_clear(lgr);
+	}
+
 	smc_lgr_free_bufs(lgr);
-	if (lgr->is_smcd)
+	destroy_workqueue(lgr->tx_wq);
+	if (lgr->is_smcd) {
 		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
-	else
-		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
+		put_device(&lgr->smcd->dev);
+		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
+			wake_up(&lgr->smcd->lgrs_deleted);
+	} else {
+		if (!atomic_dec_return(&lgr_cnt))
+			wake_up(&lgrs_deleted);
+	}
 	kfree(lgr);
 }

-void smc_lgr_forget(struct smc_link_group *lgr)
+static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
 {
-	spin_lock_bh(&smc_lgr_list.lock);
-	/* do not use this link group for new connections */
-	if (!list_empty(&lgr->list))
-		list_del_init(&lgr->list);
-	spin_unlock_bh(&smc_lgr_list.lock);
+	int i;
+
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		struct smc_buf_desc *buf_desc;
+
+		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
+			buf_desc->len += sizeof(struct smcd_cdc_msg);
+			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+		}
+	}
 }

-/* terminate linkgroup abnormally */
-static void __smc_lgr_terminate(struct smc_link_group *lgr)
+static void smc_sk_wake_ups(struct smc_sock *smc)
+{
+	smc->sk.sk_write_space(&smc->sk);
+	smc->sk.sk_data_ready(&smc->sk);
+	smc->sk.sk_state_change(&smc->sk);
+}
+
+/* kill a connection */
+static void smc_conn_kill(struct smc_connection *conn, bool soft)
+{
+	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+
+	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
+		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+	else
+		smc_close_abort(conn);
+	conn->killed = 1;
+	smc->sk.sk_err = ECONNABORTED;
+	smc_sk_wake_ups(smc);
+	if (conn->lgr->is_smcd) {
+		smc_ism_unset_conn(conn);
+		if (soft)
+			tasklet_kill(&conn->rx_tsklet);
+		else
+			tasklet_unlock_wait(&conn->rx_tsklet);
+	} else {
+		smc_cdc_wait_pend_tx_wr(conn);
+	}
+	smc_lgr_unregister_conn(conn);
+	smc_close_active_abort(smc);
+}
+
+static void smc_lgr_cleanup(struct smc_link_group *lgr)
+{
+	if (lgr->is_smcd) {
+		smc_ism_signal_shutdown(lgr);
+		smcd_unregister_all_dmbs(lgr);
+	} else {
+		u32 rsn = lgr->llc_termination_rsn;
+
+		if (!rsn)
+			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
+		smc_llc_send_link_delete_all(lgr, false, rsn);
+		smcr_lgr_link_deactivate_all(lgr);
+	}
+}
+
+/* terminate link group
+ * @soft: true if link group shutdown can take its time
+ *	  false if immediate link group shutdown is required
+ */
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
 {
 	struct smc_connection *conn;
 	struct smc_sock *smc;
@@ -438,90 +911,326 @@

 	if (lgr->terminating)
 		return;	/* lgr already terminating */
+	/* cancel free_work sync, will terminate when lgr->freeing is set */
+	cancel_delayed_work_sync(&lgr->free_work);
 	lgr->terminating = 1;
-	if (!list_empty(&lgr->list)) /* forget lgr */
-		list_del_init(&lgr->list);
-	if (!lgr->is_smcd)
-		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

-	write_lock_bh(&lgr->conns_lock);
+	/* kill remaining link group connections */
+	read_lock_bh(&lgr->conns_lock);
 	node = rb_first(&lgr->conns_all);
 	while (node) {
+		read_unlock_bh(&lgr->conns_lock);
 		conn = rb_entry(node, struct smc_connection, alert_node);
 		smc = container_of(conn, struct smc_sock, conn);
-		sock_hold(&smc->sk); /* sock_put in close work */
-		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
-		__smc_lgr_unregister_conn(conn);
-		write_unlock_bh(&lgr->conns_lock);
-		if (!schedule_work(&conn->close_work))
-			sock_put(&smc->sk);
-		write_lock_bh(&lgr->conns_lock);
+		sock_hold(&smc->sk); /* sock_put below */
+		lock_sock(&smc->sk);
+		smc_conn_kill(conn, soft);
+		release_sock(&smc->sk);
+		sock_put(&smc->sk); /* sock_hold above */
+		read_lock_bh(&lgr->conns_lock);
 		node = rb_first(&lgr->conns_all);
 	}
-	write_unlock_bh(&lgr->conns_lock);
-	if (!lgr->is_smcd)
-		wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
-	smc_lgr_schedule_free_work(lgr);
+	read_unlock_bh(&lgr->conns_lock);
+	smc_lgr_cleanup(lgr);
+	smc_lgr_free(lgr);
 }

-void smc_lgr_terminate(struct smc_link_group *lgr)
+/* unlink link group and schedule termination */
+void smc_lgr_terminate_sched(struct smc_link_group *lgr)
 {
-	spin_lock_bh(&smc_lgr_list.lock);
-	__smc_lgr_terminate(lgr);
-	spin_unlock_bh(&smc_lgr_list.lock);
-}
+	spinlock_t *lgr_lock;

-/* Called when IB port is terminated */
-void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
-{
-	struct smc_link_group *lgr, *l;
-
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
-		if (!lgr->is_smcd &&
-		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
-		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
-			__smc_lgr_terminate(lgr);
+	smc_lgr_list_head(lgr, &lgr_lock);
+	spin_lock_bh(lgr_lock);
+	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
+		spin_unlock_bh(lgr_lock);
+		return;	/* lgr already terminating */
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	list_del_init(&lgr->list);
+	lgr->freeing = 1;
+	spin_unlock_bh(lgr_lock);
+	schedule_work(&lgr->terminate_work);
 }

-/* Called when SMC-D device is terminated or peer is lost */
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+/* Called when peer lgr shutdown (regularly or abnormally) is received */
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 {
 	struct smc_link_group *lgr, *l;
 	LIST_HEAD(lgr_free_list);

 	/* run common cleanup function and build free list */
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
-		if (lgr->is_smcd && lgr->smcd == dev &&
-		    (!peer_gid || lgr->peer_gid == peer_gid) &&
-		    !list_empty(&lgr->list)) {
-			__smc_lgr_terminate(lgr);
+	spin_lock_bh(&dev->lgr_lock);
+	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
+		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
+		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
+			if (peer_gid) /* peer triggered termination */
+				lgr->peer_shutdown = 1;
 			list_move(&lgr->list, &lgr_free_list);
+			lgr->freeing = 1;
 		}
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(&dev->lgr_lock);

 	/* cancel the regular free workers and actually free lgrs */
 	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
 		list_del_init(&lgr->list);
-		cancel_delayed_work_sync(&lgr->free_work);
-		smc_lgr_free(lgr);
+		schedule_work(&lgr->terminate_work);
 	}
 }

-/* Determine vlan of internal TCP socket.
- * @vlan_id: address to store the determined vlan id into
+/* Called when an SMCD device is removed or the smc module is unloaded */
+void smc_smcd_terminate_all(struct smcd_dev *smcd)
+{
+	struct smc_link_group *lgr, *lg;
+	LIST_HEAD(lgr_free_list);
+
+	spin_lock_bh(&smcd->lgr_lock);
+	list_splice_init(&smcd->lgr_list, &lgr_free_list);
+	list_for_each_entry(lgr, &lgr_free_list, list)
+		lgr->freeing = 1;
+	spin_unlock_bh(&smcd->lgr_lock);
+
+	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+		list_del_init(&lgr->list);
+		__smc_lgr_terminate(lgr, false);
+	}
+
+	if (atomic_read(&smcd->lgr_cnt))
+		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
+}
+
+/* Called when an SMCR device is removed or the smc module is unloaded.
+ * If smcibdev is given, all SMCR link groups using this device are terminated.
+ * If smcibdev is NULL, all SMCR link groups are terminated.
  */
-int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+{
+	struct smc_link_group *lgr, *lg;
+	LIST_HEAD(lgr_free_list);
+	int i;
+
+	spin_lock_bh(&smc_lgr_list.lock);
+	if (!smcibdev) {
+		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
+		list_for_each_entry(lgr, &lgr_free_list, list)
+			lgr->freeing = 1;
+	} else {
+		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
+			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+				if (lgr->lnk[i].smcibdev == smcibdev)
+					smcr_link_down_cond_sched(&lgr->lnk[i]);
+			}
+		}
+	}
+	spin_unlock_bh(&smc_lgr_list.lock);
+
+	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+		list_del_init(&lgr->list);
+		smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
+		__smc_lgr_terminate(lgr, false);
+	}
+
+	if (smcibdev) {
+		if (atomic_read(&smcibdev->lnk_cnt))
+			wait_event(smcibdev->lnks_deleted,
+				   !atomic_read(&smcibdev->lnk_cnt));
+	} else {
+		if (atomic_read(&lgr_cnt))
+			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
+	}
+}
+
+/* set new lgr type and clear all asymmetric link tagging */
+void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
+{
+	char *lgr_type = "";
+	int i;
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
+		if (smc_link_usable(&lgr->lnk[i]))
+			lgr->lnk[i].link_is_asym = false;
+	if (lgr->type == new_type)
+		return;
+	lgr->type = new_type;
+
+	switch (lgr->type) {
+	case SMC_LGR_NONE:
+		lgr_type = "NONE";
+		break;
+	case SMC_LGR_SINGLE:
+		lgr_type = "SINGLE";
+		break;
+	case SMC_LGR_SYMMETRIC:
+		lgr_type = "SYMMETRIC";
+		break;
+	case SMC_LGR_ASYMMETRIC_PEER:
+		lgr_type = "ASYMMETRIC_PEER";
+		break;
+	case SMC_LGR_ASYMMETRIC_LOCAL:
+		lgr_type = "ASYMMETRIC_LOCAL";
+		break;
+	}
+	pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
+			    "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
+			    lgr_type, lgr->pnet_id);
+}
+
+/* set new lgr type and tag a link as asymmetric */
+void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
+			    enum smc_lgr_type new_type, int asym_lnk_idx)
+{
+	smcr_lgr_set_type(lgr, new_type);
+	lgr->lnk[asym_lnk_idx].link_is_asym = true;
+}
+
+/* abort connection, abort_work scheduled from tasklet context */
+static void smc_conn_abort_work(struct work_struct *work)
+{
+	struct smc_connection *conn = container_of(work,
+						   struct smc_connection,
+						   abort_work);
+	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+
+	lock_sock(&smc->sk);
+	smc_conn_kill(conn, true);
+	release_sock(&smc->sk);
+	sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
+}
+
+void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
+{
+	struct smc_link_group *lgr, *n;
+
+	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+		struct smc_link *link;
+
+		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
+			    SMC_MAX_PNETID_LEN) ||
+		    lgr->type == SMC_LGR_SYMMETRIC ||
+		    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+			continue;
+
+		/* trigger local add link processing */
+		link = smc_llc_usable_link(lgr);
+		if (link)
+			smc_llc_add_link_local(link);
+	}
+}
+
+/* link is down - switch connections to alternate link,
+ * must be called under lgr->llc_conf_mutex lock
+ */
+static void smcr_link_down(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	struct smc_link *to_lnk;
+	int del_link_id;
+
+	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
+		return;
+
+	to_lnk = smc_switch_conns(lgr, lnk, true);
+	if (!to_lnk) { /* no backup link available */
+		smcr_link_clear(lnk, true);
+		return;
+	}
+	smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
+	del_link_id = lnk->link_id;
+
+	if (lgr->role == SMC_SERV) {
+		/* trigger local delete link processing */
+		smc_llc_srv_delete_link_local(to_lnk, del_link_id);
+	} else {
+		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
+			/* another llc task is ongoing */
+			mutex_unlock(&lgr->llc_conf_mutex);
+			wait_event_timeout(lgr->llc_flow_waiter,
+					   (list_empty(&lgr->list) ||
+					    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+					   SMC_LLC_WAIT_TIME);
+			mutex_lock(&lgr->llc_conf_mutex);
+		}
+		if (!list_empty(&lgr->list)) {
+			smc_llc_send_delete_link(to_lnk, del_link_id,
+						 SMC_LLC_REQ, true,
+						 SMC_LLC_DEL_LOST_PATH);
+			smcr_link_clear(lnk, true);
+		}
+		wake_up(&lgr->llc_flow_waiter);	/* wake up next waiter */
+	}
+}
+
+/* must be called under lgr->llc_conf_mutex lock */
+void smcr_link_down_cond(struct smc_link *lnk)
+{
+	if (smc_link_downing(&lnk->state))
+		smcr_link_down(lnk);
+}
+
+/* will get the lgr->llc_conf_mutex lock */
+void smcr_link_down_cond_sched(struct smc_link *lnk)
+{
+	if (smc_link_downing(&lnk->state))
+		schedule_work(&lnk->link_down_wrk);
+}
+
+void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
+{
+	struct smc_link_group *lgr, *n;
+	int i;
+
+	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
+			    SMC_MAX_PNETID_LEN))
+			continue; /* lgr is not affected */
+		if (list_empty(&lgr->list))
+			continue;
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			struct smc_link *lnk = &lgr->lnk[i];
+
+			if (smc_link_usable(lnk) &&
+			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
+				smcr_link_down_cond_sched(lnk);
+		}
+	}
+}
+
+static void smc_link_down_work(struct work_struct *work)
+{
+	struct smc_link *link = container_of(work, struct smc_link,
+					     link_down_wrk);
+	struct smc_link_group *lgr = link->lgr;
+
+	if (list_empty(&lgr->list))
+		return;
+	wake_up_all(&lgr->llc_msg_waiter);
+	mutex_lock(&lgr->llc_conf_mutex);
+	smcr_link_down(link);
+	mutex_unlock(&lgr->llc_conf_mutex);
+}
+
+static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
+				  struct netdev_nested_priv *priv)
+{
+	unsigned short *vlan_id = (unsigned short *)priv->data;
+
+	if (is_vlan_dev(lower_dev)) {
+		*vlan_id = vlan_dev_vlan_id(lower_dev);
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Determine vlan of internal TCP socket. */
+int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
 {
 	struct dst_entry *dst = sk_dst_get(clcsock->sk);
+	struct netdev_nested_priv priv;
 	struct net_device *ndev;
-	int i, nest_lvl, rc = 0;
+	int rc = 0;

-	*vlan_id = 0;
+	ini->vlan_id = 0;
 	if (!dst) {
 		rc = -ENOTCONN;
 		goto out;
@@ -533,24 +1242,13 @@

 	ndev = dst->dev;
 	if (is_vlan_dev(ndev)) {
-		*vlan_id = vlan_dev_vlan_id(ndev);
+		ini->vlan_id = vlan_dev_vlan_id(ndev);
 		goto out_rel;
 	}

+	priv.data = (void *)&ini->vlan_id;
 	rtnl_lock();
-	nest_lvl = dev_get_nest_level(ndev);
-	for (i = 0; i < nest_lvl; i++) {
-		struct list_head *lower = &ndev->adj_list.lower;
-
-		if (list_empty(lower))
-			break;
-		lower = lower->next;
-		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
-		if (is_vlan_dev(ndev)) {
-			*vlan_id = vlan_dev_vlan_id(ndev);
-			break;
-		}
-	}
+	netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
 	rtnl_unlock();

 out_rel:
@@ -561,15 +1259,23 @@

 static bool smcr_lgr_match(struct smc_link_group *lgr,
 			   struct smc_clc_msg_local *lcl,
-			   enum smc_lgr_role role)
+			   enum smc_lgr_role role, u32 clcqpn)
 {
-	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
-		       SMC_SYSTEMID_LEN) &&
-		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
-			SMC_GID_SIZE) &&
-		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
-			sizeof(lcl->mac)) &&
-		lgr->role == role;
+	int i;
+
+	if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
+	    lgr->role != role)
+		return false;
+
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		if (!smc_link_active(&lgr->lnk[i]))
+			continue;
+		if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
+		    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
+		    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
+			return true;
+	}
+	return false;
 }

 static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -579,80 +1285,92 @@
 }

 /* create a new SMC connection (and a new link group if necessary) */
-int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-		    struct smc_ib_device *smcibdev, u8 ibport,
-		    struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
-		    u64 peer_gid)
+int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
 {
 	struct smc_connection *conn = &smc->conn;
-	int local_contact = SMC_FIRST_CONTACT;
+	struct list_head *lgr_list;
 	struct smc_link_group *lgr;
-	unsigned short vlan_id;
 	enum smc_lgr_role role;
+	spinlock_t *lgr_lock;
 	int rc = 0;

+	lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
+		   &smc_lgr_list.list;
+	lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
+		   &smc_lgr_list.lock;
+	ini->first_contact_local = 1;
 	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
-	rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
-	if (rc)
-		return rc;
-
-	if ((role == SMC_CLNT) && srv_first_contact)
+	if (role == SMC_CLNT && ini->first_contact_peer)
 		/* create new link group as well */
 		goto create;

 	/* determine if an existing link group can be reused */
-	spin_lock_bh(&smc_lgr_list.lock);
-	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
+	spin_lock_bh(lgr_lock);
+	list_for_each_entry(lgr, lgr_list, list) {
 		write_lock_bh(&lgr->conns_lock);
-		if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
-		     smcr_lgr_match(lgr, lcl, role)) &&
+		if ((ini->is_smcd ?
+		     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
+				    ini->ism_peer_gid[ini->ism_selected]) :
+		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
 		    !lgr->sync_err &&
-		    lgr->vlan_id == vlan_id &&
-		    (role == SMC_CLNT ||
-		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
+		    (ini->smcd_version == SMC_V2 ||
+		     lgr->vlan_id == ini->vlan_id) &&
+		    (role == SMC_CLNT || ini->is_smcd ||
+		     (lgr->conns_num < SMC_RMBS_PER_LGR_MAX &&
+		      !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
 			/* link group found */
-			local_contact = SMC_REUSE_CONTACT;
+			ini->first_contact_local = 0;
 			conn->lgr = lgr;
-			smc_lgr_register_conn(conn); /* add smc conn to lgr */
-			if (delayed_work_pending(&lgr->free_work))
-				cancel_delayed_work(&lgr->free_work);
+			rc = smc_lgr_register_conn(conn, false);
 			write_unlock_bh(&lgr->conns_lock);
+			if (!rc && delayed_work_pending(&lgr->free_work))
+				cancel_delayed_work(&lgr->free_work);
 			break;
 		}
 		write_unlock_bh(&lgr->conns_lock);
 	}
-	spin_unlock_bh(&smc_lgr_list.lock);
+	spin_unlock_bh(lgr_lock);
+	if (rc)
+		return rc;

-	if (role == SMC_CLNT && !srv_first_contact &&
-	    (local_contact == SMC_FIRST_CONTACT)) {
+	if (role == SMC_CLNT && !ini->first_contact_peer &&
+	    ini->first_contact_local) {
 		/* Server reuses a link group, but Client wants to start
 		 * a new one
 		 * send out_of_sync decline, reason synchr. error
 		 */
-		return -ENOLINK;
+		return SMC_CLC_DECL_SYNCERR;
 	}

 create:
-	if (local_contact == SMC_FIRST_CONTACT) {
-		rc = smc_lgr_create(smc, is_smcd, smcibdev, ibport,
-				    lcl->id_for_peer, vlan_id, smcd, peer_gid);
+	if (ini->first_contact_local) {
+		rc = smc_lgr_create(smc, ini);
 		if (rc)
 			goto out;
-		smc_lgr_register_conn(conn); /* add smc conn to lgr */
+		lgr = conn->lgr;
+		write_lock_bh(&lgr->conns_lock);
+		rc = smc_lgr_register_conn(conn, true);
+		write_unlock_bh(&lgr->conns_lock);
+		if (rc)
+			goto out;
 	}
 	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
 	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 	conn->urg_state = SMC_URG_READ;
-	if (is_smcd) {
+	init_waitqueue_head(&conn->cdc_pend_tx_wq);
+	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
+	if (ini->is_smcd) {
 		conn->rx_off = sizeof(struct smcd_cdc_msg);
 		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+	} else {
+		conn->rx_off = 0;
 	}
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&conn->acurs_lock);
 #endif

 out:
-	return rc ? rc : local_contact;
+	return rc;
 }

 /* convert the RMB size into the compressed notation - minimum 16K.
@@ -686,19 +1404,19 @@
  * buffer size; if not available, return NULL
  */
 static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
-					     rwlock_t *lock,
+					     struct mutex *lock,
 					     struct list_head *buf_list)
 {
 	struct smc_buf_desc *buf_slot;

-	read_lock_bh(lock);
+	mutex_lock(lock);
 	list_for_each_entry(buf_slot, buf_list, list) {
 		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
-			read_unlock_bh(lock);
+			mutex_unlock(lock);
 			return buf_slot;
 		}
 	}
-	read_unlock_bh(lock);
+	mutex_unlock(lock);
 	return NULL;
 }

@@ -708,15 +1426,138 @@
  */
 static inline int smc_rmb_wnd_update_limit(int rmbe_size)
 {
-	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
+	return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
+}
+
+/* map an rmb buf to a link */
+static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
+			     struct smc_link *lnk)
+{
+	int rc;
+
+	if (buf_desc->is_map_ib[lnk->link_idx])
+		return 0;
+
+	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
+	if (rc)
+		return rc;
+	sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
+		   buf_desc->cpu_addr, buf_desc->len);
+
+	/* map sg table to DMA address */
+	rc = smc_ib_buf_map_sg(lnk, buf_desc,
+			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	/* SMC protocol depends on mapping to one DMA address only */
+	if (rc != 1) {
+		rc = -EAGAIN;
+		goto free_table;
+	}
+
+	/* create a new memory region for the RMB */
+	if (is_rmb) {
+		rc = smc_ib_get_memory_region(lnk->roce_pd,
+					      IB_ACCESS_REMOTE_WRITE |
+					      IB_ACCESS_LOCAL_WRITE,
+					      buf_desc, lnk->link_idx);
+		if (rc)
+			goto buf_unmap;
+		smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
+	}
+	buf_desc->is_map_ib[lnk->link_idx] = true;
+	return 0;
+
+buf_unmap:
+	smc_ib_buf_unmap_sg(lnk, buf_desc,
+			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+free_table:
+	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
+	return rc;
+}
+
+/* register a new rmb on IB device,
+ * must be called under lgr->llc_conf_mutex lock
+ */
+int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
+{
+	if (list_empty(&link->lgr->list))
+		return -ENOLINK;
+	if (!rmb_desc->is_reg_mr[link->link_idx]) {
+		/* register memory region for new rmb */
+		if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
+			rmb_desc->is_reg_err = true;
+			return -EFAULT;
+		}
+		rmb_desc->is_reg_mr[link->link_idx] = true;
+	}
+	return 0;
+}
+
+static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
+			     struct list_head *lst, bool is_rmb)
+{
+	struct smc_buf_desc *buf_desc, *bf;
+	int rc = 0;
+
+	mutex_lock(lock);
+	list_for_each_entry_safe(buf_desc, bf, lst, list) {
+		if (!buf_desc->used)
+			continue;
+		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
+		if (rc)
+			goto out;
+	}
+out:
+	mutex_unlock(lock);
+	return rc;
+}
+
+/* map all used buffers of lgr for a new link */
+int smcr_buf_map_lgr(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	int i, rc = 0;
+
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
+				       &lgr->rmbs[i], true);
+		if (rc)
+			return rc;
+		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
+				       &lgr->sndbufs[i], false);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/* register all used buffers of lgr for a new link,
+ * must be called under lgr->llc_conf_mutex lock
+ */
+int smcr_buf_reg_lgr(struct smc_link *lnk)
+{
+	struct smc_link_group *lgr = lnk->lgr;
+	struct smc_buf_desc *buf_desc, *bf;
+	int i, rc = 0;
+
+	mutex_lock(&lgr->rmbs_lock);
+	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
+			if (!buf_desc->used)
+				continue;
+			rc = smcr_link_reg_rmb(lnk, buf_desc);
+			if (rc)
+				goto out;
+		}
+	}
+out:
+	mutex_unlock(&lgr->rmbs_lock);
+	return rc;
 }

 static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
 						bool is_rmb, int bufsize)
 {
 	struct smc_buf_desc *buf_desc;
-	struct smc_link *lnk;
-	int rc;

 	/* try to alloc a new buffer */
 	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
@@ -733,41 +1574,36 @@
 		return ERR_PTR(-EAGAIN);
 	}
 	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
-
-	/* build the sg table from the pages */
-	lnk = &lgr->lnk[SMC_SINGLE_LINK];
-	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
-			    GFP_KERNEL);
-	if (rc) {
-		smc_buf_free(lgr, is_rmb, buf_desc);
-		return ERR_PTR(rc);
-	}
-	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
-		   buf_desc->cpu_addr, bufsize);
-
-	/* map sg table to DMA address */
-	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
-			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	/* SMC protocol depends on mapping to one DMA address only */
-	if (rc != 1) {
-		smc_buf_free(lgr, is_rmb, buf_desc);
-		return ERR_PTR(-EAGAIN);
-	}
-
-	/* create a new memory region for the RMB */
-	if (is_rmb) {
-		rc = smc_ib_get_memory_region(lnk->roce_pd,
-					      IB_ACCESS_REMOTE_WRITE |
-					      IB_ACCESS_LOCAL_WRITE,
-					      buf_desc);
-		if (rc) {
-			smc_buf_free(lgr, is_rmb, buf_desc);
-			return ERR_PTR(rc);
-		}
-	}
-
 	buf_desc->len = bufsize;
 	return buf_desc;
+}
+
+/* map buf_desc on all usable links,
+ * unused buffers stay mapped as long as the link is up
+ */
+static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
+				     struct smc_buf_desc *buf_desc, bool is_rmb)
+{
+	int i, rc = 0, cnt = 0;
+
+	/* protect against parallel link reconfiguration */
+	mutex_lock(&lgr->llc_conf_mutex);
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		struct smc_link *lnk = &lgr->lnk[i];
+
+		if (!smc_link_usable(lnk))
+			continue;
+		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		cnt++;
+	}
+out:
+	mutex_unlock(&lgr->llc_conf_mutex);
+	if (!rc && !cnt)
+		rc = -EINVAL;
+	return rc;
 }

 #define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
....@@ -789,7 +1625,11 @@
7891625 rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
7901626 if (rc) {
7911627 kfree(buf_desc);
792
- return ERR_PTR(-EAGAIN);
1628
+ if (rc == -ENOMEM)
1629
+ return ERR_PTR(-EAGAIN);
1630
+ if (rc == -ENOSPC)
1631
+ return ERR_PTR(-ENOSPC);
1632
+ return ERR_PTR(-EIO);
7931633 }
7941634 buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
7951635 /* CDC header stored in buf. So, pretend it was smaller */
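
The hunk above translates smc_ism_register_dmb() failures for the caller: -ENOMEM becomes -EAGAIN (worth retrying with a smaller buffer), -ENOSPC passes through (the device is out of DMB slots, so a smaller size would not help), and everything else collapses to -EIO. A hedged stand-alone model of that translation:

    #include <errno.h>
    #include <stdio.h>

    /* translate a low-level registration error into the caller's contract */
    static int xlate_dmb_err(int rc)
    {
        if (rc == -ENOMEM)
            return -EAGAIN;     /* caller may retry with a smaller buffer */
        if (rc == -ENOSPC)
            return -ENOSPC;     /* no DMB slot left; smaller won't help */
        return -EIO;            /* any other failure is fatal */
    }

    int main(void)
    {
        int codes[] = { -ENOMEM, -ENOSPC, -EPROTO };

        for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
            printf("%d -> %d\n", codes[i], xlate_dmb_err(codes[i]));
        return 0;
    }
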
....@@ -814,8 +1654,8 @@
8141654 struct smc_link_group *lgr = conn->lgr;
8151655 struct list_head *buf_list;
8161656 int bufsize, bufsize_short;
1657
+ struct mutex *lock; /* lock buffer list */
8171658 int sk_buf_size;
818
- rwlock_t *lock;
8191659
8201660 if (is_rmb)
8211661 /* use socket recv buffer size (w/o overhead) as start value */
....@@ -856,14 +1696,21 @@
8561696 continue;
8571697
8581698 buf_desc->used = 1;
859
- write_lock_bh(lock);
1699
+ mutex_lock(lock);
8601700 list_add(&buf_desc->list, buf_list);
861
- write_unlock_bh(lock);
1701
+ mutex_unlock(lock);
8621702 break; /* found */
8631703 }
8641704
8651705 if (IS_ERR(buf_desc))
866
- return -ENOMEM;
1706
+ return PTR_ERR(buf_desc);
1707
+
1708
+ if (!is_smcd) {
1709
+ if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
1710
+ smcr_buf_unuse(buf_desc, lgr);
1711
+ return -ENOMEM;
1712
+ }
1713
+ }
8671714
8681715 if (is_rmb) {
8691716 conn->rmb_desc = buf_desc;
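
Around this hunk (the loop itself sits mostly outside the shown context) __smc_buf_create() searches buffer sizes from the requested value downward, treating -EAGAIN from the allocator as "halve and retry"; the real function also reuses freed descriptors and adds new ones under the list mutex shown above. A sketch of that descending search under hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical allocator: succeeds only once size fits the "budget" */
    static int try_alloc(int size, int budget)
    {
        return size <= budget ? 0 : -EAGAIN;
    }

    /* walk sizes downward, retrying smaller buffers while -EAGAIN */
    static int alloc_desc(int start_size, int min_size, int budget)
    {
        for (int size = start_size; size >= min_size; size >>= 1) {
            int rc = try_alloc(size, budget);

            if (rc == -EAGAIN)
                continue;       /* too big right now, halve and retry */
            if (rc)
                return rc;
            return size;        /* found a workable size */
        }
        return -ENOMEM;
    }

    int main(void)
    {
        printf("%d\n", alloc_desc(1 << 20, 1 << 14, 1 << 16)); /* 65536 */
        return 0;
    }
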
....@@ -884,42 +1731,44 @@
8841731
8851732 void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
8861733 {
887
- struct smc_link_group *lgr = conn->lgr;
888
-
889
- if (!conn->lgr || conn->lgr->is_smcd)
1734
+ if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
8901735 return;
891
- smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
892
- conn->sndbuf_desc, DMA_TO_DEVICE);
1736
+ smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
8931737 }
8941738
8951739 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
8961740 {
897
- struct smc_link_group *lgr = conn->lgr;
898
-
899
- if (!conn->lgr || conn->lgr->is_smcd)
1741
+ if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
9001742 return;
901
- smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
902
- conn->sndbuf_desc, DMA_TO_DEVICE);
1743
+ smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
9031744 }
9041745
9051746 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
9061747 {
907
- struct smc_link_group *lgr = conn->lgr;
1748
+ int i;
9081749
9091750 if (!conn->lgr || conn->lgr->is_smcd)
9101751 return;
911
- smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
912
- conn->rmb_desc, DMA_FROM_DEVICE);
1752
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1753
+ if (!smc_link_active(&conn->lgr->lnk[i]))
1754
+ continue;
1755
+ smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
1756
+ DMA_FROM_DEVICE);
1757
+ }
9131758 }
9141759
9151760 void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
9161761 {
917
- struct smc_link_group *lgr = conn->lgr;
1762
+ int i;
9181763
9191764 if (!conn->lgr || conn->lgr->is_smcd)
9201765 return;
921
- smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
922
- conn->rmb_desc, DMA_FROM_DEVICE);
1766
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1767
+ if (!smc_link_active(&conn->lgr->lnk[i]))
1768
+ continue;
1769
+ smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
1770
+ DMA_FROM_DEVICE);
1771
+ }
9231772 }
9241773
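
The sync helpers encode a direction asymmetry: a send buffer is synced toward the device on the one link the connection transmits on (DMA_TO_DEVICE), while the RMB must be synced from the device on every active link, because the peer may write through any of them. A toy model of that fan-out (link states and the per-link sync are stand-ins for the kernel calls):

    #include <stdio.h>

    #define MAX_LINKS 3     /* stand-in for SMC_LINKS_PER_LGR_MAX */

    enum dir { TO_DEVICE, FROM_DEVICE };

    /* hypothetical per-link sync; just traces what would be DMA-synced */
    static void sync_one(int link, enum dir d)
    {
        printf("link %d: sync %s device\n", link,
               d == TO_DEVICE ? "to" : "from");
    }

    int main(void)
    {
        int active[MAX_LINKS] = { 1, 0, 1 };
        int assigned_link = 0;

        /* send buffer: only the link this connection transmits on */
        sync_one(assigned_link, TO_DEVICE);

        /* receive buffer: peer may write via any link, so sync them all */
        for (int i = 0; i < MAX_LINKS; i++)
            if (active[i])
                sync_one(i, FROM_DEVICE);
        return 0;
    }
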
9251774 /* create the send and receive buffer for an SMC socket;
....@@ -938,8 +1787,13 @@
9381787 return rc;
9391788 /* create rmb */
9401789 rc = __smc_buf_create(smc, is_smcd, true);
941
- if (rc)
1790
+ if (rc) {
1791
+ mutex_lock(&smc->conn.lgr->sndbufs_lock);
1792
+ list_del(&smc->conn.sndbuf_desc->list);
1793
+ mutex_unlock(&smc->conn.lgr->sndbufs_lock);
9421794 smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
1795
+ smc->conn.sndbuf_desc = NULL;
1796
+ }
9431797 return rc;
9441798 }
9451799
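
smc_buf_create() now rolls back cleanly: if RMB creation fails after the send buffer already exists, the send buffer is delisted under sndbufs_lock, freed, and the pointer is cleared so later teardown cannot free it twice. The same acquire-two-or-none pattern in plain C (malloc stands in for the buffer constructors):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conn { void *sndbuf, *rmb; };

    /* create both buffers or neither; on rmb failure roll the sndbuf back */
    static int buf_create(struct conn *c, int rmb_fails)
    {
        c->sndbuf = malloc(16);
        if (!c->sndbuf)
            return -ENOMEM;
        c->rmb = rmb_fails ? NULL : malloc(16);
        if (!c->rmb) {
            free(c->sndbuf);
            c->sndbuf = NULL;   /* avoid a stale pointer / double free */
            return -ENOMEM;
        }
        return 0;
    }

    int main(void)
    {
        struct conn c = { 0 };

        printf("rc=%d sndbuf=%p\n", buf_create(&c, 1), c.sndbuf);
        free(c.sndbuf);
        free(c.rmb);
        return 0;
    }
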
....@@ -954,16 +1808,64 @@
9541808 return -ENOSPC;
9551809 }
9561810
957
-/* add a new rtoken from peer */
958
-int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
1811
+static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
1812
+ u32 rkey)
9591813 {
1814
+ int i;
1815
+
1816
+ for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
1817
+ if (test_bit(i, lgr->rtokens_used_mask) &&
1818
+ lgr->rtokens[i][lnk_idx].rkey == rkey)
1819
+ return i;
1820
+ }
1821
+ return -ENOENT;
1822
+}
1823
+
1824
+/* set rtoken for a new link to an existing rmb */
1825
+void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
1826
+ __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
1827
+{
1828
+ int rtok_idx;
1829
+
1830
+ rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
1831
+ if (rtok_idx == -ENOENT)
1832
+ return;
1833
+ lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
1834
+ lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
1835
+}
1836
+
1837
+/* set rtoken for a new link whose link_id is given */
1838
+void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
1839
+ __be64 nw_vaddr, __be32 nw_rkey)
1840
+{
1841
+ u64 dma_addr = be64_to_cpu(nw_vaddr);
1842
+ u32 rkey = ntohl(nw_rkey);
1843
+ bool found = false;
1844
+ int link_idx;
1845
+
1846
+ for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
1847
+ if (lgr->lnk[link_idx].link_id == link_id) {
1848
+ found = true;
1849
+ break;
1850
+ }
1851
+ }
1852
+ if (!found)
1853
+ return;
1854
+ lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
1855
+ lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
1856
+}
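
The rtoken table is two-dimensional: one row per remote RMB, one column per link. smc_rtoken_set() copies a row entry from an old column to a new one, while smc_rtoken_set2() first has to resolve the wire-visible link_id to a column index, silently ignoring unknown ids. A compact model of that indexing (sizes and ids here are made up):

    #include <stdio.h>

    #define MAX_RMBS  4     /* stand-in for SMC_RMBS_PER_LGR_MAX  */
    #define MAX_LINKS 3     /* stand-in for SMC_LINKS_PER_LGR_MAX */

    struct rtoken { unsigned rkey; unsigned long long dma_addr; };

    static struct rtoken tab[MAX_RMBS][MAX_LINKS];
    static int link_ids[MAX_LINKS] = { 11, 12, 13 };

    /* find the column (array index) for a wire-visible link_id */
    static int col_by_link_id(int link_id)
    {
        for (int i = 0; i < MAX_LINKS; i++)
            if (link_ids[i] == link_id)
                return i;
        return -1;
    }

    int main(void)
    {
        int row = 2, col = col_by_link_id(12);

        if (col < 0)
            return 1;   /* unknown link_id: silently skipped, as above */
        tab[row][col].rkey = 0xabcd;
        tab[row][col].dma_addr = 0x1000;
        printf("tab[%d][%d].rkey=0x%x\n", row, col, tab[row][col].rkey);
        return 0;
    }
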
1857
+
1858
+/* add a new rtoken from peer */
1859
+int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
1860
+{
1861
+ struct smc_link_group *lgr = smc_get_lgr(lnk);
9601862 u64 dma_addr = be64_to_cpu(nw_vaddr);
9611863 u32 rkey = ntohl(nw_rkey);
9621864 int i;
9631865
9641866 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
965
- if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
966
- (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
1867
+ if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
1868
+ lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
9671869 test_bit(i, lgr->rtokens_used_mask)) {
9681870 /* already in list */
9691871 return i;
....@@ -972,23 +1874,25 @@
9721874 i = smc_rmb_reserve_rtoken_idx(lgr);
9731875 if (i < 0)
9741876 return i;
975
- lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
976
- lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
1877
+ lgr->rtokens[i][lnk->link_idx].rkey = rkey;
1878
+ lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
9771879 return i;
9781880 }
9791881
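
smc_rtoken_add() is idempotent: it first scans the used rows for a matching (rkey, dma_addr) pair on this link's column and returns the existing index before reserving a new row. A stand-alone sketch of that dedupe-then-reserve logic, reduced to a single column with hypothetical sizes:

    #include <stdio.h>

    #define MAX_RMBS 4

    struct rtoken { unsigned rkey; unsigned long long addr; int used; };

    static struct rtoken rows[MAX_RMBS];

    /* return the existing row for (rkey, addr) or claim a free one */
    static int rtoken_add(unsigned rkey, unsigned long long addr)
    {
        int i;

        for (i = 0; i < MAX_RMBS; i++)
            if (rows[i].used && rows[i].rkey == rkey &&
                rows[i].addr == addr)
                return i;               /* already known: idempotent */
        for (i = 0; i < MAX_RMBS; i++)
            if (!rows[i].used) {
                rows[i] = (struct rtoken){ rkey, addr, 1 };
                return i;
            }
        return -1;                      /* table full */
    }

    int main(void)
    {
        printf("%d %d\n", rtoken_add(7, 0x10), rtoken_add(7, 0x10)); /* 0 0 */
        return 0;
    }
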
980
-/* delete an rtoken */
981
-int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
1882
+/* delete an rtoken from all links */
1883
+int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
9821884 {
1885
+ struct smc_link_group *lgr = smc_get_lgr(lnk);
9831886 u32 rkey = ntohl(nw_rkey);
984
- int i;
1887
+ int i, j;
9851888
9861889 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
987
- if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
1890
+ if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
9881891 test_bit(i, lgr->rtokens_used_mask)) {
989
- lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
990
- lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;
991
-
1892
+ for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
1893
+ lgr->rtokens[i][j].rkey = 0;
1894
+ lgr->rtokens[i][j].dma_addr = 0;
1895
+ }
9921896 clear_bit(i, lgr->rtokens_used_mask);
9931897 return 0;
9941898 }
....@@ -998,36 +1902,72 @@
9981902
9991903 /* save rkey and dma_addr received from peer during clc handshake */
10001904 int smc_rmb_rtoken_handling(struct smc_connection *conn,
1905
+ struct smc_link *lnk,
10011906 struct smc_clc_msg_accept_confirm *clc)
10021907 {
1003
- conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
1004
- clc->rmb_rkey);
1908
+ conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
1909
+ clc->r0.rmb_rkey);
10051910 if (conn->rtoken_idx < 0)
10061911 return conn->rtoken_idx;
10071912 return 0;
10081913 }
10091914
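
The CLC accept/confirm message carries the peer's rkey and virtual address in network byte order, hence the ntohl()/be64_to_cpu() conversions before they are stored. A userspace equivalent, assuming glibc's endian.h in place of the kernel helpers:

    #include <arpa/inet.h>  /* ntohl() */
    #include <endian.h>     /* be64toh(); glibc; kernel uses be64_to_cpu() */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t nw_rkey  = htonl(0x12345678u);  /* as seen on the wire */
        uint64_t nw_vaddr = htobe64(0x1122334455667788ull);

        printf("rkey=0x%x vaddr=0x%llx\n", ntohl(nw_rkey),
               (unsigned long long)be64toh(nw_vaddr));
        return 0;
    }
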
1915
+static void smc_core_going_away(void)
1916
+{
1917
+ struct smc_ib_device *smcibdev;
1918
+ struct smcd_dev *smcd;
1919
+
1920
+ mutex_lock(&smc_ib_devices.mutex);
1921
+ list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
1922
+ int i;
1923
+
1924
+ for (i = 0; i < SMC_MAX_PORTS; i++)
1925
+ set_bit(i, smcibdev->ports_going_away);
1926
+ }
1927
+ mutex_unlock(&smc_ib_devices.mutex);
1928
+
1929
+ mutex_lock(&smcd_dev_list.mutex);
1930
+ list_for_each_entry(smcd, &smcd_dev_list.list, list) {
1931
+ smcd->going_away = 1;
1932
+ }
1933
+ mutex_unlock(&smcd_dev_list.mutex);
1934
+}
1935
+
1936
+/* Clean up all SMC link groups */
1937
+static void smc_lgrs_shutdown(void)
1938
+{
1939
+ struct smcd_dev *smcd;
1940
+
1941
+ smc_core_going_away();
1942
+
1943
+ smc_smcr_terminate_all(NULL);
1944
+
1945
+ mutex_lock(&smcd_dev_list.mutex);
1946
+ list_for_each_entry(smcd, &smcd_dev_list.list, list)
1947
+ smc_smcd_terminate_all(smcd);
1948
+ mutex_unlock(&smcd_dev_list.mutex);
1949
+}
1950
+
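
Shutdown is ordered: smc_core_going_away() first flags every IB port and ISM device so no new link groups are admitted, and only then are the existing SMC-R and SMC-D groups terminated under the device-list locks. A minimal flag-then-drain model of that sequence (the drain loop is a stand-in for the real terminate_all calls):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool going_away;  /* once set, refuse new work */
    static int nr_groups = 2;       /* pretend two link groups exist */

    static int new_group(void)
    {
        if (atomic_load(&going_away))
            return -1;              /* admission refused during shutdown */
        nr_groups++;
        return 0;
    }

    static void shutdown_all(void)
    {
        atomic_store(&going_away, true);    /* step 1: stop admissions */
        while (nr_groups)                   /* step 2: drain what exists */
            nr_groups--;
    }

    int main(void)
    {
        shutdown_all();
        printf("new_group after shutdown: %d, groups left: %d\n",
               new_group(), nr_groups);
        return 0;
    }
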
1951
+static int smc_core_reboot_event(struct notifier_block *this,
1952
+ unsigned long event, void *ptr)
1953
+{
1954
+ smc_lgrs_shutdown();
1955
+ smc_ib_unregister_client();
1956
+ return 0;
1957
+}
1958
+
1959
+static struct notifier_block smc_reboot_notifier = {
1960
+ .notifier_call = smc_core_reboot_event,
1961
+};
1962
+
1963
+int __init smc_core_init(void)
1964
+{
1965
+ return register_reboot_notifier(&smc_reboot_notifier);
1966
+}
1967
+
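
Registering a reboot notifier is what lets smc_lgrs_shutdown() run before the machine goes down, not only at module unload. A stripped-down userspace model of the notifier-chain idea (register_reboot_notifier() and struct notifier_block are the real kernel API; everything below is a simplified imitation):

    #include <stdio.h>

    /* simplified model of the kernel's notifier_block chain */
    struct notifier {
        int (*call)(unsigned long event);
        struct notifier *next;
    };

    static struct notifier *chain;

    static void register_notifier(struct notifier *nb)
    {
        nb->next = chain;
        chain = nb;
    }

    static void call_chain(unsigned long event)
    {
        for (struct notifier *nb = chain; nb; nb = nb->next)
            nb->call(event);
    }

    static int on_reboot(unsigned long event)
    {
        printf("reboot event %lu: shutting down link groups\n", event);
        return 0;
    }

    int main(void)
    {
        struct notifier nb = { .call = on_reboot };

        register_notifier(&nb);
        call_chain(1);      /* the kernel fires this on reboot/halt */
        return 0;
    }
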
10101968 /* Called (from smc_exit) when module is removed */
10111969 void smc_core_exit(void)
10121970 {
1013
- struct smc_link_group *lgr, *lg;
1014
- LIST_HEAD(lgr_freeing_list);
1015
-
1016
- spin_lock_bh(&smc_lgr_list.lock);
1017
- if (!list_empty(&smc_lgr_list.list))
1018
- list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
1019
- spin_unlock_bh(&smc_lgr_list.lock);
1020
- list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
1021
- list_del_init(&lgr->list);
1022
- if (!lgr->is_smcd) {
1023
- struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
1024
-
1025
- if (lnk->state == SMC_LNK_ACTIVE)
1026
- smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
1027
- false);
1028
- smc_llc_link_inactive(lnk);
1029
- }
1030
- cancel_delayed_work_sync(&lgr->free_work);
1031
- smc_lgr_free(lgr); /* free link group */
1032
- }
1971
+ unregister_reboot_notifier(&smc_reboot_notifier);
1972
+ smc_lgrs_shutdown();
10331973 }