// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_down_work(struct work_struct *work);
/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* assign an SMC-R link to the connection */
static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
{
	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
				       SMC_LNK_ACTIVE;
	int i, j;

	/* do link balancing */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &conn->lgr->lnk[i];

		if (lnk->state != expected || lnk->link_is_asym)
			continue;
		if (conn->lgr->role == SMC_CLNT) {
			conn->lnk = lnk; /* temporary, SMC server assigns link */
			break;
		}
		if (conn->lgr->conns_num % 2) {
			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
				struct smc_link *lnk2;

				lnk2 = &conn->lgr->lnk[j];
				if (lnk2->state == expected &&
				    !lnk2->link_is_asym) {
					conn->lnk = lnk2;
					break;
				}
			}
		}
		if (!conn->lnk)
			conn->lnk = lnk;
		break;
	}
	if (!conn->lnk)
		return SMC_CLC_DECL_NOACTLINK;
	return 0;
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);
	int rc;

	if (!conn->lgr->is_smcd) {
		rc = smcr_lgr_conn_assign_link(conn, first);
		if (rc)
			return rc;
	}
	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
	return 0;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

void smc_lgr_cleanup_early(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *lgr_list;
	spinlock_t *lgr_lock;

	if (!lgr)
		return;

	smc_conn_free(conn);
	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(lgr_list))
		list_del_init(lgr_list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, true);
}

static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (smc_link_usable(lnk))
			lnk->state = SMC_LNK_INACTIVE;
	}
	wake_up_all(&lgr->llc_msg_waiter);
	wake_up_all(&lgr->llc_flow_waiter);
}

static void smc_lgr_free(struct smc_link_group *lgr);

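/* delayed worker to free a link group; bails out if new connections showed
 * up in the meantime or another instance already claimed the freeing
 */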
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && !lgr->terminating)
		smc_llc_send_link_delete_all(lgr, true,
					     SMC_LLC_DEL_PROG_INIT_TERM);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd)
		smcr_lgr_link_deactivate_all(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
again:
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				goto again; /* link_id in use, pick new one */
		}
		break;
	}
	return link_id;
}

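/* initialize a new SMC-R link: take a device reference, assign link id,
 * GID and initial packet sequence number, and create the protection
 * domain, queue pair and work request structures needed by the link
 */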
int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini)
{
	u8 rndvec[3];
	int rc;

	get_device(&ini->ib_dev->ibdev->dev);
	atomic_inc(&ini->ib_dev->lnk_cnt);
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	lnk->link_idx = link_idx;
	lnk->smcibdev = ini->ib_dev;
	lnk->ibport = ini->ib_port;
	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
	smc_llc_link_set_uid(lnk);
	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
	if (!ini->ib_dev->initialized) {
		rc = smc_ib_setup_per_ibdev(ini->ib_dev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	lnk->state = SMC_LNK_ACTIVATING;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk, false);
out:
	put_device(&ini->ib_dev->ibdev->dev);
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
		wake_up(&ini->ib_dev->lnks_deleted);
	return rc;
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
				     ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
				     SMC_LGR_ID_SIZE, &lgr->id);
	if (!lgr->tx_wq) {
		rc = -ENOMEM;
		goto free_lgr;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	mutex_init(&lgr->sndbufs_lock);
	mutex_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev[ini->ism_selected]->dev);
		lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
		lgr->smcd = ini->ism_dev[ini->ism_selected];
		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->smc_version = ini->smcd_version;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);
		memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
		       SMC_MAX_PNETID_LEN);
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc)
			goto free_wq;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add_tail(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_wq:
	destroy_workqueue(lgr->tx_wq);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

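/* return the number of free bytes in the peer's receive buffer (RMBE),
 * computed from the local producer and consumer cursors
 */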
static int smc_write_space(struct smc_connection *conn)
{
	int buffer_len = conn->peer_rmbe_size;
	union smc_host_cursor prod;
	union smc_host_cursor cons;
	int space;

	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
	/* determine rx_buf space */
	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
	return space;
}

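/* rewind the producer cursor to the last confirmed state and adjust the
 * send-related cursors accordingly, so pending data is retransmitted via
 * tx_rdma_writes() after the connection moved to another link
 */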
static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
			     struct smc_wr_buf *wr_buf)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set to
		 * fin above, so now prod is smaller than cons. Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
		if (!rc) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	} else {
		smc_wr_tx_put_slot(conn->lnk,
				   (struct smc_wr_tx_pend_priv *)pend);
	}
	return rc;
}

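/* switch all connections of a link group from a dying link over to an
 * alternate active link; terminate the link group if no alternate link
 * is available. Returns the new link or NULL on failure.
 */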
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_cdc_tx_pend *pend;
	struct smc_connection *conn;
	struct smc_wr_buf *wr_buf;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport) {
			continue;
		}
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk) {
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			spin_lock_bh(&conn->send_lock);
			conn->lnk = to_lnk;
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* pre-fetch buffer outside of send_lock, might sleep */
		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
		if (rc) {
			smcr_link_down_cond_sched(to_lnk);
			sock_put(&smc->sk); /* sock_hold above */
			return NULL;
		}
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		conn->lnk = to_lnk;
		rc = smc_switch_cursor(smc, pend, wr_buf);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc) {
			smcr_link_down_cond_sched(to_lnk);
			return NULL;
		}
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	return to_lnk;
}

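/* drop a connection's use of an RMB: delete the peer's rkey for it if
 * registered, free the buffer if registration had failed, otherwise
 * keep it around for reuse by a later connection
 */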
static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
			   struct smc_link_group *lgr)
{
	int rc;

	if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			mutex_lock(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, rmb_desc);
			rmb_desc->is_conf_rkey = false;
			mutex_unlock(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (rmb_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		mutex_lock(&lgr->rmbs_lock);
		list_del(&rmb_desc->list);
		mutex_unlock(&lgr->rmbs_lock);

		smc_buf_free(lgr, true, rmb_desc);
	} else {
		rmb_desc->used = 0;
	}
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc && lgr->is_smcd)
		conn->rmb_desc->used = 0;
	else if (conn->rmb_desc)
		smcr_buf_unuse(conn->rmb_desc, lgr);
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
		if (current_work() != &conn->abort_work)
			cancel_work_sync(&conn->abort_work);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;
	if (is_rmb) {
		if (buf_desc->mr_rx[lnk->link_idx]) {
			smc_ib_put_memory_region(
					buf_desc->mr_rx[lnk->link_idx]);
			buf_desc->mr_rx[lnk->link_idx] = NULL;
		}
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		mutex_lock(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		mutex_unlock(&lgr->rmbs_lock);
		mutex_lock(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		mutex_unlock(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk, bool log)
{
	struct smc_ib_device *smcibdev;

	if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk, log);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	int i;

	if (!lgr->is_smcd) {
		mutex_lock(&lgr->llc_conf_mutex);
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
				smcr_link_clear(&lgr->lnk[i], false);
		}
		mutex_unlock(&lgr->llc_conf_mutex);
		smc_llc_lgr_clear(lgr);
	}

	smc_lgr_free_bufs(lgr);
	destroy_workqueue(lgr->tx_wq);
	if (lgr->is_smcd) {
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

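/* tell the peer that the link group goes away: signal shutdown and drop
 * the DMBs for SMC-D, or send a DELETE LINK for all links and deactivate
 * them for SMC-R
 */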
static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
	} else {
		u32 rsn = lgr->llc_termination_rsn;

		if (!rsn)
			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
		smc_llc_send_link_delete_all(lgr, false, rsn);
		smcr_lgr_link_deactivate_all(lgr);
	}
}

/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	/* cancel free_work sync, will terminate when lgr->freeing is set */
	cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	smc_lgr_free(lgr);
}

/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	lgr->freeing = 1;
	spin_unlock_bh(lgr_lock);
	schedule_work(&lgr->terminate_work);
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
			lgr->freeing = 1;
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
				if (lgr->lnk[i].smcibdev == smcibdev)
					smcr_link_down_cond_sched(&lgr->lnk[i]);
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}

/* set new lgr type and clear all asymmetric link tagging */
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
{
	char *lgr_type = "";
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		if (smc_link_usable(&lgr->lnk[i]))
			lgr->lnk[i].link_is_asym = false;
	if (lgr->type == new_type)
		return;
	lgr->type = new_type;

	switch (lgr->type) {
	case SMC_LGR_NONE:
		lgr_type = "NONE";
		break;
	case SMC_LGR_SINGLE:
		lgr_type = "SINGLE";
		break;
	case SMC_LGR_SYMMETRIC:
		lgr_type = "SYMMETRIC";
		break;
	case SMC_LGR_ASYMMETRIC_PEER:
		lgr_type = "ASYMMETRIC_PEER";
		break;
	case SMC_LGR_ASYMMETRIC_LOCAL:
		lgr_type = "ASYMMETRIC_LOCAL";
		break;
	}
	pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
			    "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
			    lgr_type, lgr->pnet_id);
}

/* set new lgr type and tag a link as asymmetric */
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
			    enum smc_lgr_type new_type, int asym_lnk_idx)
{
	smcr_lgr_set_type(lgr, new_type);
	lgr->lnk[asym_lnk_idx].link_is_asym = true;
}

/* abort connection, abort_work scheduled from tasklet context */
static void smc_conn_abort_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   abort_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	smc_conn_kill(conn, true);
	sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}

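/* an IB port came up: trigger local ADD LINK processing for all link
 * groups on the same pnetid that are not yet fully connected
 */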
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;

	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		struct smc_link *link;

		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN) ||
		    lgr->type == SMC_LGR_SYMMETRIC ||
		    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
			continue;

		/* trigger local add link processing */
		link = smc_llc_usable_link(lgr);
		if (link)
			smc_llc_add_link_local(link);
	}
}

/* link is down - switch connections to alternate link,
 * must be called under lgr->llc_conf_mutex lock
 */
static void smcr_link_down(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_link *to_lnk;
	int del_link_id;

	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
		return;

	smc_ib_modify_qp_reset(lnk);
	to_lnk = smc_switch_conns(lgr, lnk, true);
	if (!to_lnk) { /* no backup link available */
		smcr_link_clear(lnk, true);
		return;
	}
	smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	del_link_id = lnk->link_id;

	if (lgr->role == SMC_SERV) {
		/* trigger local delete link processing */
		smc_llc_srv_delete_link_local(to_lnk, del_link_id);
	} else {
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* another llc task is ongoing */
			mutex_unlock(&lgr->llc_conf_mutex);
			wait_event_timeout(lgr->llc_flow_waiter,
					   (list_empty(&lgr->list) ||
					    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
					   SMC_LLC_WAIT_TIME);
			mutex_lock(&lgr->llc_conf_mutex);
		}
		if (!list_empty(&lgr->list)) {
			smc_llc_send_delete_link(to_lnk, del_link_id,
						 SMC_LLC_REQ, true,
						 SMC_LLC_DEL_LOST_PATH);
			smcr_link_clear(lnk, true);
		}
		wake_up(&lgr->llc_flow_waiter);	/* wake up next waiter */
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_down_cond(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state))
		smcr_link_down(lnk);
}

/* will get the lgr->llc_conf_mutex lock */
void smcr_link_down_cond_sched(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state))
		schedule_work(&lnk->link_down_wrk);
}

void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;
	int i;

	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN))
			continue; /* lgr is not affected */
		if (list_empty(&lgr->list))
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (smc_link_usable(lnk) &&
			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
				smcr_link_down_cond_sched(lnk);
		}
	}
}

static void smc_link_down_work(struct work_struct *work)
{
	struct smc_link *link = container_of(work, struct smc_link,
					     link_down_wrk);
	struct smc_link_group *lgr = link->lgr;

	if (list_empty(&lgr->list))
		return;
	wake_up_all(&lgr->llc_msg_waiter);
	mutex_lock(&lgr->llc_conf_mutex);
	smcr_link_down(link);
	mutex_unlock(&lgr->llc_conf_mutex);
}

/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	rtnl_lock();
	nest_lvl = ndev->lower_level;
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			ini->vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

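/* check if an existing SMC-R link group fits: same peer system id, same
 * role, and an active link to the peer's GID and MAC (plus matching QP
 * number on the client side)
 */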
static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	int i;

	if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
	    lgr->role != role)
		return false;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
		    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
		    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
			return true;
	}
	return false;
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
				  &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
				  &smc_lgr_list.lock;
	ini->first_contact_local = 1;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->first_contact_peer)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
				    ini->ism_peer_gid[ini->ism_selected]) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    (ini->smcd_version == SMC_V2 ||
		     lgr->vlan_id == ini->vlan_id) &&
		    (role == SMC_CLNT || ini->is_smcd ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->first_contact_local = 0;
			conn->lgr = lgr;
			rc = smc_lgr_register_conn(conn, false);
			write_unlock_bh(&lgr->conns_lock);
			if (!rc && delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);
	if (rc)
		return rc;

	if (role == SMC_CLNT && !ini->first_contact_peer &&
	    ini->first_contact_local) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->first_contact_local) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		rc = smc_lgr_register_conn(conn, true);
		write_unlock_bh(&lgr->conns_lock);
		if (rc)
			goto out;
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	} else {
		conn->rx_off = 0;
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     struct mutex *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	mutex_lock(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			mutex_unlock(lock);
			return buf_slot;
		}
	}
	mutex_unlock(lock);
	return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

/* map an rmb buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
			     struct smc_link *lnk)
{
	int rc;

	if (buf_desc->is_map_ib[lnk->link_idx])
		return 0;

	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
	if (rc)
		return rc;
	sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
		   buf_desc->cpu_addr, buf_desc->len);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		rc = -EAGAIN;
		goto free_table;
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc, lnk->link_idx);
		if (rc)
			goto buf_unmap;
		smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
	}
	buf_desc->is_map_ib[lnk->link_idx] = true;
	return 0;

buf_unmap:
	smc_ib_buf_unmap_sg(lnk, buf_desc,
			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
free_table:
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	return rc;
}

/* register a new rmb on IB device,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
{
	if (list_empty(&link->lgr->list))
		return -ENOLINK;
	if (!rmb_desc->is_reg_mr[link->link_idx]) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
			rmb_desc->is_reg_err = true;
			return -EFAULT;
		}
		rmb_desc->is_reg_mr[link->link_idx] = true;
	}
	return 0;
}

static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
			     struct list_head *lst, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf;
	int rc = 0;

	mutex_lock(lock);
	list_for_each_entry_safe(buf_desc, bf, lst, list) {
		if (!buf_desc->used)
			continue;
		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
		if (rc)
			goto out;
	}
out:
	mutex_unlock(lock);
	return rc;
}

/* map all used buffers of lgr for a new link */
int smcr_buf_map_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i, rc = 0;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
				       &lgr->rmbs[i], true);
		if (rc)
			return rc;
		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
				       &lgr->sndbufs[i], false);
		if (rc)
			return rc;
	}
	return 0;
}

/* register all used buffers of lgr for a new link,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_buf_reg_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i, rc = 0;

	mutex_lock(&lgr->rmbs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
			if (!buf_desc->used)
				continue;
			rc = smcr_link_reg_rmb(lnk, buf_desc);
			if (rc)
				goto out;
		}
	}
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}

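/* allocate a new SMC-R buffer of the given size, backed by physically
 * contiguous pages
 */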
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
	buf_desc->len = bufsize;
	return buf_desc;
}

/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
				     struct smc_buf_desc *buf_desc, bool is_rmb)
{
	int i, rc = 0;

	/* protect against parallel link reconfiguration */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (!smc_link_usable(lnk))
			continue;
		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
			rc = -ENOMEM;
			goto out;
		}
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	return rc;
}

#define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			if (rc == -ENOMEM)
				return ERR_PTR(-EAGAIN);
			if (rc == -ENOSPC)
				return ERR_PTR(-ENOSPC);
			return ERR_PTR(-EIO);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

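/* create or reuse a send buffer or RMB for a connection: start from the
 * socket buffer size and fall back to smaller sizes until allocation
 * succeeds, then map the buffer on all usable links for SMC-R
 */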
static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	struct mutex *lock;	/* lock buffer list */
	int sk_buf_size;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		mutex_lock(lock);
		list_add(&buf_desc->list, buf_list);
		mutex_unlock(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return PTR_ERR(buf_desc);

	if (!is_smcd) {
		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
			smcr_buf_unuse(buf_desc, lgr);
			return -ENOMEM;
		}
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

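/* sync the send buffer or RMB between CPU and device before and after
 * access; these are no-ops for SMC-D, where buffers are not DMA-mapped
 */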
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
					  DMA_FROM_DEVICE);
	}
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc) {
		mutex_lock(&smc->conn.lgr->sndbufs_lock);
		list_del(&smc->conn.sndbuf_desc->list);
		mutex_unlock(&smc->conn.lgr->sndbufs_lock);
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
		smc->conn.sndbuf_desc = NULL;
	}
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
				   u32 rkey)
{
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (test_bit(i, lgr->rtokens_used_mask) &&
		    lgr->rtokens[i][lnk_idx].rkey == rkey)
			return i;
	}
	return -ENOENT;
}

/* set rtoken for a new link to an existing rmb */
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
{
	int rtok_idx;

	rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
	if (rtok_idx == -ENOENT)
		return;
	lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
	lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
}

/* set rtoken for a new link whose link_id is given */
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	bool found = false;
	int link_idx;

	for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
		if (lgr->lnk[link_idx].link_id == link_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return;
	lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
	lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u32 rkey = ntohl(nw_rkey);
	int i, j;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
				lgr->rtokens[i][j].rkey = 0;
				lgr->rtokens[i][j].dma_addr = 0;
			}
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_link *lnk,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
					  clc->r0.rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

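/* mark all devices as going away so no new link groups are created on them */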
static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	mutex_unlock(&smc_ib_devices.mutex);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	mutex_unlock(&smcd_dev_list.mutex);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	mutex_unlock(&smcd_dev_list.mutex);
}

static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}