net/smc/smc_core.c
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"

#define SMC_LGR_NUM_INCR                256
#define SMC_LGR_FREE_DELAY_SERV         (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT         (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

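/* Both free delays are in jiffies. The client delay is kept ten seconds
 * longer than the server delay so that the server side always gives up its
 * link group first (see smc_lgr_unregister_conn() below).
 */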
static u32 smc_lgr_num;                 /* unique link group number */

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn        connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
        struct rb_node **link, *parent = NULL;
        u32 token = conn->alert_token_local;

        link = &conn->lgr->conns_all.rb_node;
        while (*link) {
                struct smc_connection *cur = rb_entry(*link,
                                        struct smc_connection, alert_node);

                parent = *link;
                if (cur->alert_token_local > token)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        /* Put the new node there */
        rb_link_node(&conn->alert_node, parent, link);
        rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        static atomic_t nexttoken = ATOMIC_INIT(0);

        /* find a new alert_token_local value not yet used by some connection
         * in this link group
         */
        sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
        while (!conn->alert_token_local) {
                conn->alert_token_local = atomic_inc_return(&nexttoken);
                if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
                        conn->alert_token_local = 0;
        }
        smc_lgr_add_alert_token(conn);
        conn->lgr->conns_num++;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct smc_link_group *lgr = conn->lgr;

        rb_erase(&conn->alert_node, &lgr->conns_all);
        lgr->conns_num--;
        conn->alert_token_local = 0;
        conn->lgr = NULL;
        sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection and trigger lgr freeing if applicable
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;
        int reduced = 0;

        write_lock_bh(&lgr->conns_lock);
        if (conn->alert_token_local) {
                reduced = 1;
                __smc_lgr_unregister_conn(conn);
        }
        write_unlock_bh(&lgr->conns_lock);
        if (!reduced || lgr->conns_num)
                return;
        /* client link group creation always follows the server link group
         * creation. Use a somewhat longer removal delay for the client,
         * otherwise there is a risk of out-of-sync link groups.
         */
        mod_delayed_work(system_wq, &lgr->free_work,
                         lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
                                                 SMC_LGR_FREE_DELAY_SERV);
}

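/* Deferred freeing of a link group. The worker re-checks under the locks
 * whether the lgr is still unused, since a new connection may have picked
 * it up again while the delayed work was pending.
 */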
static void smc_lgr_free_work(struct work_struct *work)
{
        struct smc_link_group *lgr = container_of(to_delayed_work(work),
                                                  struct smc_link_group,
                                                  free_work);
        bool conns;

        spin_lock_bh(&smc_lgr_list.lock);
        read_lock_bh(&lgr->conns_lock);
        conns = RB_EMPTY_ROOT(&lgr->conns_all); /* true if no conns remain */
        read_unlock_bh(&lgr->conns_lock);
        if (!conns) { /* number of lgr connections is no longer zero */
                spin_unlock_bh(&smc_lgr_list.lock);
                return;
        }
        list_del_init(&lgr->list); /* remove from smc_lgr_list */
        spin_unlock_bh(&smc_lgr_list.lock);
        smc_lgr_free(lgr);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
                          struct smc_ib_device *smcibdev, u8 ibport,
                          char *peer_systemid, unsigned short vlan_id)
{
        struct smc_link_group *lgr;
        struct smc_link *lnk;
        u8 rndvec[3];
        int rc = 0;
        int i;

        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
                rc = -ENOMEM;
                goto out;
        }
        lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
        lgr->sync_err = false;
        lgr->daddr = peer_in_addr;
        memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
        lgr->vlan_id = vlan_id;
        rwlock_init(&lgr->sndbufs_lock);
        rwlock_init(&lgr->rmbs_lock);
        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                INIT_LIST_HEAD(&lgr->sndbufs[i]);
                INIT_LIST_HEAD(&lgr->rmbs[i]);
        }
        smc_lgr_num += SMC_LGR_NUM_INCR;
        memcpy(&lgr->id, (u8 *)&smc_lgr_num, SMC_LGR_ID_SIZE);
        INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
        lgr->conns_all = RB_ROOT;

        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        /* initialize link */
        lnk->smcibdev = smcibdev;
        lnk->ibport = ibport;
        lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
        if (!smcibdev->initialized)
                smc_ib_setup_per_ibdev(smcibdev);
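        /* the initial packet sequence number must be random; InfiniBand
         * PSNs are 24 bits wide, hence the three random bytes
         */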
        get_random_bytes(rndvec, sizeof(rndvec));
        lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
        rc = smc_wr_alloc_link_mem(lnk);
        if (rc)
                goto free_lgr;
        rc = smc_ib_create_protection_domain(lnk);
        if (rc)
                goto free_link_mem;
        rc = smc_ib_create_queue_pair(lnk);
        if (rc)
                goto dealloc_pd;
        rc = smc_wr_create_link(lnk);
        if (rc)
                goto destroy_qp;
        init_completion(&lnk->llc_confirm);
        init_completion(&lnk->llc_confirm_resp);

        smc->conn.lgr = lgr;
        rwlock_init(&lgr->conns_lock);
        spin_lock_bh(&smc_lgr_list.lock);
        list_add(&lgr->list, &smc_lgr_list.list);
        spin_unlock_bh(&smc_lgr_list.lock);
        return 0;

destroy_qp:
        smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
        smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
        smc_wr_free_link_mem(lnk);
free_lgr:
        kfree(lgr);
out:
        return rc;
}

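/* Mark a connection's buffers as unused. The buffers are not freed here;
 * they keep their slot in the link group's sndbufs/rmbs lists so that a
 * later connection can pick them up again via smc_buf_get_slot().
 */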
static void smc_buf_unuse(struct smc_connection *conn)
{
        if (conn->sndbuf_desc) {
                conn->sndbuf_desc->used = 0;
                conn->sndbuf_size = 0;
        }
        if (conn->rmb_desc) {
                conn->rmb_desc->reused = true;
                conn->rmb_desc->used = 0;
                conn->rmbe_size = 0;
        }
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!lgr)
                return;
        smc_cdc_tx_dismiss_slots(conn);
        smc_lgr_unregister_conn(conn);
        smc_buf_unuse(conn);
}

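/* tear down a link: reset the queue pair and release the IB resources in
 * reverse order of their creation in smc_lgr_create()
 */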
static void smc_link_clear(struct smc_link *lnk)
{
        lnk->peer_qpn = 0;
        smc_ib_modify_qp_reset(lnk);
        smc_wr_free_link(lnk);
        smc_ib_destroy_queue_pair(lnk);
        smc_ib_dealloc_protection_domain(lnk);
        smc_wr_free_link_mem(lnk);
}

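/* free a single sndbuf or RMB descriptor: release the memory region
 * (RMB only), unmap the DMA mapping, then free the pages and the descriptor
 */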
static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
                         bool is_rmb)
{
        if (is_rmb) {
                if (buf_desc->mr_rx[SMC_SINGLE_LINK])
                        smc_ib_put_memory_region(
                                        buf_desc->mr_rx[SMC_SINGLE_LINK]);
                smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
                                    DMA_FROM_DEVICE);
        } else {
                smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
                                    DMA_TO_DEVICE);
        }
        sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
        if (buf_desc->cpu_addr)
                free_pages((unsigned long)buf_desc->cpu_addr, buf_desc->order);
        kfree(buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
        struct smc_buf_desc *buf_desc, *bf_desc;
        struct list_head *buf_list;
        int i;

        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                if (is_rmb)
                        buf_list = &lgr->rmbs[i];
                else
                        buf_list = &lgr->sndbufs[i];
                list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
                                         list) {
                        list_del(&buf_desc->list);
                        smc_buf_free(buf_desc, lnk, is_rmb);
                }
        }
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
        /* free send buffers */
        __smc_lgr_free_bufs(lgr, false);
        /* free rmbs */
        __smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
void smc_lgr_free(struct smc_link_group *lgr)
{
        smc_lgr_free_bufs(lgr);
        smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
        kfree(lgr);
}

/* terminate link group abnormally */
void smc_lgr_terminate(struct smc_link_group *lgr)
{
        struct smc_connection *conn;
        struct smc_sock *smc;
        struct rb_node *node;

        spin_lock_bh(&smc_lgr_list.lock);
        if (list_empty(&lgr->list)) {
                /* termination already triggered */
                spin_unlock_bh(&smc_lgr_list.lock);
                return;
        }
        /* do not use this link group for new connections */
        list_del_init(&lgr->list);
        spin_unlock_bh(&smc_lgr_list.lock);

        write_lock_bh(&lgr->conns_lock);
        node = rb_first(&lgr->conns_all);
        while (node) {
                conn = rb_entry(node, struct smc_connection, alert_node);
                smc = container_of(conn, struct smc_sock, conn);
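                /* an extra reference keeps the sock alive across the
                 * sock_put() in __smc_lgr_unregister_conn() until the
                 * close worker has been scheduled
                 */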
                sock_hold(&smc->sk);
                __smc_lgr_unregister_conn(conn);
                schedule_work(&conn->close_work);
                sock_put(&smc->sk);
                node = rb_first(&lgr->conns_all);
        }
        write_unlock_bh(&lgr->conns_lock);
}

/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
{
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
        int rc = 0;

        *vlan_id = 0;
        if (!dst) {
                rc = -ENOTCONN;
                goto out;
        }
        if (!dst->dev) {
                rc = -ENODEV;
                goto out_rel;
        }

        if (is_vlan_dev(dst->dev))
                *vlan_id = vlan_dev_vlan_id(dst->dev);

out_rel:
        dst_release(dst);
out:
        return rc;
}

/* determine the link gid matching the vlan id of the link group */
static int smc_link_determine_gid(struct smc_link_group *lgr)
{
        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
        struct ib_gid_attr gattr;
        union ib_gid gid;
        int i;

        if (!lgr->vlan_id) {
                lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1];
                return 0;
        }

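        /* scan the port's GID table for an entry whose net device carries
         * the link group's vlan id
         */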
        for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len;
             i++) {
                if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid,
                                 &gattr))
                        continue;
                if (gattr.ndev &&
                    (vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id)) {
                        lnk->gid = gid;
                        return 0;
                }
        }
        return -ENODEV;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
                    struct smc_ib_device *smcibdev, u8 ibport,
                    struct smc_clc_msg_local *lcl, int srv_first_contact)
{
        struct smc_connection *conn = &smc->conn;
        struct smc_link_group *lgr;
        unsigned short vlan_id;
        enum smc_lgr_role role;
        int local_contact = SMC_FIRST_CONTACT;
        int rc = 0;

        role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
        rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
        if (rc)
                return rc;

        if ((role == SMC_CLNT) && srv_first_contact)
                /* create new link group as well */
                goto create;

        /* determine if an existing link group can be reused */
        spin_lock_bh(&smc_lgr_list.lock);
        list_for_each_entry(lgr, &smc_lgr_list.list, list) {
                write_lock_bh(&lgr->conns_lock);
                if (!memcmp(lgr->peer_systemid, lcl->id_for_peer,
                            SMC_SYSTEMID_LEN) &&
                    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
                            SMC_GID_SIZE) &&
                    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
                            sizeof(lcl->mac)) &&
                    !lgr->sync_err &&
                    (lgr->role == role) &&
                    (lgr->vlan_id == vlan_id) &&
                    ((role == SMC_CLNT) ||
                     (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) {
                        /* link group found */
                        local_contact = SMC_REUSE_CONTACT;
                        conn->lgr = lgr;
                        smc_lgr_register_conn(conn); /* add smc conn to lgr */
                        write_unlock_bh(&lgr->conns_lock);
                        break;
                }
                write_unlock_bh(&lgr->conns_lock);
        }
        spin_unlock_bh(&smc_lgr_list.lock);

        if (role == SMC_CLNT && !srv_first_contact &&
            (local_contact == SMC_FIRST_CONTACT)) {
                /* Server reuses a link group, but client wants to start
                 * a new one; send an out_of_sync decline, reason:
                 * synchronization error
                 */
                return -ENOLINK;
        }

create:
        if (local_contact == SMC_FIRST_CONTACT) {
                rc = smc_lgr_create(smc, peer_in_addr, smcibdev, ibport,
                                    lcl->id_for_peer, vlan_id);
                if (rc)
                        goto out;
                smc_lgr_register_conn(conn); /* add smc conn to lgr */
                rc = smc_link_determine_gid(conn->lgr);
        }
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
        conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&conn->acurs_lock);
#endif

out:
        return rc ? rc : local_contact;
}

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static inline
struct smc_buf_desc *smc_buf_get_slot(struct smc_link_group *lgr,
                                      int compressed_bufsize,
                                      rwlock_t *lock,
                                      struct list_head *buf_list)
{
        struct smc_buf_desc *buf_slot;

        read_lock_bh(lock);
        list_for_each_entry(buf_slot, buf_list, list) {
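                /* the cmpxchg() claims the slot atomically, so the scan can
                 * run under the read lock; only one contender sees 0 -> 1
                 */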
                if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
                        read_unlock_bh(lock);
                        return buf_slot;
                }
        }
        read_unlock_bh(lock);
        return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]; the limit is additionally capped at
 * SOCK_MIN_SNDBUF / 2, e.g. a 16 KB RMB yields min(1638, SOCK_MIN_SNDBUF / 2)
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
        return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
                                               bool is_rmb, int bufsize)
{
        struct smc_buf_desc *buf_desc;
        struct smc_link *lnk;
        int rc;

        /* try to alloc a new buffer */
        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
        if (!buf_desc)
                return ERR_PTR(-ENOMEM);

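        /* modest GFP flags: these allocations can be large and the caller
         * falls back to a smaller buffer size on -EAGAIN, so do not retry
         * hard, do not dip into emergency reserves and do not warn
         */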
        buf_desc->cpu_addr =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
                                         __GFP_NOMEMALLOC |
                                         __GFP_NORETRY | __GFP_ZERO,
                                         get_order(bufsize));
        if (!buf_desc->cpu_addr) {
                kfree(buf_desc);
                return ERR_PTR(-EAGAIN);
        }
        buf_desc->order = get_order(bufsize);

        /* build the sg table from the pages */
        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
                            GFP_KERNEL);
        if (rc) {
                smc_buf_free(buf_desc, lnk, is_rmb);
                return ERR_PTR(rc);
        }
        sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
                   buf_desc->cpu_addr, bufsize);

        /* map sg table to DMA address */
        rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
                               is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        /* SMC protocol depends on mapping to one DMA address only */
        if (rc != 1)  {
                smc_buf_free(buf_desc, lnk, is_rmb);
                return ERR_PTR(-EAGAIN);
        }

        /* create a new memory region for the RMB */
        if (is_rmb) {
                rc = smc_ib_get_memory_region(lnk->roce_pd,
                                              IB_ACCESS_REMOTE_WRITE |
                                              IB_ACCESS_LOCAL_WRITE,
                                              buf_desc);
                if (rc) {
                        smc_buf_free(buf_desc, lnk, is_rmb);
                        return ERR_PTR(rc);
                }
        }

        return buf_desc;
}

static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
{
        struct smc_connection *conn = &smc->conn;
        struct smc_link_group *lgr = conn->lgr;
        struct smc_buf_desc *buf_desc = NULL;
        struct list_head *buf_list;
        int bufsize, bufsize_short;
        int sk_buf_size;
        rwlock_t *lock;

        if (is_rmb)
                /* use socket recv buffer size (w/o overhead) as start value */
                sk_buf_size = smc->sk.sk_rcvbuf / 2;
        else
                /* use socket send buffer size (w/o overhead) as start value */
                sk_buf_size = smc->sk.sk_sndbuf / 2;

        for (bufsize_short = smc_compress_bufsize(sk_buf_size);
             bufsize_short >= 0; bufsize_short--) {

                if (is_rmb) {
                        lock = &lgr->rmbs_lock;
                        buf_list = &lgr->rmbs[bufsize_short];
                } else {
                        lock = &lgr->sndbufs_lock;
                        buf_list = &lgr->sndbufs[bufsize_short];
                }
                bufsize = smc_uncompress_bufsize(bufsize_short);
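                /* skip sizes that need more contiguous pages than a single
                 * scatterlist allocation can describe
                 */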
                if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
                        continue;

                /* check for reusable slot in the link group */
                buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list);
                if (buf_desc) {
                        memset(buf_desc->cpu_addr, 0, bufsize);
                        break; /* found reusable slot */
                }

                buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize);
                if (PTR_ERR(buf_desc) == -ENOMEM)
                        break;
                if (IS_ERR(buf_desc))
                        continue;

                buf_desc->used = 1;
                write_lock_bh(lock);
                list_add(&buf_desc->list, buf_list);
                write_unlock_bh(lock);
                break; /* found */
        }

        if (IS_ERR(buf_desc))
                return -ENOMEM;

        if (is_rmb) {
                conn->rmb_desc = buf_desc;
                conn->rmbe_size = bufsize;
                conn->rmbe_size_short = bufsize_short;
                smc->sk.sk_rcvbuf = bufsize * 2;
                atomic_set(&conn->bytes_to_rcv, 0);
                conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize);
        } else {
                conn->sndbuf_desc = buf_desc;
                conn->sndbuf_size = bufsize;
                smc->sk.sk_sndbuf = bufsize * 2;
                atomic_set(&conn->sndbuf_space, bufsize);
        }
        return 0;
}

625
626 void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
627 {
628         struct smc_link_group *lgr = conn->lgr;
629
630         smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
631                                conn->sndbuf_desc, DMA_TO_DEVICE);
632 }
633
634 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
635 {
636         struct smc_link_group *lgr = conn->lgr;
637
638         smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
639                                   conn->sndbuf_desc, DMA_TO_DEVICE);
640 }
641
642 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
643 {
644         struct smc_link_group *lgr = conn->lgr;
645
646         smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
647                                conn->rmb_desc, DMA_FROM_DEVICE);
648 }
649
650 void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
651 {
652         struct smc_link_group *lgr = conn->lgr;
653
654         smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
655                                   conn->rmb_desc, DMA_FROM_DEVICE);
656 }
657
/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc)
{
        int rc;

        /* create send buffer */
        rc = __smc_buf_create(smc, false);
        if (rc)
                return rc;
        /* create rmb */
        rc = __smc_buf_create(smc, true);
        if (rc)
                smc_buf_free(smc->conn.sndbuf_desc,
                             &smc->conn.lgr->lnk[SMC_SINGLE_LINK], false);
        return rc;
}

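/* reserve a free rtoken slot; test_and_set_bit() closes the race between
 * the lockless scan and the claim of a slot
 */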
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
        int i;

        for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
                if (!test_and_set_bit(i, lgr->rtokens_used_mask))
                        return i;
        }
        return -ENOSPC;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
                            struct smc_clc_msg_accept_confirm *clc)
{
        u64 dma_addr = be64_to_cpu(clc->rmb_dma_addr);
        struct smc_link_group *lgr = conn->lgr;
        u32 rkey = ntohl(clc->rmb_rkey);
        int i;

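        /* check whether this rkey/address pair is already known and reuse
         * its slot if so
         */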
        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
                    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
                    test_bit(i, lgr->rtokens_used_mask)) {
                        conn->rtoken_idx = i;
                        return 0;
                }
        }
        conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr);
        if (conn->rtoken_idx < 0)
                return conn->rtoken_idx;
        lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = rkey;
        lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = dma_addr;
        return 0;
}