net/rds/send.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");
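/*
 * Usage sketch (assuming rds is built as a module): the batch size can only
 * be set at load time, e.g.
 *
 *	modprobe rds send_batch_count=2048
 *
 * The 0444 permissions make the parameter read-only through sysfs
 * (/sys/module/rds/parameters/send_batch_count).
 */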

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_path_reset(struct rds_conn_path *cp)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;

        if (cp->cp_xmit_rm) {
                rm = cp->cp_xmit_rm;
                cp->cp_xmit_rm = NULL;
                /* Tell the user the RDMA op is no longer mapped by the
                 * transport. This isn't entirely true (it's flushed out
                 * independently) but as the connection is down, there's
                 * no ongoing RDMA to/from that memory */
                rds_message_unmapped(rm);
                rds_message_put(rm);
        }

        cp->cp_xmit_sg = 0;
        cp->cp_xmit_hdr_off = 0;
        cp->cp_xmit_data_off = 0;
        cp->cp_xmit_atomic_sent = 0;
        cp->cp_xmit_rdma_sent = 0;
        cp->cp_xmit_data_sent = 0;

        cp->cp_conn->c_map_queued = 0;

        cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
        cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

        /* Mark messages as retransmissions, and move them to the send q */
        spin_lock_irqsave(&cp->cp_lock, flags);
        list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
        }
        list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
        spin_unlock_irqrestore(&cp->cp_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_path_reset);

static int acquire_in_xmit(struct rds_conn_path *cp)
{
        return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
}

static void release_in_xmit(struct rds_conn_path *cp)
{
        clear_bit(RDS_IN_XMIT, &cp->cp_flags);
        smp_mb__after_atomic();
        /*
         * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
         * the system-wide hashed waitqueue buckets in the fast path only to
         * almost never find waiters.
         */
        if (waitqueue_active(&cp->cp_waitq))
                wake_up_all(&cp->cp_waitq);
}
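
/*
 * A minimal sketch of the waiter side of this handshake (the actual waiter
 * lives in the connection shutdown path, outside this file); it tests the
 * bit directly rather than using wait_on_bit(), matching the open-coded
 * wakeup above:
 *
 *	wait_event(cp->cp_waitq,
 *		   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
 */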

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;
        struct rds_message *rm;
        unsigned long flags;
        unsigned int tmp;
        struct scatterlist *sg;
        int ret = 0;
        LIST_HEAD(to_be_dropped);
        int batch_count;
        unsigned long send_gen = 0;

restart:
        batch_count = 0;

        /*
         * sendmsg calls here after having queued its message on the send
         * queue.  We only have one task feeding the connection at a time.  If
         * another thread is already feeding the queue then we back off.  This
         * avoids blocking the caller and trading per-connection data between
         * caches per message.
         */
        if (!acquire_in_xmit(cp)) {
                rds_stats_inc(s_send_lock_contention);
                ret = -ENOMEM;
                goto out;
        }

        if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
                release_in_xmit(cp);
                ret = -ENETUNREACH; /* don't requeue send work */
                goto out;
        }

        /*
         * we record the send generation after doing the xmit acquire.
         * if someone else manages to jump in and do some work, we'll use
         * this to avoid a goto restart farther down.
         *
         * The acquire_in_xmit() check above ensures that only one
         * caller can increment c_send_gen at any time.
         */
        send_gen = READ_ONCE(cp->cp_send_gen) + 1;
        WRITE_ONCE(cp->cp_send_gen, send_gen);

        /*
         * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
         * we do the opposite to avoid races.
         */
        if (!rds_conn_path_up(cp)) {
                release_in_xmit(cp);
                ret = 0;
                goto out;
        }

        if (conn->c_trans->xmit_path_prepare)
                conn->c_trans->xmit_path_prepare(cp);

        /*
         * spin trying to push headers and data down the connection until
         * the connection doesn't make forward progress.
         */
        while (1) {

                rm = cp->cp_xmit_rm;

                /*
                 * If between sending messages, we can send a pending congestion
                 * map update.
                 */
                if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
                        rm = rds_cong_update_alloc(conn);
                        if (IS_ERR(rm)) {
                                ret = PTR_ERR(rm);
                                break;
                        }
                        rm->data.op_active = 1;
                        rm->m_inc.i_conn_path = cp;
                        rm->m_inc.i_conn = cp->cp_conn;

                        cp->cp_xmit_rm = rm;
                }

                /*
                 * If not already working on one, grab the next message.
                 *
                 * cp_xmit_rm holds a ref while we're sending this message down
                 * the connection.  We can use this ref while holding the
                 * send_sem.  rds_send_reset() is serialized with it.
                 */
                if (!rm) {
                        unsigned int len;

                        batch_count++;

                        /* we want to process as big a batch as we can, but
                         * we also want to avoid softlockups.  If we've been
                         * through a lot of messages, let's back off and see
                         * if anyone else jumps in
                         */
                        if (batch_count >= send_batch_count)
                                goto over_batch;

                        spin_lock_irqsave(&cp->cp_lock, flags);

                        if (!list_empty(&cp->cp_send_queue)) {
                                rm = list_entry(cp->cp_send_queue.next,
                                                struct rds_message,
                                                m_conn_item);
                                rds_message_addref(rm);

                                /*
                                 * Move the message from the send queue to the retransmit
                                 * list right away.
                                 */
                                list_move_tail(&rm->m_conn_item,
                                               &cp->cp_retrans);
                        }

                        spin_unlock_irqrestore(&cp->cp_lock, flags);

                        if (!rm)
                                break;

                        /* Unfortunately, the way Infiniband deals with
                         * RDMA to a bad MR key is by moving the entire
                         * queue pair to error state. We could possibly
                         * recover from that, but right now we drop the
                         * connection.
                         * Therefore, we never retransmit messages with RDMA ops.
                         */
                        if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
                            (rm->rdma.op_active &&
                            test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
                                spin_lock_irqsave(&cp->cp_lock, flags);
                                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                                        list_move(&rm->m_conn_item, &to_be_dropped);
                                spin_unlock_irqrestore(&cp->cp_lock, flags);
                                continue;
                        }

                        /* Require an ACK every once in a while */
                        len = ntohl(rm->m_inc.i_hdr.h_len);
                        if (cp->cp_unacked_packets == 0 ||
                            cp->cp_unacked_bytes < len) {
                                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                                cp->cp_unacked_packets =
                                        rds_sysctl_max_unacked_packets;
                                cp->cp_unacked_bytes =
                                        rds_sysctl_max_unacked_bytes;
                                rds_stats_inc(s_send_ack_required);
                        } else {
                                cp->cp_unacked_bytes -= len;
                                cp->cp_unacked_packets--;
                        }

                        cp->cp_xmit_rm = rm;
                }

                /* The transport either sends the whole rdma or none of it */
                if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
                        rm->m_final_op = &rm->rdma;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        cp->cp_xmit_rdma_sent = 1;
                }

                if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
                        rm->m_final_op = &rm->atomic;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        cp->cp_xmit_atomic_sent = 1;
                }

                /*
                 * A number of cases require an RDS header to be sent
                 * even if there is no data.
                 * We permit 0-byte sends; rds-ping depends on this.
                 * However, if there are exclusively attached silent ops,
                 * we skip the hdr/data send, to enable silent operation.
                 */
                if (rm->data.op_nents == 0) {
                        int ops_present;
                        int all_ops_are_silent = 1;

                        ops_present = (rm->atomic.op_active || rm->rdma.op_active);
                        if (rm->atomic.op_active && !rm->atomic.op_silent)
                                all_ops_are_silent = 0;
                        if (rm->rdma.op_active && !rm->rdma.op_silent)
                                all_ops_are_silent = 0;

                        if (ops_present && all_ops_are_silent &&
                            !rm->m_rdma_cookie)
                                rm->data.op_active = 0;
                }

                if (rm->data.op_active && !cp->cp_xmit_data_sent) {
                        rm->m_final_op = &rm->data;

                        ret = conn->c_trans->xmit(conn, rm,
                                                  cp->cp_xmit_hdr_off,
                                                  cp->cp_xmit_sg,
                                                  cp->cp_xmit_data_off);
                        if (ret <= 0)
                                break;

                        if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
                                tmp = min_t(int, ret,
                                            sizeof(struct rds_header) -
                                            cp->cp_xmit_hdr_off);
                                cp->cp_xmit_hdr_off += tmp;
                                ret -= tmp;
                        }

                        sg = &rm->data.op_sg[cp->cp_xmit_sg];
                        while (ret) {
                                tmp = min_t(int, ret, sg->length -
                                                      cp->cp_xmit_data_off);
                                cp->cp_xmit_data_off += tmp;
                                ret -= tmp;
                                if (cp->cp_xmit_data_off == sg->length) {
                                        cp->cp_xmit_data_off = 0;
                                        sg++;
                                        cp->cp_xmit_sg++;
                                        BUG_ON(ret != 0 && cp->cp_xmit_sg ==
                                               rm->data.op_nents);
                                }
                        }

                        if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
                            (cp->cp_xmit_sg == rm->data.op_nents))
                                cp->cp_xmit_data_sent = 1;
                }

                /*
                 * An rm will only take multiple trips through this loop
                 * if there is a data op. Thus, if the data is sent (or there
                 * was none), then we're done with the rm.
                 */
                if (!rm->data.op_active || cp->cp_xmit_data_sent) {
                        cp->cp_xmit_rm = NULL;
                        cp->cp_xmit_sg = 0;
                        cp->cp_xmit_hdr_off = 0;
                        cp->cp_xmit_data_off = 0;
                        cp->cp_xmit_rdma_sent = 0;
                        cp->cp_xmit_atomic_sent = 0;
                        cp->cp_xmit_data_sent = 0;

                        rds_message_put(rm);
                }
        }

over_batch:
        if (conn->c_trans->xmit_path_complete)
                conn->c_trans->xmit_path_complete(cp);
        release_in_xmit(cp);

        /* Nuke any messages we decided not to retransmit. */
        if (!list_empty(&to_be_dropped)) {
                /* irqs on here, so we can put(), unlike above */
                list_for_each_entry(rm, &to_be_dropped, m_conn_item)
                        rds_message_put(rm);
                rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
        }

        /*
         * Other senders can queue a message after we last test the send queue
         * but before we clear RDS_IN_XMIT.  In that case they'd back off and
         * not try and send their newly queued message.  We need to check the
         * send queue after having cleared RDS_IN_XMIT so that their message
         * doesn't get stuck on the send queue.
         *
         * If the transport cannot continue (i.e. ret != 0), then it must
         * call us when more room is available, such as from the tx
         * completion handler.
         *
         * We have an extra generation check here so that if someone manages
         * to jump in after our release_in_xmit, we'll see that they have done
         * some work and we will skip our goto restart.
         */
        if (ret == 0) {
                bool raced;

                smp_mb();
                raced = send_gen != READ_ONCE(cp->cp_send_gen);

                if ((test_bit(0, &conn->c_map_queued) ||
                     !list_empty(&cp->cp_send_queue)) && !raced) {
                        if (batch_count < send_batch_count)
                                goto restart;
                        rcu_read_lock();
                        if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
                                ret = -ENETUNREACH;
                        else
                                queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
                        rcu_read_unlock();
                } else if (raced) {
                        rds_stats_inc(s_send_lock_queue_raced);
                }
        }
out:
        return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
        u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        assert_spin_locked(&rs->rs_lock);

        BUG_ON(rs->rs_snd_bytes < len);
        rs->rs_snd_bytes -= len;

        if (rs->rs_snd_bytes == 0)
                rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
                                    is_acked_func is_acked)
{
        if (is_acked)
                return is_acked(rm, ack);
        return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
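
/*
 * Worked example for the default (is_acked == NULL) case: with ack == 10,
 * messages whose h_sequence is 8, 9 or 10 count as acknowledged, while
 * sequence 11 does not - acks are cumulative over the sequence space,
 * which is why rds_send_path_drop_acked() below stops at the first
 * unacked message.
 */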

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_rdma_op *ro;
        struct rds_notifier *notifier;
        unsigned long flags;
        unsigned int notify = 0;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        notify = rm->rdma.op_notify | rm->data.op_notify;
        ro = &rm->rdma;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ro->op_active && notify && ro->op_notifier) {
                notifier = ro->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ro->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_atomic_op *ao;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ao = &rm->atomic;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ao->op_active && ao->op_notify && ao->op_notifier) {
                notifier = ao->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ao->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
        struct rm_rdma_op *ro;
        struct rm_atomic_op *ao;

        ro = &rm->rdma;
        if (ro->op_active && ro->op_notify && ro->op_notifier) {
                ro->op_notifier->n_status = status;
                list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
                ro->op_notifier = NULL;
        }

        ao = &rm->atomic;
        if (ao->op_active && ao->op_notify && ao->op_notifier) {
                ao->op_notifier->n_status = status;
                list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
                ao->op_notifier = NULL;
        }

        /* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
        unsigned long flags;
        struct rds_sock *rs = NULL;
        struct rds_message *rm;

        while (!list_empty(messages)) {
                int was_on_sock = 0;

                rm = list_entry(messages->next, struct rds_message,
                                m_conn_item);
                list_del_init(&rm->m_conn_item);

                /*
                 * If we see this flag cleared then we're *sure* that someone
                 * else beat us to removing it from the sock.  If we race
                 * with their flag update we'll get the lock and then really
                 * see that the flag has been cleared.
                 *
                 * The message spinlock makes sure nobody clears rm->m_rs
                 * while we're messing with it. It does not prevent the
                 * message from being removed from the socket, though.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);
                if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
                        goto unlock_and_drop;

                if (rs != rm->m_rs) {
                        if (rs) {
                                rds_wake_sk_sleep(rs);
                                sock_put(rds_rs_to_sk(rs));
                        }
                        rs = rm->m_rs;
                        if (rs)
                                sock_hold(rds_rs_to_sk(rs));
                }
                if (!rs)
                        goto unlock_and_drop;
                spin_lock(&rs->rs_lock);

                if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
                        struct rm_rdma_op *ro = &rm->rdma;
                        struct rds_notifier *notifier;

                        list_del_init(&rm->m_sock_item);
                        rds_send_sndbuf_remove(rs, rm);

                        if (ro->op_active && ro->op_notifier &&
                            (ro->op_notify || (ro->op_recverr && status))) {
                                notifier = ro->op_notifier;
                                list_add_tail(&notifier->n_list,
                                              &rs->rs_notify_queue);
                                if (!notifier->n_status)
                                        notifier->n_status = status;
                                rm->rdma.op_notifier = NULL;
                        }
                        was_on_sock = 1;
                        rm->m_rs = NULL;
                }
                spin_unlock(&rs->rs_lock);

unlock_and_drop:
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                rds_message_put(rm);
                if (was_on_sock)
                        rds_message_put(rm);
        }

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
                              is_acked_func is_acked)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&cp->cp_lock, flags);

        list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
                if (!rds_send_is_acked(rm, ack, is_acked))
                        break;

                list_move(&rm->m_conn_item, &list);
                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        }

        /* order flag updates with spin locks */
        if (!list_empty(&list))
                smp_mb__after_atomic();

        spin_unlock_irqrestore(&cp->cp_lock, flags);

        /* now remove the messages from the sock list as needed */
        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                         is_acked_func is_acked)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
        struct rds_message *rm, *tmp;
        struct rds_connection *conn;
        struct rds_conn_path *cp;
        unsigned long flags;
        LIST_HEAD(list);

        /* get all the messages we're dropping under the rs lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
                if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
                             dest->sin_port != rm->m_inc.i_hdr.h_dport))
                        continue;

                list_move(&rm->m_sock_item, &list);
                rds_send_sndbuf_remove(rs, rm);
                clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
        }

        /* order flag updates with the rs lock */
        smp_mb__after_atomic();

        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (list_empty(&list))
                return;

        /* Remove the messages from the conn */
        list_for_each_entry(rm, &list, m_sock_item) {

                conn = rm->m_inc.i_conn;
                if (conn->c_trans->t_mp_capable)
                        cp = rm->m_inc.i_conn_path;
                else
                        cp = &conn->c_path[0];

                spin_lock_irqsave(&cp->cp_lock, flags);
                /*
                 * Maybe someone else beat us to removing rm from the conn.
                 * If we race with their flag update we'll get the lock and
                 * then really see that the flag has been cleared.
                 */
                if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                        spin_unlock_irqrestore(&cp->cp_lock, flags);
                        spin_lock_irqsave(&rm->m_rs_lock, flags);
                        rm->m_rs = NULL;
                        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                        continue;
                }
                list_del_init(&rm->m_conn_item);
                spin_unlock_irqrestore(&cp->cp_lock, flags);

                /*
                 * Couldn't grab m_rs_lock in top loop (lock ordering),
                 * but we can now.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }

        rds_wake_sk_sleep(rs);

        while (!list_empty(&list)) {
                rm = list_entry(list.next, struct rds_message, m_sock_item);
                list_del_init(&rm->m_sock_item);
                rds_message_wait(rm);

                /* just in case the code above skipped this message
                 * because RDS_MSG_ON_CONN wasn't set, run it again here.
                 * Taking m_rs_lock is the only thing that keeps us
                 * from racing with ack processing.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
                             struct rds_conn_path *cp,
                             struct rds_message *rm, __be16 sport,
                             __be16 dport, int *queued)
{
        unsigned long flags;
        u32 len;

        if (*queued)
                goto out;

        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        /* this is the only place which holds both the socket's rs_lock
         * and the connection's c_lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        /*
         * If there is only a little space left in sndbuf, we don't queue
         * anything, and userspace gets -EAGAIN. But poll() indicates there's
         * send room. This can lead to bad behavior (spinning) if snd_bytes
         * isn't freed up by incoming acks. So we check the *old* value of
         * rs_snd_bytes here to allow the last msg to exceed the buffer,
         * and poll() now knows no more data can be sent.
         */
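        /*
         * Worked example (hypothetical numbers, 64KB sndbuf): with
         * rs_snd_bytes at 60KB, a 16KB message is still queued because the
         * *old* value (60KB) is below the limit; rs_snd_bytes becomes 76KB
         * and poll() stops reporting send room until acks drain the queue,
         * instead of letting userspace spin on -EAGAIN.
         */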
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
                rs->rs_snd_bytes += len;

                /* let recv side know we are close to send space exhaustion.
                 * This is probably not the optimal way to do it, as this
                 * means we set the flag on *all* messages as soon as our
                 * throughput hits a certain threshold.
                 */
                if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
                        set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
                set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
                rds_message_addref(rm);
                rm->m_rs = rs;

                /* The code ordering is a little weird, but we're
                   trying to minimize the time we hold c_lock */
                rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
                rm->m_inc.i_conn = conn;
                rm->m_inc.i_conn_path = cp;
                rds_message_addref(rm);

                spin_lock(&cp->cp_lock);
                rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
                list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
                set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
                spin_unlock(&cp->cp_lock);

                rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
                         rm, len, rs, rs->rs_snd_bytes,
                         (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

                *queued = 1;
        }

        spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
        return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
        struct cmsghdr *cmsg;
        int size = 0;
        int cmsg_groups = 0;
        int retval;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        cmsg_groups |= 1;
                        retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
                        if (retval < 0)
                                return retval;
                        size += retval;
                        break;

                case RDS_CMSG_RDMA_DEST:
                case RDS_CMSG_RDMA_MAP:
                        cmsg_groups |= 2;
                        /* these are valid but do not add any size */
                        break;

                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        cmsg_groups |= 1;
                        size += sizeof(struct scatterlist);
                        break;

                default:
                        return -EINVAL;
                }
        }

        size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

        /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
        if (cmsg_groups == 3)
                return -EINVAL;

        return size;
}
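
/*
 * Worked example (hypothetical sizes, 4KB pages): a sendmsg() carrying
 * 5000 bytes of data plus one RDS_CMSG_ATOMIC_FADD cmsg needs
 * ceil(5000, 4096) = 2 scatterlist entries for the data and one for the
 * atomic op, so rds_rm_size() returns 3 * sizeof(struct scatterlist),
 * and cmsg_groups == 1 passes the (DEST, MAP) exclusion check.
 */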

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                         struct msghdr *msg, int *allocated_mr)
{
        struct cmsghdr *cmsg;
        int ret = 0;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                /* As a side effect, RDMA_DEST and RDMA_MAP will set
                 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        ret = rds_cmsg_rdma_args(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_DEST:
                        ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_MAP:
                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                        if (!ret)
                                *allocated_mr = 1;
                        else if (ret == -ENODEV)
                                /* Accommodate the get_mr() case which can fail
                                 * if connection isn't established yet.
                                 */
                                ret = -EAGAIN;
                        break;

                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        ret = rds_cmsg_atomic(rs, rm, cmsg);
                        break;

                default:
                        return -EINVAL;
                }

                if (ret)
                        break;
        }

        return ret;
}

static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
{
        int hash;

        if (conn->c_npaths == 0)
                hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
        else
                hash = RDS_MPATH_HASH(rs, conn->c_npaths);
        if (conn->c_npaths == 0 && hash != 0) {
                rds_send_ping(conn, 0);

                if (conn->c_npaths == 0) {
                        wait_event_interruptible(conn->c_hs_waitq,
                                                 (conn->c_npaths != 0));
                }
                if (conn->c_npaths == 1)
                        hash = 0;
        }
        return hash;
}
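
/*
 * Behavior sketch: RDS_MPATH_HASH() reduces the socket's bound port to a
 * lane in [0, n).  The first send on a fresh connection (c_npaths == 0)
 * that hashes to a nonzero lane triggers a ping and waits for the
 * handshake to fill in c_npaths; if the peer turns out to be single-path
 * (c_npaths == 1), everything collapses back onto lane 0.
 */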

static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
{
        struct rds_rdma_args *args;
        struct cmsghdr *cmsg;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
                        if (cmsg->cmsg_len <
                            CMSG_LEN(sizeof(struct rds_rdma_args)))
                                return -EINVAL;
                        args = CMSG_DATA(cmsg);
                        *rdma_bytes += args->remote_vec.bytes;
                }
        }
        return 0;
}

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
        __be32 daddr;
        __be16 dport;
        struct rds_message *rm = NULL;
        struct rds_connection *conn;
        int ret = 0;
        int queued = 0, allocated_mr = 0;
        int nonblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_sndtimeo(sk, nonblock);
        struct rds_conn_path *cpath;
        size_t total_payload_len = payload_len, rdma_payload_len = 0;

        /* Mirror Linux UDP's mirroring of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (msg->msg_namelen) {
                /* XXX fail non-unicast destination IPs? */
                if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
                        ret = -EINVAL;
                        goto out;
                }
                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
        } else {
                /* We only care about consistency with ->connect() */
                lock_sock(sk);
                daddr = rs->rs_conn_addr;
                dport = rs->rs_conn_port;
                release_sock(sk);
        }

        lock_sock(sk);
        if (daddr == 0 || rs->rs_bound_addr == 0) {
                release_sock(sk);
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }
        release_sock(sk);

        ret = rds_rdma_bytes(msg, &rdma_payload_len);
        if (ret)
                goto out;

        total_payload_len += rdma_payload_len;
        if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
                ret = -EMSGSIZE;
                goto out;
        }

        if (payload_len > rds_sk_sndbuf(rs)) {
                ret = -EMSGSIZE;
                goto out;
        }

        /* size of rm including all sgs */
        ret = rds_rm_size(msg, payload_len);
        if (ret < 0)
                goto out;

        rm = rds_message_alloc(ret, GFP_KERNEL);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        /* Attach data to the rm */
        if (payload_len) {
                rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
                if (!rm->data.op_sg) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = rds_message_copy_from_user(rm, &msg->msg_iter);
                if (ret)
                        goto out;
        }
        rm->data.op_active = 1;

        rm->m_daddr = daddr;

        /* rds_conn_create has a spinlock that runs with IRQ off.
         * Caching the conn in the socket helps a lot. */
        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
                conn = rs->rs_conn;
        else {
                conn = rds_conn_create_outgoing(sock_net(sock->sk),
                                                rs->rs_bound_addr, daddr,
                                                rs->rs_transport,
                                                sock->sk->sk_allocation);
                if (IS_ERR(conn)) {
                        ret = PTR_ERR(conn);
                        goto out;
                }
                rs->rs_conn = conn;
        }

        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
        if (ret) {
                /* Trigger connection so that it's ready for the next retry */
                if (ret == -EAGAIN)
                        rds_conn_connect_if_down(conn);
                goto out;
        }

        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
                                   &rm->rdma, conn->c_trans->xmit_rdma);
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
                printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
                                   &rm->atomic, conn->c_trans->xmit_atomic);
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (conn->c_trans->t_mp_capable)
                cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
        else
                cpath = &conn->c_path[0];

        if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
                ret = -EAGAIN;
                goto out;
        }

        rds_conn_path_connect_if_down(cpath);

        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret) {
                rs->rs_seen_congestion = 1;
                goto out;
        }
        while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
                                  dport, &queued)) {
                rds_stats_inc(s_send_queue_full);

                if (nonblock) {
                        ret = -EAGAIN;
                        goto out;
                }

                timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                        rds_send_queue_rm(rs, conn, cpath, rm,
                                                          rs->rs_bound_port,
                                                          dport,
                                                          &queued),
                                        timeo);
                rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
                if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                        continue;

                ret = timeo;
                if (ret == 0)
                        ret = -ETIMEDOUT;
                goto out;
        }

        /*
         * By now we've committed to the send.  We reuse rds_send_worker()
         * to retry sends in the rds thread if the transport asks us to.
         */
        rds_stats_inc(s_send_queued);

        ret = rds_send_xmit(cpath);
        if (ret == -ENOMEM || ret == -EAGAIN) {
                ret = 0;
                rcu_read_lock();
                if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
                        ret = -ENETUNREACH;
                else
                        queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
                rcu_read_unlock();
        }
        if (ret)
                goto out;
        rds_message_put(rm);
        return payload_len;

out:
        /* If the user included a RDMA_MAP cmsg, we allocated an MR on the
         * fly. If the sendmsg goes through, we keep the MR. If it fails with
         * -EAGAIN or in any other way, we need to destroy the MR again */
        if (allocated_mr)
                rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

        if (rm)
                rds_message_put(rm);
        return ret;
}
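
/*
 * Userspace usage sketch (illustrative; not part of this file). An RDS
 * socket must be bound before sending; the destination port below is a
 * made-up example value and 192.0.2.1 is a documentation address:
 *
 *	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
 *	// bind(fd, ...) to a local IP/port first
 *	char buf[] = "hello";
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(4000),	// example port
 *		.sin_addr.s_addr = inet_addr("192.0.2.1"),
 *	};
 *	struct msghdr mh = {
 *		.msg_name = &dst, .msg_namelen = sizeof(dst),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *	};
 *	sendmsg(fd, &mh, 0);			// enters rds_sendmsg()
 *
 * RDMA and atomic ops ride along as SOL_RDS control messages
 * (e.g. RDS_CMSG_RDMA_ARGS) in msg_control.
 */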

/*
 * send out a probe. Can be shared by rds_send_ping,
 * rds_send_pong, rds_send_hb.
 * rds_send_hb should use h_flags
 *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
 * or
 *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
 */
static int
rds_send_probe(struct rds_conn_path *cp, __be16 sport,
               __be16 dport, u8 h_flags)
{
        struct rds_message *rm;
        unsigned long flags;
        int ret = 0;

        rm = rds_message_alloc(0, GFP_ATOMIC);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        rm->m_daddr = cp->cp_conn->c_faddr;
        rm->data.op_active = 1;

        rds_conn_path_connect_if_down(cp);

        ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
        if (ret)
                goto out;

        spin_lock_irqsave(&cp->cp_lock, flags);
        list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        rds_message_addref(rm);
        rm->m_inc.i_conn = cp->cp_conn;
        rm->m_inc.i_conn_path = cp;

        rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
                                    cp->cp_next_tx_seq);
        rm->m_inc.i_hdr.h_flags |= h_flags;
        cp->cp_next_tx_seq++;

        if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
            cp->cp_conn->c_trans->t_mp_capable) {
                __be16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
                __be32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);

                rds_message_add_extension(&rm->m_inc.i_hdr,
                                          RDS_EXTHDR_NPATHS, &npaths,
                                          sizeof(npaths));
                rds_message_add_extension(&rm->m_inc.i_hdr,
                                          RDS_EXTHDR_GEN_NUM,
                                          &my_gen_num,
                                          sizeof(u32));
        }
        spin_unlock_irqrestore(&cp->cp_lock, flags);

        rds_stats_inc(s_send_queued);
        rds_stats_inc(s_send_pong);

        /* schedule the send work on rds_wq */
        rcu_read_lock();
        if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
                queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
        rcu_read_unlock();

        rds_message_put(rm);
        return 0;

out:
        if (rm)
                rds_message_put(rm);
        return ret;
}

int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
        return rds_send_probe(cp, 0, dport, 0);
}

void
rds_send_ping(struct rds_connection *conn, int cp_index)
{
        unsigned long flags;
        struct rds_conn_path *cp = &conn->c_path[cp_index];

        spin_lock_irqsave(&cp->cp_lock, flags);
        if (conn->c_ping_triggered) {
                spin_unlock_irqrestore(&cp->cp_lock, flags);
                return;
        }
        conn->c_ping_triggered = 1;
        spin_unlock_irqrestore(&cp->cp_lock, flags);
        rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
}
EXPORT_SYMBOL_GPL(rds_send_ping);