/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the soft lockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

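/*
 * Usage sketch (illustrative, not part of the code): because the parameter
 * is registered with 0444 permissions above, it can only be set at module
 * load time and is read back through sysfs.  Assuming RDS is built as the
 * "rds" module, that looks like:
 *
 *      # modprobe rds send_batch_count=128
 *      # cat /sys/module/rds/parameters/send_batch_count
 *      128
 */
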
static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;

        if (conn->c_xmit_rm) {
                rm = conn->c_xmit_rm;
                conn->c_xmit_rm = NULL;
                /* Tell the user the RDMA op is no longer mapped by the
                 * transport. This isn't entirely true (it's flushed out
                 * independently) but as the connection is down, there's
                 * no ongoing RDMA to/from that memory */
                rds_message_unmapped(rm);
                rds_message_put(rm);
        }

        conn->c_xmit_sg = 0;
        conn->c_xmit_hdr_off = 0;
        conn->c_xmit_data_off = 0;
        conn->c_xmit_atomic_sent = 0;
        conn->c_xmit_rdma_sent = 0;
        conn->c_xmit_data_sent = 0;

        conn->c_map_queued = 0;

        conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
        conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

        /* Mark messages as retransmissions, and move them to the send q */
        spin_lock_irqsave(&conn->c_lock, flags);
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
        }
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
}

static int acquire_in_xmit(struct rds_connection *conn)
{
        return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
        clear_bit(RDS_IN_XMIT, &conn->c_flags);
        smp_mb__after_atomic();
        /*
         * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
         * the system-wide hashed waitqueue buckets in the fast path only to
         * almost never find waiters.
         */
        if (waitqueue_active(&conn->c_waitq))
                wake_up_all(&conn->c_waitq);
}

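/*
 * For reference, the waiting side of this handshake lives in
 * rds_conn_shutdown(), which in rough sketch form blocks until the
 * transmit path has let go of the bit:
 *
 *      wait_event(conn->c_waitq,
 *                 !test_bit(RDS_IN_XMIT, &conn->c_flags));
 *
 * That waiter is rare and slow-path, which is why the conditional
 * waitqueue_active() + wake_up_all() above is enough.
 */
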
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
        struct rds_message *rm;
        unsigned long flags;
        unsigned int tmp;
        struct scatterlist *sg;
        int ret = 0;
        LIST_HEAD(to_be_dropped);
        int batch_count;
        unsigned long send_gen = 0;

restart:
        batch_count = 0;

        /*
         * sendmsg calls here after having queued its message on the send
         * queue.  We only have one task feeding the connection at a time.  If
         * another thread is already feeding the queue then we back off.  This
         * avoids blocking the caller and bouncing per-connection data between
         * CPU caches on every message.
         */
        if (!acquire_in_xmit(conn)) {
                rds_stats_inc(s_send_lock_contention);
                ret = -ENOMEM;
                goto out;
        }

        /*
         * We record the send generation after doing the xmit acquire.
         * If someone else manages to jump in and do some work, we'll use
         * this to avoid a goto restart farther down.
         *
         * The acquire_in_xmit() check above ensures that only one
         * caller can increment c_send_gen at any time.
         */
        conn->c_send_gen++;
        send_gen = conn->c_send_gen;

        /*
         * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
         * we do the opposite to avoid races.
         */
        if (!rds_conn_up(conn)) {
                release_in_xmit(conn);
                ret = 0;
                goto out;
        }

        if (conn->c_trans->xmit_prepare)
                conn->c_trans->xmit_prepare(conn);

        /*
         * spin trying to push headers and data down the connection until
         * the connection doesn't make forward progress.
         */
        while (1) {

                rm = conn->c_xmit_rm;

                /*
                 * If we're between sending messages, we can send a pending
                 * congestion map update.
                 */
                if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
                        rm = rds_cong_update_alloc(conn);
                        if (IS_ERR(rm)) {
                                ret = PTR_ERR(rm);
                                break;
                        }
                        rm->data.op_active = 1;

                        conn->c_xmit_rm = rm;
                }

                /*
                 * If not already working on one, grab the next message.
                 *
                 * c_xmit_rm holds a ref while we're sending this message down
                 * the connection.  We can use this ref while holding the
                 * send_sem; rds_send_reset() is serialized with it.
                 */
                if (!rm) {
                        unsigned int len;

                        batch_count++;

                        /* we want to process as big a batch as we can, but
                         * we also want to avoid softlockups.  If we've been
                         * through a lot of messages, let's back off and see
                         * if anyone else jumps in
                         */
                        if (batch_count >= 1024)
                                goto over_batch;

                        spin_lock_irqsave(&conn->c_lock, flags);

                        if (!list_empty(&conn->c_send_queue)) {
                                rm = list_entry(conn->c_send_queue.next,
                                                struct rds_message,
                                                m_conn_item);
                                rds_message_addref(rm);

                                /*
                                 * Move the message from the send queue to the retransmit
                                 * list right away.
                                 */
                                list_move_tail(&rm->m_conn_item, &conn->c_retrans);
                        }

                        spin_unlock_irqrestore(&conn->c_lock, flags);

                        if (!rm)
                                break;

                        /* Unfortunately, the way Infiniband deals with
                         * RDMA to a bad MR key is by moving the entire
                         * queue pair to error state. We could possibly
                         * recover from that, but right now we drop the
                         * connection.
                         * Therefore, we never retransmit messages with RDMA ops.
                         */
                        if (rm->rdma.op_active &&
                            test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
                                spin_lock_irqsave(&conn->c_lock, flags);
                                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                                        list_move(&rm->m_conn_item, &to_be_dropped);
                                spin_unlock_irqrestore(&conn->c_lock, flags);
                                continue;
                        }

                        /* Require an ACK every once in a while */
                        len = ntohl(rm->m_inc.i_hdr.h_len);
                        if (conn->c_unacked_packets == 0 ||
                            conn->c_unacked_bytes < len) {
                                __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                                conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
                                conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
                                rds_stats_inc(s_send_ack_required);
                        } else {
                                conn->c_unacked_bytes -= len;
                                conn->c_unacked_packets--;
                        }

                        conn->c_xmit_rm = rm;
                }

                /* The transport either sends the whole rdma or none of it */
                if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
                        rm->m_final_op = &rm->rdma;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        conn->c_xmit_rdma_sent = 1;

                }

                if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
                        rm->m_final_op = &rm->atomic;
                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue
                         */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                        ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
                        if (ret) {
                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                wake_up_interruptible(&rm->m_flush_wait);
                                break;
                        }
                        conn->c_xmit_atomic_sent = 1;

                }

                /*
                 * A number of cases require an RDS header to be sent
                 * even if there is no data.
                 * We permit 0-byte sends; rds-ping depends on this.
                 * However, if the message consists solely of silent ops,
                 * we skip the hdr/data send, to enable silent operation.
                 */
                if (rm->data.op_nents == 0) {
                        int ops_present;
                        int all_ops_are_silent = 1;

                        ops_present = (rm->atomic.op_active || rm->rdma.op_active);
                        if (rm->atomic.op_active && !rm->atomic.op_silent)
                                all_ops_are_silent = 0;
                        if (rm->rdma.op_active && !rm->rdma.op_silent)
                                all_ops_are_silent = 0;

                        if (ops_present && all_ops_are_silent
                            && !rm->m_rdma_cookie)
                                rm->data.op_active = 0;
                }

                if (rm->data.op_active && !conn->c_xmit_data_sent) {
                        rm->m_final_op = &rm->data;
                        ret = conn->c_trans->xmit(conn, rm,
                                                  conn->c_xmit_hdr_off,
                                                  conn->c_xmit_sg,
                                                  conn->c_xmit_data_off);
                        if (ret <= 0)
                                break;

                        if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
                                tmp = min_t(int, ret,
                                            sizeof(struct rds_header) -
                                            conn->c_xmit_hdr_off);
                                conn->c_xmit_hdr_off += tmp;
                                ret -= tmp;
                        }

                        sg = &rm->data.op_sg[conn->c_xmit_sg];
                        while (ret) {
                                tmp = min_t(int, ret, sg->length -
                                                      conn->c_xmit_data_off);
                                conn->c_xmit_data_off += tmp;
                                ret -= tmp;
                                if (conn->c_xmit_data_off == sg->length) {
                                        conn->c_xmit_data_off = 0;
                                        sg++;
                                        conn->c_xmit_sg++;
                                        BUG_ON(ret != 0 &&
                                               conn->c_xmit_sg == rm->data.op_nents);
                                }
                        }

                        if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
                            (conn->c_xmit_sg == rm->data.op_nents))
                                conn->c_xmit_data_sent = 1;
                }

                /*
                 * An rm will only make multiple passes through this loop
                 * if there is a data op. Thus, if the data is sent (or there was
                 * none), then we're done with the rm.
                 */
                if (!rm->data.op_active || conn->c_xmit_data_sent) {
                        conn->c_xmit_rm = NULL;
                        conn->c_xmit_sg = 0;
                        conn->c_xmit_hdr_off = 0;
                        conn->c_xmit_data_off = 0;
                        conn->c_xmit_rdma_sent = 0;
                        conn->c_xmit_atomic_sent = 0;
                        conn->c_xmit_data_sent = 0;

                        rds_message_put(rm);
                }
        }

over_batch:
        if (conn->c_trans->xmit_complete)
                conn->c_trans->xmit_complete(conn);
        release_in_xmit(conn);

        /* Nuke any messages we decided not to retransmit. */
        if (!list_empty(&to_be_dropped)) {
                /* irqs on here, so we can put(), unlike above */
                list_for_each_entry(rm, &to_be_dropped, m_conn_item)
                        rds_message_put(rm);
                rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
        }

        /*
         * Other senders can queue a message after we last test the send queue
         * but before we clear RDS_IN_XMIT.  In that case they'd back off and
         * not try to send their newly queued message.  We need to check the
         * send queue after having cleared RDS_IN_XMIT so that their message
         * doesn't get stuck on the send queue.
         *
         * If the transport cannot continue (i.e., ret != 0), then it must
         * call us when more room is available, such as from the tx
         * completion handler.
         *
         * We have an extra generation check here so that if someone manages
         * to jump in after our release_in_xmit, we'll see that they have done
         * some work and we will skip our goto.
         */
        if (ret == 0) {
                smp_mb();
                if ((test_bit(0, &conn->c_map_queued) ||
                     !list_empty(&conn->c_send_queue)) &&
                    send_gen == conn->c_send_gen) {
                        rds_stats_inc(s_send_lock_queue_raced);
                        goto restart;
                }
        }
out:
        return ret;
}

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
        u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        assert_spin_locked(&rs->rs_lock);

        BUG_ON(rs->rs_snd_bytes < len);
        rs->rs_snd_bytes -= len;

        if (rs->rs_snd_bytes == 0)
                rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
                                    is_acked_func is_acked)
{
        if (is_acked)
                return is_acked(rm, ack);
        return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

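/*
 * Illustration only: a transport-supplied is_acked_func can key off
 * transport state instead of the RDS sequence number.  A sketch modeled
 * on the RDS/TCP callback (the name my_is_acked is made up), which
 * refuses to ack messages that haven't been assigned a TCP sequence yet:
 *
 *      static int my_is_acked(struct rds_message *rm, uint64_t ack)
 *      {
 *              if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *                      return 0;
 *              return (__s32)((u32)rm->m_ack_seq - (u32)ack) <= 0;
 *      }
 */
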
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_rdma_op *ro;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ro = &rm->rdma;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ro->op_active && ro->op_notify && ro->op_notifier) {
                notifier = ro->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ro->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_atomic_op *ao;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ao = &rm->atomic;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
            && ao->op_active && ao->op_notify && ao->op_notifier) {
                notifier = ao->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ao->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
        struct rm_rdma_op *ro;
        struct rm_atomic_op *ao;

        ro = &rm->rdma;
        if (ro->op_active && ro->op_notify && ro->op_notifier) {
                ro->op_notifier->n_status = status;
                list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
                ro->op_notifier = NULL;
        }

        ao = &rm->atomic;
        if (ao->op_active && ao->op_notify && ao->op_notifier) {
                ao->op_notifier->n_status = status;
                list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
                ao->op_notifier = NULL;
        }

        /* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with a remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
                                         struct rm_rdma_op *op)
{
        struct rds_message *rm, *tmp, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (&rm->rdma == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        goto out;
                }
        }

        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
                if (&rm->rdma == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        break;
                }
        }

out:
        spin_unlock_irqrestore(&conn->c_lock, flags);

        return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
        unsigned long flags;
        struct rds_sock *rs = NULL;
        struct rds_message *rm;

        while (!list_empty(messages)) {
                int was_on_sock = 0;

                rm = list_entry(messages->next, struct rds_message,
                                m_conn_item);
                list_del_init(&rm->m_conn_item);

                /*
                 * If we see this flag cleared then we're *sure* that someone
                 * else beat us to removing it from the sock.  If we race
                 * with their flag update we'll get the lock and then really
                 * see that the flag has been cleared.
                 *
                 * The message spinlock makes sure nobody clears rm->m_rs
                 * while we're messing with it. It does not prevent the
                 * message from being removed from the socket, though.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);
                if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
                        goto unlock_and_drop;

                if (rs != rm->m_rs) {
                        if (rs) {
                                rds_wake_sk_sleep(rs);
                                sock_put(rds_rs_to_sk(rs));
                        }
                        rs = rm->m_rs;
                        if (rs)
                                sock_hold(rds_rs_to_sk(rs));
                }
                if (!rs)
                        goto unlock_and_drop;
                spin_lock(&rs->rs_lock);

                if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
                        struct rm_rdma_op *ro = &rm->rdma;
                        struct rds_notifier *notifier;

                        list_del_init(&rm->m_sock_item);
                        rds_send_sndbuf_remove(rs, rm);

                        if (ro->op_active && ro->op_notifier &&
                               (ro->op_notify || (ro->op_recverr && status))) {
                                notifier = ro->op_notifier;
                                list_add_tail(&notifier->n_list,
                                                &rs->rs_notify_queue);
                                if (!notifier->n_status)
                                        notifier->n_status = status;
                                rm->rdma.op_notifier = NULL;
                        }
                        was_on_sock = 1;
                        rm->m_rs = NULL;
                }
                spin_unlock(&rs->rs_lock);

unlock_and_drop:
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                rds_message_put(rm);
                if (was_on_sock)
                        rds_message_put(rm);
        }

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                         is_acked_func is_acked)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (!rds_send_is_acked(rm, ack, is_acked))
                        break;

                list_move(&rm->m_conn_item, &list);
                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        }

        /* order flag updates with spin locks */
        if (!list_empty(&list))
                smp_mb__after_atomic();

        spin_unlock_irqrestore(&conn->c_lock, flags);

        /* now remove the messages from the sock list as needed */
        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
        struct rds_message *rm, *tmp;
        struct rds_connection *conn;
        unsigned long flags;
        LIST_HEAD(list);

        /* get all the messages we're dropping under the rs lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
                if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
                             dest->sin_port != rm->m_inc.i_hdr.h_dport))
                        continue;

                list_move(&rm->m_sock_item, &list);
                rds_send_sndbuf_remove(rs, rm);
                clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
        }

        /* order flag updates with the rs lock */
        smp_mb__after_atomic();

        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (list_empty(&list))
                return;

        /* Remove the messages from the conn */
        list_for_each_entry(rm, &list, m_sock_item) {

                conn = rm->m_inc.i_conn;

                spin_lock_irqsave(&conn->c_lock, flags);
                /*
                 * Maybe someone else beat us to removing rm from the conn.
                 * If we race with their flag update we'll get the lock and
                 * then really see that the flag has been cleared.
                 */
                if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                        spin_unlock_irqrestore(&conn->c_lock, flags);
                        spin_lock_irqsave(&rm->m_rs_lock, flags);
                        rm->m_rs = NULL;
                        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                        continue;
                }
                list_del_init(&rm->m_conn_item);
                spin_unlock_irqrestore(&conn->c_lock, flags);

                /*
                 * Couldn't grab m_rs_lock in top loop (lock ordering),
                 * but we can now.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }

        rds_wake_sk_sleep(rs);

        while (!list_empty(&list)) {
                rm = list_entry(list.next, struct rds_message, m_sock_item);
                list_del_init(&rm->m_sock_item);
                rds_message_wait(rm);

                /* just in case the code above skipped this message
                 * because RDS_MSG_ON_CONN wasn't set, run it again here.
                 * Taking m_rs_lock is the only thing that keeps us
                 * from racing with ack processing.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }
}

/*
 * We only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
                             struct rds_message *rm, __be16 sport,
                             __be16 dport, int *queued)
{
        unsigned long flags;
        u32 len;

        if (*queued)
                goto out;

        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        /* this is the only place which holds both the socket's rs_lock
         * and the connection's c_lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        /*
         * If there is a little space in sndbuf, we don't queue anything,
         * and userspace gets -EAGAIN. But poll() indicates there's send
         * room. This can lead to bad behavior (spinning) if snd_bytes isn't
         * freed up by incoming acks. So we check the *old* value of
         * rs_snd_bytes here to allow the last msg to exceed the buffer,
         * and poll() now knows no more data can be sent.
         */
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
                rs->rs_snd_bytes += len;

                /* let recv side know we are close to send space exhaustion.
                 * This is probably not the optimal way to do it, as this
                 * means we set the flag on *all* messages as soon as our
                 * throughput hits a certain threshold.
                 */
                if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
                        __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
                set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
                rds_message_addref(rm);
                rm->m_rs = rs;

                /* The code ordering is a little weird, but we're
                   trying to minimize the time we hold c_lock */
                rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
                rm->m_inc.i_conn = conn;
                rds_message_addref(rm);

                spin_lock(&conn->c_lock);
                rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
                list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
                set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
                spin_unlock(&conn->c_lock);

                rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
                         rm, len, rs, rs->rs_snd_bytes,
                         (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

                *queued = 1;
        }

        spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
        return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
        struct cmsghdr *cmsg;
        int size = 0;
        int cmsg_groups = 0;
        int retval;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        cmsg_groups |= 1;
                        retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
                        if (retval < 0)
                                return retval;
                        size += retval;

                        break;

                case RDS_CMSG_RDMA_DEST:
                case RDS_CMSG_RDMA_MAP:
                        cmsg_groups |= 2;
                        /* these are valid but do not add any size */
                        break;

                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        cmsg_groups |= 1;
                        size += sizeof(struct scatterlist);
                        break;

                default:
                        return -EINVAL;
                }

        }

        size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

        /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
        if (cmsg_groups == 3)
                return -EINVAL;

        return size;
}

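/*
 * Worked example (figures are purely illustrative, assuming 4K pages): an
 * 8200-byte payload with one RDS_CMSG_ATOMIC_CSWP cmsg sizes out to
 * ceil(8200, 4096) = 3 data scatterlist entries plus 1 atomic entry,
 * i.e. 4 * sizeof(struct scatterlist) bytes on top of the base
 * rds_message allocation.
 */
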
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                         struct msghdr *msg, int *allocated_mr)
{
        struct cmsghdr *cmsg;
        int ret = 0;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                /* As a side effect, RDMA_DEST and RDMA_MAP will set
                 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        ret = rds_cmsg_rdma_args(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_DEST:
                        ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_MAP:
                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                        if (!ret)
                                *allocated_mr = 1;
                        break;
                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                case RDS_CMSG_MASKED_ATOMIC_CSWP:
                case RDS_CMSG_MASKED_ATOMIC_FADD:
                        ret = rds_cmsg_atomic(rs, rm, cmsg);
                        break;

                default:
                        return -EINVAL;
                }

                if (ret)
                        break;
        }

        return ret;
}

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
        __be32 daddr;
        __be16 dport;
        struct rds_message *rm = NULL;
        struct rds_connection *conn;
        int ret = 0;
        int queued = 0, allocated_mr = 0;
        int nonblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_sndtimeo(sk, nonblock);

        /* Mirror Linux UDP, which mirrors BSD's error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (msg->msg_namelen) {
                /* XXX fail non-unicast destination IPs? */
                if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
                        ret = -EINVAL;
                        goto out;
                }
                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
        } else {
                /* We only care about consistency with ->connect() */
                lock_sock(sk);
                daddr = rs->rs_conn_addr;
                dport = rs->rs_conn_port;
                release_sock(sk);
        }

        /* racing with another thread binding seems ok here */
        if (daddr == 0 || rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (payload_len > rds_sk_sndbuf(rs)) {
                ret = -EMSGSIZE;
                goto out;
        }

        /* size of rm including all sgs */
        ret = rds_rm_size(msg, payload_len);
        if (ret < 0)
                goto out;

        rm = rds_message_alloc(ret, GFP_KERNEL);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        /* Attach data to the rm */
        if (payload_len) {
                rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
                if (!rm->data.op_sg) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = rds_message_copy_from_user(rm, &msg->msg_iter);
                if (ret)
                        goto out;
        }
        rm->data.op_active = 1;

        rm->m_daddr = daddr;

        /* rds_conn_create has a spinlock that runs with IRQ off.
         * Caching the conn in the socket helps a lot. */
        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
                conn = rs->rs_conn;
        else {
                conn = rds_conn_create_outgoing(sock_net(sock->sk),
                                                rs->rs_bound_addr, daddr,
                                        rs->rs_transport,
                                        sock->sk->sk_allocation);
                if (IS_ERR(conn)) {
                        ret = PTR_ERR(conn);
                        goto out;
                }
                rs->rs_conn = conn;
        }

        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
        if (ret)
                goto out;

        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
                               &rm->rdma, conn->c_trans->xmit_rdma);
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
                printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
                               &rm->atomic, conn->c_trans->xmit_atomic);
                ret = -EOPNOTSUPP;
                goto out;
        }

        rds_conn_connect_if_down(conn);

        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret) {
                rs->rs_seen_congestion = 1;
                goto out;
        }

        while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
                                  dport, &queued)) {
                rds_stats_inc(s_send_queue_full);

                if (nonblock) {
                        ret = -EAGAIN;
                        goto out;
                }

                timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                        rds_send_queue_rm(rs, conn, rm,
                                                          rs->rs_bound_port,
                                                          dport,
                                                          &queued),
                                        timeo);
                rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
                if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                        continue;

                ret = timeo;
                if (ret == 0)
                        ret = -ETIMEDOUT;
                goto out;
        }

        /*
         * By now we've committed to the send.  We reuse rds_send_worker()
         * to retry sends in the rds thread if the transport asks us to.
         */
        rds_stats_inc(s_send_queued);

        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                rds_send_xmit(conn);

        rds_message_put(rm);
        return payload_len;

out:
        /* If the user included an RDMA_MAP cmsg, we allocated an MR on the
         * fly. If the sendmsg goes through, we keep the MR. If it fails with
         * EAGAIN or in any other way, we need to destroy the MR again */
        if (allocated_mr)
                rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

        if (rm)
                rds_message_put(rm);
        return ret;
}

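/*
 * Userspace view of the path above (illustrative sketch only; the
 * addresses, port number, and omitted error handling are made up for the
 * example).  An RDS socket is a PF_RDS SOCK_SEQPACKET socket bound to a
 * local IPv4 address, and each sendmsg() arrives here as one datagram:
 *
 *      int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *
 *      struct sockaddr_in laddr = { .sin_family = AF_INET };
 *      laddr.sin_port = htons(4000);
 *      laddr.sin_addr.s_addr = inet_addr("192.168.1.1");
 *      bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
 *
 *      struct sockaddr_in daddr = laddr;
 *      daddr.sin_addr.s_addr = inet_addr("192.168.1.2");
 *
 *      struct iovec iov = { .iov_base = (void *)"hi", .iov_len = 2 };
 *      struct msghdr mh = {
 *              .msg_name    = &daddr,
 *              .msg_namelen = sizeof(daddr),
 *              .msg_iov     = &iov,
 *              .msg_iovlen  = 1,
 *      };
 *      sendmsg(fd, &mh, 0);
 */
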
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
        struct rds_message *rm;
        unsigned long flags;
        int ret = 0;

        rm = rds_message_alloc(0, GFP_ATOMIC);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        rm->m_daddr = conn->c_faddr;
        rm->data.op_active = 1;

        rds_conn_connect_if_down(conn);

        ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
        if (ret)
                goto out;

        spin_lock_irqsave(&conn->c_lock, flags);
        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        rds_message_addref(rm);
        rm->m_inc.i_conn = conn;

        rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
                                    conn->c_next_tx_seq);
        conn->c_next_tx_seq++;
        spin_unlock_irqrestore(&conn->c_lock, flags);

        rds_stats_inc(s_send_queued);
        rds_stats_inc(s_send_pong);

        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

        rds_message_put(rm);
        return 0;

out:
        if (rm)
                rds_message_put(rm);
        return ret;
}