net/rds/send.c
1 /*
2  * Copyright (c) 2006 Oracle.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 #include <linux/kernel.h>
34 #include <linux/gfp.h>
35 #include <net/sock.h>
36 #include <linux/in.h>
37 #include <linux/list.h>
38
39 #include "rds.h"
40
41 /* When transmitting messages in rds_send_xmit, we need to emerge from
42  * time to time and briefly release the CPU. Otherwise the soft lockup
43  * watchdog will kick our shin.
44  * Also, it seems fairer to not let one busy connection stall all the
45  * others.
46  *
47  * send_batch_count is the number of times we'll loop in send_xmit. Setting
48  * it to 0 will restore the old behavior (where we looped until we had
49  * drained the queue).
50  */
51 static int send_batch_count = 64;
52 module_param(send_batch_count, int, 0444);
53 MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
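/*
 * The 0444 permission makes this parameter visible but read-only under
 * /sys/module/rds/parameters/, so it can only be set when the module is
 * loaded (for example "modprobe rds send_batch_count=128", or
 * "rds.send_batch_count=128" on the kernel command line if RDS is built in).
 */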
54
55 /*
56  * Reset the send state.  Callers must ensure that this doesn't race with
57  * rds_send_xmit().
58  */
59 void rds_send_reset(struct rds_connection *conn)
60 {
61         struct rds_message *rm, *tmp;
62         unsigned long flags;
63
64         if (conn->c_xmit_rm) {
65                 rm = conn->c_xmit_rm;
66                 conn->c_xmit_rm = NULL;
67                 /* Tell the user the RDMA op is no longer mapped by the
68                  * transport. This isn't entirely true (it's flushed out
69                  * independently) but as the connection is down, there's
70                  * no ongoing RDMA to/from that memory */
71                 rds_message_unmapped(rm);
72                 rds_message_put(rm);
73         }
74
75         conn->c_xmit_sg = 0;
76         conn->c_xmit_hdr_off = 0;
77         conn->c_xmit_data_off = 0;
78         conn->c_xmit_atomic_sent = 0;
79         conn->c_xmit_rdma_sent = 0;
80         conn->c_xmit_data_sent = 0;
81
82         conn->c_map_queued = 0;
83
84         conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
85         conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
86
87         /* Mark messages as retransmissions, and move them to the send q */
88         spin_lock_irqsave(&conn->c_lock, flags);
89         list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
90                 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
91                 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
92         }
93         list_splice_init(&conn->c_retrans, &conn->c_send_queue);
94         spin_unlock_irqrestore(&conn->c_lock, flags);
95 }
96
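/*
 * acquire_in_xmit() is effectively a non-blocking trylock: test_and_set_bit()
 * returns the previous value of RDS_IN_XMIT, so the acquire succeeds only if
 * no other task was already transmitting on this connection.
 */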
97 static int acquire_in_xmit(struct rds_connection *conn)
98 {
99         return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
100 }
101
102 static void release_in_xmit(struct rds_connection *conn)
103 {
104         clear_bit(RDS_IN_XMIT, &conn->c_flags);
105         smp_mb__after_clear_bit();
106         /*
107          * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
108          * hot path and finding waiters is very rare.  We don't want to walk
109          * the system-wide hashed waitqueue buckets in the fast path only to
110          * almost never find waiters.
111          */
112         if (waitqueue_active(&conn->c_waitq))
113                 wake_up_all(&conn->c_waitq);
114 }
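/*
 * A sketch of the matching waiter side (the actual waiter lives outside this
 * file, so treat this as illustrative): a task that needs the transmit path
 * quiesced sleeps on c_waitq until the flag is clear, roughly
 *
 *	wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
 *
 * which is what the wake_up_all() above pairs with.
 */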
115
116 /*
117  * We're making the conscious trade-off here to only send one message
118  * down the connection at a time.
119  *   Pro:
120  *      - tx queueing is a simple fifo list
121  *      - reassembly is optional and easily done by transports per conn
122  *      - no per flow rx lookup at all, straight to the socket
123  *      - less per-frag memory and wire overhead
124  *   Con:
125  *      - queued acks can be delayed behind large messages
126  *   Depends:
127  *      - small message latency is higher behind queued large messages
128  *      - large messages aren't starved by intervening small sends
129  */
130 int rds_send_xmit(struct rds_connection *conn)
131 {
132         struct rds_message *rm;
133         unsigned long flags;
134         unsigned int tmp;
135         struct scatterlist *sg;
136         int ret = 0;
137         LIST_HEAD(to_be_dropped);
138
139 restart:
140
141         /*
142          * sendmsg calls here after having queued its message on the send
143          * queue.  We only have one task feeding the connection at a time.  If
144          * another thread is already feeding the queue then we back off.  This
145          * avoids blocking the caller and trading per-connection data between
146          * caches per message.
147          */
148         if (!acquire_in_xmit(conn)) {
149                 rds_stats_inc(s_send_lock_contention);
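                    /* Not an allocation failure: -ENOMEM here just tells the
                     * caller that another task already holds RDS_IN_XMIT and
                     * is draining the queue, so this caller should back off
                     * (the send worker retries after a short delay).
                     */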
150                 ret = -ENOMEM;
151                 goto out;
152         }
153
154         /*
155          * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT;
156          * we do the opposite order here to avoid races.
157          */
158         if (!rds_conn_up(conn)) {
159                 release_in_xmit(conn);
160                 ret = 0;
161                 goto out;
162         }
163
164         if (conn->c_trans->xmit_prepare)
165                 conn->c_trans->xmit_prepare(conn);
166
167         /*
168          * spin trying to push headers and data down the connection until
169          * the connection doesn't make forward progress.
170          */
171         while (1) {
172
173                 rm = conn->c_xmit_rm;
174
175                 /*
176                  * If we are between messages, send any pending congestion
177                  * map update.
178                  */
179                 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
180                         rm = rds_cong_update_alloc(conn);
181                         if (IS_ERR(rm)) {
182                                 ret = PTR_ERR(rm);
183                                 break;
184                         }
185                         rm->data.op_active = 1;
186
187                         conn->c_xmit_rm = rm;
188                 }
189
190                 /*
191                  * If not already working on one, grab the next message.
192                  *
193                  * c_xmit_rm holds a ref while we're sending this message down
194                  * the connection.  We can use this ref while we hold
195                  * RDS_IN_XMIT; rds_send_reset() is serialized with it.
196                  */
197                 if (!rm) {
198                         unsigned int len;
199
200                         spin_lock_irqsave(&conn->c_lock, flags);
201
202                         if (!list_empty(&conn->c_send_queue)) {
203                                 rm = list_entry(conn->c_send_queue.next,
204                                                 struct rds_message,
205                                                 m_conn_item);
206                                 rds_message_addref(rm);
207
208                                 /*
209                                  * Move the message from the send queue to the retransmit
210                                  * list right away.
211                                  */
212                                 list_move_tail(&rm->m_conn_item, &conn->c_retrans);
213                         }
214
215                         spin_unlock_irqrestore(&conn->c_lock, flags);
216
217                         if (!rm)
218                                 break;
219
220                         /* Unfortunately, the way InfiniBand deals with
221                          * RDMA to a bad MR key is by moving the entire
222                          * queue pair to error state. We could possibly
223                          * recover from that, but right now we drop the
224                          * connection.
225                          * Therefore, we never retransmit messages with RDMA ops.
226                          */
227                         if (rm->rdma.op_active &&
228                             test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
229                                 spin_lock_irqsave(&conn->c_lock, flags);
230                                 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
231                                         list_move(&rm->m_conn_item, &to_be_dropped);
232                                 spin_unlock_irqrestore(&conn->c_lock, flags);
233                                 continue;
234                         }
235
236                         /* Require an ACK every once in a while */
237                         len = ntohl(rm->m_inc.i_hdr.h_len);
238                         if (conn->c_unacked_packets == 0 ||
239                             conn->c_unacked_bytes < len) {
240                                 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
241
242                                 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
243                                 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
244                                 rds_stats_inc(s_send_ack_required);
245                         } else {
246                                 conn->c_unacked_bytes -= len;
247                                 conn->c_unacked_packets--;
248                         }
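                            /* Forcing an ack at least every
                             * rds_sysctl_max_unacked_packets messages (or
                             * unacked bytes) bounds how much can pile up on
                             * c_retrans before rds_send_drop_acked() gets a
                             * chance to trim it.
                             */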
249
250                         conn->c_xmit_rm = rm;
251                 }
252
253                 /* The transport either sends the whole rdma or none of it */
254                 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
255                         rm->m_final_op = &rm->rdma;
256                         ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
257                         if (ret)
258                                 break;
259                         conn->c_xmit_rdma_sent = 1;
260
261                         /* The transport owns the mapped memory for now.
262                          * You can't unmap it while it's on the send queue */
263                         set_bit(RDS_MSG_MAPPED, &rm->m_flags);
264                 }
265
266                 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
267                         rm->m_final_op = &rm->atomic;
268                         ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
269                         if (ret)
270                                 break;
271                         conn->c_xmit_atomic_sent = 1;
272
273                         /* The transport owns the mapped memory for now.
274                          * You can't unmap it while it's on the send queue */
275                         set_bit(RDS_MSG_MAPPED, &rm->m_flags);
276                 }
277
278                 /*
279                  * A number of cases require an RDS header to be sent
280                  * even if there is no data.
281                  * We permit 0-byte sends; rds-ping depends on this.
282                  * However, if there are exclusively attached silent ops,
283                  * we skip the hdr/data send, to enable silent operation.
284                  */
285                 if (rm->data.op_nents == 0) {
286                         int ops_present;
287                         int all_ops_are_silent = 1;
288
289                         ops_present = (rm->atomic.op_active || rm->rdma.op_active);
290                         if (rm->atomic.op_active && !rm->atomic.op_silent)
291                                 all_ops_are_silent = 0;
292                         if (rm->rdma.op_active && !rm->rdma.op_silent)
293                                 all_ops_are_silent = 0;
294
295                         if (ops_present && all_ops_are_silent &&
296                             !rm->m_rdma_cookie)
297                                 rm->data.op_active = 0;
298                 }
299
300                 if (rm->data.op_active && !conn->c_xmit_data_sent) {
301                         rm->m_final_op = &rm->data;
302                         ret = conn->c_trans->xmit(conn, rm,
303                                                   conn->c_xmit_hdr_off,
304                                                   conn->c_xmit_sg,
305                                                   conn->c_xmit_data_off);
306                         if (ret <= 0)
307                                 break;
308
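                            /* ret is the number of bytes the transport
                             * consumed on this call.  Charge them to the
                             * header first, then walk the sg list, so a
                             * partially sent message can resume at
                             * c_xmit_hdr_off / c_xmit_sg / c_xmit_data_off on
                             * the next pass.
                             */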
309                         if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
310                                 tmp = min_t(int, ret,
311                                             sizeof(struct rds_header) -
312                                             conn->c_xmit_hdr_off);
313                                 conn->c_xmit_hdr_off += tmp;
314                                 ret -= tmp;
315                         }
316
317                         sg = &rm->data.op_sg[conn->c_xmit_sg];
318                         while (ret) {
319                                 tmp = min_t(int, ret, sg->length -
320                                                       conn->c_xmit_data_off);
321                                 conn->c_xmit_data_off += tmp;
322                                 ret -= tmp;
323                                 if (conn->c_xmit_data_off == sg->length) {
324                                         conn->c_xmit_data_off = 0;
325                                         sg++;
326                                         conn->c_xmit_sg++;
327                                         BUG_ON(ret != 0 &&
328                                                conn->c_xmit_sg == rm->data.op_nents);
329                                 }
330                         }
331
332                         if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
333                             (conn->c_xmit_sg == rm->data.op_nents))
334                                 conn->c_xmit_data_sent = 1;
335                 }
336
337                 /*
338                  * An rm will only take multiple trips through this loop
339                  * if it has a data op. Thus, once the data is sent (or there was
340                  * none), we're done with the rm.
341                  */
342                 if (!rm->data.op_active || conn->c_xmit_data_sent) {
343                         conn->c_xmit_rm = NULL;
344                         conn->c_xmit_sg = 0;
345                         conn->c_xmit_hdr_off = 0;
346                         conn->c_xmit_data_off = 0;
347                         conn->c_xmit_rdma_sent = 0;
348                         conn->c_xmit_atomic_sent = 0;
349                         conn->c_xmit_data_sent = 0;
350
351                         rds_message_put(rm);
352                 }
353         }
354
355         if (conn->c_trans->xmit_complete)
356                 conn->c_trans->xmit_complete(conn);
357
358         release_in_xmit(conn);
359
360         /* Nuke any messages we decided not to retransmit. */
361         if (!list_empty(&to_be_dropped)) {
362                 /* irqs on here, so we can put(), unlike above */
363                 list_for_each_entry(rm, &to_be_dropped, m_conn_item)
364                         rds_message_put(rm);
365                 rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
366         }
367
368         /*
369          * Other senders can queue a message after we last test the send queue
370          * but before we clear RDS_IN_XMIT.  In that case they'd back off and
371          * not try to send their newly queued message.  We need to check the
372          * send queue after having cleared RDS_IN_XMIT so that their message
373          * doesn't get stuck on the send queue.
374          *
375          * If the transport cannot continue (i.e. ret != 0), then it must
376          * call us when more room is available, such as from the tx
377          * completion handler.
378          */
379         if (ret == 0) {
380                 smp_mb();
381                 if (!list_empty(&conn->c_send_queue)) {
382                         rds_stats_inc(s_send_lock_queue_raced);
383                         goto restart;
384                 }
385         }
386 out:
387         return ret;
388 }
389
390 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
391 {
392         u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
393
394         assert_spin_locked(&rs->rs_lock);
395
396         BUG_ON(rs->rs_snd_bytes < len);
397         rs->rs_snd_bytes -= len;
398
399         if (rs->rs_snd_bytes == 0)
400                 rds_stats_inc(s_send_queue_empty);
401 }
402
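/*
 * Transports that track acks themselves pass an is_acked callback
 * (tcp_is_acked, referenced in the comment above rds_send_drop_acked(), is
 * one example); otherwise we fall back to comparing the RDS header sequence
 * number against the acked sequence.
 */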
403 static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
404                                     is_acked_func is_acked)
405 {
406         if (is_acked)
407                 return is_acked(rm, ack);
408         return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
409 }
410
411 /*
412  * This is pretty similar to what happens below in the ACK
413  * handling code - except that this is called as soon as we get
414  * the IB send completion for the RDMA op and the accompanying
415  * message.
416  */
417 void rds_rdma_send_complete(struct rds_message *rm, int status)
418 {
419         struct rds_sock *rs = NULL;
420         struct rm_rdma_op *ro;
421         struct rds_notifier *notifier;
422         unsigned long flags;
423
424         spin_lock_irqsave(&rm->m_rs_lock, flags);
425
426         ro = &rm->rdma;
427         if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
428             ro->op_active && ro->op_notify && ro->op_notifier) {
429                 notifier = ro->op_notifier;
430                 rs = rm->m_rs;
431                 sock_hold(rds_rs_to_sk(rs));
432
433                 notifier->n_status = status;
434                 spin_lock(&rs->rs_lock);
435                 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
436                 spin_unlock(&rs->rs_lock);
437
438                 ro->op_notifier = NULL;
439         }
440
441         spin_unlock_irqrestore(&rm->m_rs_lock, flags);
442
443         if (rs) {
444                 rds_wake_sk_sleep(rs);
445                 sock_put(rds_rs_to_sk(rs));
446         }
447 }
448 EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
449
450 /*
451  * Just like above, except it looks at the atomic op.
452  */
453 void rds_atomic_send_complete(struct rds_message *rm, int status)
454 {
455         struct rds_sock *rs = NULL;
456         struct rm_atomic_op *ao;
457         struct rds_notifier *notifier;
458         unsigned long flags;
459
460         spin_lock_irqsave(&rm->m_rs_lock, flags);
461
462         ao = &rm->atomic;
463         if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
464             ao->op_active && ao->op_notify && ao->op_notifier) {
465                 notifier = ao->op_notifier;
466                 rs = rm->m_rs;
467                 sock_hold(rds_rs_to_sk(rs));
468
469                 notifier->n_status = status;
470                 spin_lock(&rs->rs_lock);
471                 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
472                 spin_unlock(&rs->rs_lock);
473
474                 ao->op_notifier = NULL;
475         }
476
477         spin_unlock_irqrestore(&rm->m_rs_lock, flags);
478
479         if (rs) {
480                 rds_wake_sk_sleep(rs);
481                 sock_put(rds_rs_to_sk(rs));
482         }
483 }
484 EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
485
486 /*
487  * This is the same as rds_rdma_send_complete except we
488  * don't do any locking - we have all the ingredients (message,
489  * socket, socket lock) and can just move the notifier.
490  */
491 static inline void
492 __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
493 {
494         struct rm_rdma_op *ro;
495         struct rm_atomic_op *ao;
496
497         ro = &rm->rdma;
498         if (ro->op_active && ro->op_notify && ro->op_notifier) {
499                 ro->op_notifier->n_status = status;
500                 list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
501                 ro->op_notifier = NULL;
502         }
503
504         ao = &rm->atomic;
505         if (ao->op_active && ao->op_notify && ao->op_notifier) {
506                 ao->op_notifier->n_status = status;
507                 list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
508                 ao->op_notifier = NULL;
509         }
510
511         /* No need to wake the app - caller does this */
512 }
513
514 /*
515  * This is called from the IB send completion when we detect
516  * an RDMA operation that failed with a remote access error.
517  * So speed is not an issue here.
518  */
519 struct rds_message *rds_send_get_message(struct rds_connection *conn,
520                                          struct rm_rdma_op *op)
521 {
522         struct rds_message *rm, *tmp, *found = NULL;
523         unsigned long flags;
524
525         spin_lock_irqsave(&conn->c_lock, flags);
526
527         list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
528                 if (&rm->rdma == op) {
529                         atomic_inc(&rm->m_refcount);
530                         found = rm;
531                         goto out;
532                 }
533         }
534
535         list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
536                 if (&rm->rdma == op) {
537                         atomic_inc(&rm->m_refcount);
538                         found = rm;
539                         break;
540                 }
541         }
542
543 out:
544         spin_unlock_irqrestore(&conn->c_lock, flags);
545
546         return found;
547 }
548 EXPORT_SYMBOL_GPL(rds_send_get_message);
549
550 /*
551  * This removes messages from the socket's list if they're on it.  The list
552  * argument must be private to the caller, since we must be able to modify it
553  * without locks.  The messages must have a reference held for their
554  * position on the list.  This function will drop that reference after
555  * removing the messages from the 'messages' list regardless of whether it
556  * found them on the socket list or not.
557  */
558 void rds_send_remove_from_sock(struct list_head *messages, int status)
559 {
560         unsigned long flags;
561         struct rds_sock *rs = NULL;
562         struct rds_message *rm;
563
564         while (!list_empty(messages)) {
565                 int was_on_sock = 0;
566
567                 rm = list_entry(messages->next, struct rds_message,
568                                 m_conn_item);
569                 list_del_init(&rm->m_conn_item);
570
571                 /*
572                  * If we see this flag cleared then we're *sure* that someone
573                  * else beat us to removing it from the sock.  If we race
574                  * with their flag update we'll get the lock and then really
575                  * see that the flag has been cleared.
576                  *
577                  * The message spinlock makes sure nobody clears rm->m_rs
578                  * while we're messing with it. It does not prevent the
579                  * message from being removed from the socket, though.
580                  */
581                 spin_lock_irqsave(&rm->m_rs_lock, flags);
582                 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
583                         goto unlock_and_drop;
584
585                 if (rs != rm->m_rs) {
586                         if (rs) {
587                                 rds_wake_sk_sleep(rs);
588                                 sock_put(rds_rs_to_sk(rs));
589                         }
590                         rs = rm->m_rs;
591                         sock_hold(rds_rs_to_sk(rs));
592                 }
593                 spin_lock(&rs->rs_lock);
594
595                 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
596                         struct rm_rdma_op *ro = &rm->rdma;
597                         struct rds_notifier *notifier;
598
599                         list_del_init(&rm->m_sock_item);
600                         rds_send_sndbuf_remove(rs, rm);
601
602                         if (ro->op_active && ro->op_notifier &&
603                                (ro->op_notify || (ro->op_recverr && status))) {
604                                 notifier = ro->op_notifier;
605                                 list_add_tail(&notifier->n_list,
606                                                 &rs->rs_notify_queue);
607                                 if (!notifier->n_status)
608                                         notifier->n_status = status;
609                                 rm->rdma.op_notifier = NULL;
610                         }
611                         was_on_sock = 1;
612                         rm->m_rs = NULL;
613                 }
614                 spin_unlock(&rs->rs_lock);
615
616 unlock_and_drop:
617                 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
618                 rds_message_put(rm);
619                 if (was_on_sock)
620                         rds_message_put(rm);
621         }
622
623         if (rs) {
624                 rds_wake_sk_sleep(rs);
625                 sock_put(rds_rs_to_sk(rs));
626         }
627 }
628
629 /*
630  * Transports call here when they've determined that the receiver queued
631  * messages up to, and including, the given sequence number.  Messages are
632  * moved to the retrans queue when rds_send_xmit picks them off the send
633  * queue. This means that in the TCP case, the message may not have been
634  * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
635  * checks the RDS_MSG_HAS_ACK_SEQ bit.
636  *
637  * XXX It's not clear to me how this is safely serialized with socket
638  * destruction.  Maybe it should bail if it sees SOCK_DEAD.
639  */
640 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
641                          is_acked_func is_acked)
642 {
643         struct rds_message *rm, *tmp;
644         unsigned long flags;
645         LIST_HEAD(list);
646
647         spin_lock_irqsave(&conn->c_lock, flags);
648
649         list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
650                 if (!rds_send_is_acked(rm, ack, is_acked))
651                         break;
652
653                 list_move(&rm->m_conn_item, &list);
654                 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
655         }
656
657         /* order flag updates with spin locks */
658         if (!list_empty(&list))
659                 smp_mb__after_clear_bit();
660
661         spin_unlock_irqrestore(&conn->c_lock, flags);
662
663         /* now remove the messages from the sock list as needed */
664         rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
665 }
666 EXPORT_SYMBOL_GPL(rds_send_drop_acked);
667
668 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
669 {
670         struct rds_message *rm, *tmp;
671         struct rds_connection *conn;
672         unsigned long flags;
673         LIST_HEAD(list);
674
675         /* get all the messages we're dropping under the rs lock */
676         spin_lock_irqsave(&rs->rs_lock, flags);
677
678         list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
679                 if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
680                              dest->sin_port != rm->m_inc.i_hdr.h_dport))
681                         continue;
682
683                 list_move(&rm->m_sock_item, &list);
684                 rds_send_sndbuf_remove(rs, rm);
685                 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
686         }
687
688         /* order flag updates with the rs lock */
689         smp_mb__after_clear_bit();
690
691         spin_unlock_irqrestore(&rs->rs_lock, flags);
692
693         if (list_empty(&list))
694                 return;
695
696         /* Remove the messages from the conn */
697         list_for_each_entry(rm, &list, m_sock_item) {
698
699                 conn = rm->m_inc.i_conn;
700
701                 spin_lock_irqsave(&conn->c_lock, flags);
702                 /*
703                  * Maybe someone else beat us to removing rm from the conn.
704                  * If we race with their flag update we'll get the lock and
705                  * then really see that the flag has been cleared.
706                  */
707                 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
708                         spin_unlock_irqrestore(&conn->c_lock, flags);
709                         continue;
710                 }
711                 list_del_init(&rm->m_conn_item);
712                 spin_unlock_irqrestore(&conn->c_lock, flags);
713
714                 /*
715                  * Couldn't grab m_rs_lock in top loop (lock ordering),
716                  * but we can now.
717                  */
718                 spin_lock_irqsave(&rm->m_rs_lock, flags);
719
720                 spin_lock(&rs->rs_lock);
721                 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
722                 spin_unlock(&rs->rs_lock);
723
724                 rm->m_rs = NULL;
725                 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
726
727                 rds_message_put(rm);
728         }
729
730         rds_wake_sk_sleep(rs);
731
732         while (!list_empty(&list)) {
733                 rm = list_entry(list.next, struct rds_message, m_sock_item);
734                 list_del_init(&rm->m_sock_item);
735
736                 rds_message_wait(rm);
737                 rds_message_put(rm);
738         }
739 }
740
741 /*
742  * we only want this to fire once so we use the caller's 'queued'.  It's
743  * possible that another thread can race with us and remove the
744  * message from the flow with RDS_CANCEL_SENT_TO.
745  */
746 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
747                              struct rds_message *rm, __be16 sport,
748                              __be16 dport, int *queued)
749 {
750         unsigned long flags;
751         u32 len;
752
753         if (*queued)
754                 goto out;
755
756         len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
757
758         /* this is the only place which holds both the socket's rs_lock
759          * and the connection's c_lock */
760         spin_lock_irqsave(&rs->rs_lock, flags);
761
762         /*
763          * If only a little space were left in the sndbuf and we refused to
764          * queue, userspace would get -EAGAIN while poll() still indicated
765          * send room. That can lead to bad behavior (spinning) if snd_bytes
766          * isn't freed up by incoming acks. So we check the *old* value of
767          * rs_snd_bytes here, allowing the last msg to exceed the buffer, and
768          * poll() then knows no more data can be sent.
769          */
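            /* Worked example: with rds_sk_sndbuf() at 64KB and rs_snd_bytes
             * currently at 60KB, a 16KB message is still queued (the *old*
             * value is under the limit), pushing rs_snd_bytes to 76KB; the
             * next send gets -EAGAIN and poll() stops reporting send room
             * until acks drain rs_snd_bytes back down.
             */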
770         if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
771                 rs->rs_snd_bytes += len;
772
773                 /* let recv side know we are close to send space exhaustion.
774                  * This is probably not the optimal way to do it, as this
775                  * means we set the flag on *all* messages as soon as our
776                  * throughput hits a certain threshold.
777                  */
778                 if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
779                         __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
780
781                 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
782                 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
783                 rds_message_addref(rm);
784                 rm->m_rs = rs;
785
786                 /* The code ordering is a little weird, but we're
787                    trying to minimize the time we hold c_lock */
788                 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
789                 rm->m_inc.i_conn = conn;
790                 rds_message_addref(rm);
791
792                 spin_lock(&conn->c_lock);
793                 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
794                 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
795                 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
796                 spin_unlock(&conn->c_lock);
797
798                 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
799                          rm, len, rs, rs->rs_snd_bytes,
800                          (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
801
802                 *queued = 1;
803         }
804
805         spin_unlock_irqrestore(&rs->rs_lock, flags);
806 out:
807         return *queued;
808 }
809
810 /*
811  * rds_message is getting to be quite complicated, and we'd like to allocate
812  * it all in one go. This figures out how big it needs to be up front.
813  */
814 static int rds_rm_size(struct msghdr *msg, int data_len)
815 {
816         struct cmsghdr *cmsg;
817         int size = 0;
818         int cmsg_groups = 0;
819         int retval;
820
821         for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
822                 if (!CMSG_OK(msg, cmsg))
823                         return -EINVAL;
824
825                 if (cmsg->cmsg_level != SOL_RDS)
826                         continue;
827
828                 switch (cmsg->cmsg_type) {
829                 case RDS_CMSG_RDMA_ARGS:
830                         cmsg_groups |= 1;
831                         retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
832                         if (retval < 0)
833                                 return retval;
834                         size += retval;
835
836                         break;
837
838                 case RDS_CMSG_RDMA_DEST:
839                 case RDS_CMSG_RDMA_MAP:
840                         cmsg_groups |= 2;
841                         /* these are valid but do not add any size */
842                         break;
843
844                 case RDS_CMSG_ATOMIC_CSWP:
845                 case RDS_CMSG_ATOMIC_FADD:
846                 case RDS_CMSG_MASKED_ATOMIC_CSWP:
847                 case RDS_CMSG_MASKED_ATOMIC_FADD:
848                         cmsg_groups |= 1;
849                         size += sizeof(struct scatterlist);
850                         break;
851
852                 default:
853                         return -EINVAL;
854                 }
855
856         }
857
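            /* ceil() is an RDS-local round-up division helper: one scatterlist
             * entry is reserved per PAGE_SIZE chunk of payload, rounded up.
             */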
858         size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
859
860         /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
861         if (cmsg_groups == 3)
862                 return -EINVAL;
863
864         return size;
865 }
866
867 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
868                          struct msghdr *msg, int *allocated_mr)
869 {
870         struct cmsghdr *cmsg;
871         int ret = 0;
872
873         for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
874                 if (!CMSG_OK(msg, cmsg))
875                         return -EINVAL;
876
877                 if (cmsg->cmsg_level != SOL_RDS)
878                         continue;
879
880                 /* As a side effect, RDMA_DEST and RDMA_MAP will set
881                  * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
882                  */
883                 switch (cmsg->cmsg_type) {
884                 case RDS_CMSG_RDMA_ARGS:
885                         ret = rds_cmsg_rdma_args(rs, rm, cmsg);
886                         break;
887
888                 case RDS_CMSG_RDMA_DEST:
889                         ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
890                         break;
891
892                 case RDS_CMSG_RDMA_MAP:
893                         ret = rds_cmsg_rdma_map(rs, rm, cmsg);
894                         if (!ret)
895                                 *allocated_mr = 1;
896                         break;
897                 case RDS_CMSG_ATOMIC_CSWP:
898                 case RDS_CMSG_ATOMIC_FADD:
899                 case RDS_CMSG_MASKED_ATOMIC_CSWP:
900                 case RDS_CMSG_MASKED_ATOMIC_FADD:
901                         ret = rds_cmsg_atomic(rs, rm, cmsg);
902                         break;
903
904                 default:
905                         return -EINVAL;
906                 }
907
908                 if (ret)
909                         break;
910         }
911
912         return ret;
913 }
914
915 int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
916                 size_t payload_len)
917 {
918         struct sock *sk = sock->sk;
919         struct rds_sock *rs = rds_sk_to_rs(sk);
920         struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
921         __be32 daddr;
922         __be16 dport;
923         struct rds_message *rm = NULL;
924         struct rds_connection *conn;
925         int ret = 0;
926         int queued = 0, allocated_mr = 0;
927         int nonblock = msg->msg_flags & MSG_DONTWAIT;
928         long timeo = sock_sndtimeo(sk, nonblock);
929
930         /* Mirror Linux UDP's mirroring of BSD error message compatibility */
931         /* XXX: Perhaps MSG_MORE someday */
932         if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
933                 printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
934                 ret = -EOPNOTSUPP;
935                 goto out;
936         }
937
938         if (msg->msg_namelen) {
939                 /* XXX fail non-unicast destination IPs? */
940                 if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
941                         ret = -EINVAL;
942                         goto out;
943                 }
944                 daddr = usin->sin_addr.s_addr;
945                 dport = usin->sin_port;
946         } else {
947                 /* We only care about consistency with ->connect() */
948                 lock_sock(sk);
949                 daddr = rs->rs_conn_addr;
950                 dport = rs->rs_conn_port;
951                 release_sock(sk);
952         }
953
954         /* racing with another thread binding seems ok here */
955         if (daddr == 0 || rs->rs_bound_addr == 0) {
956                 ret = -ENOTCONN; /* XXX not a great errno */
957                 goto out;
958         }
959
960         /* size of rm including all sgs */
961         ret = rds_rm_size(msg, payload_len);
962         if (ret < 0)
963                 goto out;
964
965         rm = rds_message_alloc(ret, GFP_KERNEL);
966         if (!rm) {
967                 ret = -ENOMEM;
968                 goto out;
969         }
970
971         /* Attach data to the rm */
972         if (payload_len) {
973                 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
                    if (!rm->data.op_sg) {
                            ret = -ENOMEM;
                            goto out;
                    }
974                 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
975                 if (ret)
976                         goto out;
977         }
978         rm->data.op_active = 1;
979
980         rm->m_daddr = daddr;
981
982         /* rds_conn_create has a spinlock that runs with IRQ off.
983          * Caching the conn in the socket helps a lot. */
984         if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
985                 conn = rs->rs_conn;
986         else {
987                 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
988                                         rs->rs_transport,
989                                         sock->sk->sk_allocation);
990                 if (IS_ERR(conn)) {
991                         ret = PTR_ERR(conn);
992                         goto out;
993                 }
994                 rs->rs_conn = conn;
995         }
996
997         /* Parse any control messages the user may have included. */
998         ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
999         if (ret)
1000                 goto out;
1001
1002         if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1003                 if (printk_ratelimit())
1004                         printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1005                                &rm->rdma, conn->c_trans->xmit_rdma);
1006                 ret = -EOPNOTSUPP;
1007                 goto out;
1008         }
1009
1010         if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1011                 if (printk_ratelimit())
1012                         printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1013                                &rm->atomic, conn->c_trans->xmit_atomic);
1014                 ret = -EOPNOTSUPP;
1015                 goto out;
1016         }
1017
1018         rds_conn_connect_if_down(conn);
1019
1020         ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1021         if (ret) {
1022                 rs->rs_seen_congestion = 1;
1023                 goto out;
1024         }
1025
1026         while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1027                                   dport, &queued)) {
1028                 rds_stats_inc(s_send_queue_full);
1029                 /* XXX make sure this is reasonable */
1030                 if (payload_len > rds_sk_sndbuf(rs)) {
1031                         ret = -EMSGSIZE;
1032                         goto out;
1033                 }
1034                 if (nonblock) {
1035                         ret = -EAGAIN;
1036                         goto out;
1037                 }
1038
1039                 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1040                                         rds_send_queue_rm(rs, conn, rm,
1041                                                           rs->rs_bound_port,
1042                                                           dport,
1043                                                           &queued),
1044                                         timeo);
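                     /* wait_event_interruptible_timeout() returns the
                      * remaining jiffies (> 0) once the message is queued,
                      * 0 if it timed out, or -ERESTARTSYS if a signal
                      * arrived; only the last two fall through to the error
                      * handling below.
                      */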
1045                 rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1046                 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1047                         continue;
1048
1049                 ret = timeo;
1050                 if (ret == 0)
1051                         ret = -ETIMEDOUT;
1052                 goto out;
1053         }
1054
1055         /*
1056          * By now we've committed to the send.  We reuse rds_send_worker()
1057          * to retry sends in the rds thread if the transport asks us to.
1058          */
1059         rds_stats_inc(s_send_queued);
1060
1061         if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1062                 rds_send_xmit(conn);
1063
1064         rds_message_put(rm);
1065         return payload_len;
1066
1067 out:
1068         /* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
1069          * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1070          * or in any other way, we need to destroy the MR again. */
1071         if (allocated_mr)
1072                 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1073
1074         if (rm)
1075                 rds_message_put(rm);
1076         return ret;
1077 }
1078
1079 /*
1080  * Reply to a ping packet.
1081  */
1082 int
1083 rds_send_pong(struct rds_connection *conn, __be16 dport)
1084 {
1085         struct rds_message *rm;
1086         unsigned long flags;
1087         int ret = 0;
1088
1089         rm = rds_message_alloc(0, GFP_ATOMIC);
1090         if (!rm) {
1091                 ret = -ENOMEM;
1092                 goto out;
1093         }
1094
1095         rm->m_daddr = conn->c_faddr;
1096         rm->data.op_active = 1;
1097
1098         rds_conn_connect_if_down(conn);
1099
1100         ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1101         if (ret)
1102                 goto out;
1103
1104         spin_lock_irqsave(&conn->c_lock, flags);
1105         list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1106         set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1107         rds_message_addref(rm);
1108         rm->m_inc.i_conn = conn;
1109
1110         rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1111                                     conn->c_next_tx_seq);
1112         conn->c_next_tx_seq++;
1113         spin_unlock_irqrestore(&conn->c_lock, flags);
1114
1115         rds_stats_inc(s_send_queued);
1116         rds_stats_inc(s_send_pong);
1117
1118         if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1119                 rds_send_xmit(conn);
1120
1121         rds_message_put(rm);
1122         return 0;
1123
1124 out:
1125         if (rm)
1126                 rds_message_put(rm);
1127         return ret;
1128 }