skb_splice_bits(): get rid of callback
net/kcm/kcmsock.c
1 #include <linux/bpf.h>
2 #include <linux/errno.h>
3 #include <linux/errqueue.h>
4 #include <linux/file.h>
5 #include <linux/in.h>
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/net.h>
9 #include <linux/netdevice.h>
10 #include <linux/poll.h>
11 #include <linux/rculist.h>
12 #include <linux/skbuff.h>
13 #include <linux/socket.h>
14 #include <linux/uaccess.h>
15 #include <linux/workqueue.h>
16 #include <linux/syscalls.h>
17 #include <net/kcm.h>
18 #include <net/netns/generic.h>
19 #include <net/sock.h>
20 #include <net/tcp.h>
21 #include <uapi/linux/kcm.h>
22
23 unsigned int kcm_net_id;
24
25 static struct kmem_cache *kcm_psockp __read_mostly;
26 static struct kmem_cache *kcm_muxp __read_mostly;
27 static struct workqueue_struct *kcm_wq;
28
29 static inline struct kcm_sock *kcm_sk(const struct sock *sk)
30 {
31         return (struct kcm_sock *)sk;
32 }
33
34 static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
35 {
36         return (struct kcm_tx_msg *)skb->cb;
37 }
38
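/* Receive-side message state is stored in skb->cb offset past struct
 * qdisc_skb_cb (see the offsetof() below), presumably so it does not
 * collide with other cb users on the receive path; the transmit state in
 * kcm_tx_msg() above uses cb directly.
 */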
39 static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
40 {
41         return (struct kcm_rx_msg *)((void *)skb->cb +
42                                      offsetof(struct qdisc_skb_cb, data));
43 }
44
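/* Note: the err argument is currently unused; sk_err is always set to
 * EPIPE.
 */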
45 static void report_csk_error(struct sock *csk, int err)
46 {
47         csk->sk_err = EPIPE;
48         csk->sk_error_report(csk);
49 }
50
51 /* Callback lock held */
52 static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
53                                struct sk_buff *skb)
54 {
55         struct sock *csk = psock->sk;
56
57         /* Unrecoverable error in receive */
58
59         del_timer(&psock->rx_msg_timer);
60
61         if (psock->rx_stopped)
62                 return;
63
64         psock->rx_stopped = 1;
65         KCM_STATS_INCR(psock->stats.rx_aborts);
66
67         /* Report an error on the lower socket */
68         report_csk_error(csk, err);
69 }
70
71 static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
72                                bool wakeup_kcm)
73 {
74         struct sock *csk = psock->sk;
75         struct kcm_mux *mux = psock->mux;
76
77         /* Unrecoverable error in transmit */
78
79         spin_lock_bh(&mux->lock);
80
81         if (psock->tx_stopped) {
82                 spin_unlock_bh(&mux->lock);
83                 return;
84         }
85
86         psock->tx_stopped = 1;
87         KCM_STATS_INCR(psock->stats.tx_aborts);
88
89         if (!psock->tx_kcm) {
90                 /* Take off psocks_avail list */
91                 list_del(&psock->psock_avail_list);
92         } else if (wakeup_kcm) {
93                 /* In this case psock is being aborted while outside of
94                  * write_msgs and psock is reserved. Schedule tx_work
95                  * to handle the failure there. Need to commit tx_stopped
96                  * before queuing work.
97                  */
98                 smp_mb();
99
100                 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
101         }
102
103         spin_unlock_bh(&mux->lock);
104
105         /* Report error on lower socket */
106         report_csk_error(csk, err);
107 }
108
109 /* RX mux lock held. */
110 static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
111                                     struct kcm_psock *psock)
112 {
113         KCM_STATS_ADD(mux->stats.rx_bytes,
114                       psock->stats.rx_bytes - psock->saved_rx_bytes);
115         mux->stats.rx_msgs +=
116                 psock->stats.rx_msgs - psock->saved_rx_msgs;
117         psock->saved_rx_msgs = psock->stats.rx_msgs;
118         psock->saved_rx_bytes = psock->stats.rx_bytes;
119 }
120
121 static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
122                                     struct kcm_psock *psock)
123 {
124         KCM_STATS_ADD(mux->stats.tx_bytes,
125                       psock->stats.tx_bytes - psock->saved_tx_bytes);
126         mux->stats.tx_msgs +=
127                 psock->stats.tx_msgs - psock->saved_tx_msgs;
128         psock->saved_tx_msgs = psock->stats.tx_msgs;
129         psock->saved_tx_bytes = psock->stats.tx_bytes;
130 }
131
132 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
133
134 /* KCM is ready to receive messages on its queue -- either the KCM is new or
135  * has become unblocked after being blocked on a full socket buffer. Queue any
136  * pending ready messages on a psock. RX mux lock held.
137  */
138 static void kcm_rcv_ready(struct kcm_sock *kcm)
139 {
140         struct kcm_mux *mux = kcm->mux;
141         struct kcm_psock *psock;
142         struct sk_buff *skb;
143
144         if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
145                 return;
146
147         while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
148                 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
149                         /* Assuming buffer limit has been reached */
150                         skb_queue_head(&mux->rx_hold_queue, skb);
151                         WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
152                         return;
153                 }
154         }
155
156         while (!list_empty(&mux->psocks_ready)) {
157                 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
158                                          psock_ready_list);
159
160                 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
161                         /* Assuming buffer limit has been reached */
162                         WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
163                         return;
164                 }
165
166                 /* Consumed the ready message on the psock. Schedule rx_work to
167                  * get more messages.
168                  */
169                 list_del(&psock->psock_ready_list);
170                 psock->ready_rx_msg = NULL;
171
172                 /* Commit clearing of ready_rx_msg for queuing work */
173                 smp_mb();
174
175                 queue_work(kcm_wq, &psock->rx_work);
176         }
177
178         /* Buffer limit is okay now, add to ready list */
179         list_add_tail(&kcm->wait_rx_list,
180                       &kcm->mux->kcm_rx_waiters);
181         kcm->rx_wait = true;
182 }
183
184 static void kcm_rfree(struct sk_buff *skb)
185 {
186         struct sock *sk = skb->sk;
187         struct kcm_sock *kcm = kcm_sk(sk);
188         struct kcm_mux *mux = kcm->mux;
189         unsigned int len = skb->truesize;
190
191         sk_mem_uncharge(sk, len);
192         atomic_sub(len, &sk->sk_rmem_alloc);
193
194         /* For reading rx_wait and rx_psock without holding lock */
195         smp_mb__after_atomic();
196
197         if (!kcm->rx_wait && !kcm->rx_psock &&
198             sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
199                 spin_lock_bh(&mux->rx_lock);
200                 kcm_rcv_ready(kcm);
201                 spin_unlock_bh(&mux->rx_lock);
202         }
203 }
204
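/* Queue an skb on a KCM socket's receive queue, charging it against the
 * socket's receive buffer and installing kcm_rfree() as the destructor so
 * reception can resume once the application frees memory by reading.
 * Roughly parallels sock_queue_rcv_skb(), minus socket filtering.
 */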
205 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
206 {
207         struct sk_buff_head *list = &sk->sk_receive_queue;
208
209         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
210                 return -ENOMEM;
211
212         if (!sk_rmem_schedule(sk, skb, skb->truesize))
213                 return -ENOBUFS;
214
215         skb->dev = NULL;
216
217         skb_orphan(skb);
218         skb->sk = sk;
219         skb->destructor = kcm_rfree;
220         atomic_add(skb->truesize, &sk->sk_rmem_alloc);
221         sk_mem_charge(sk, skb->truesize);
222
223         skb_queue_tail(list, skb);
224
225         if (!sock_flag(sk, SOCK_DEAD))
226                 sk->sk_data_ready(sk);
227
228         return 0;
229 }
230
231 /* Requeue received messages for a kcm socket to other kcm sockets. This is
232  * called when a kcm socket is receive disabled.
233  * RX mux lock held.
234  */
235 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
236 {
237         struct sk_buff *skb;
238         struct kcm_sock *kcm;
239
240         while ((skb = __skb_dequeue(head))) {
241                 /* Reset destructor to avoid calling kcm_rcv_ready */
242                 skb->destructor = sock_rfree;
243                 skb_orphan(skb);
244 try_again:
245                 if (list_empty(&mux->kcm_rx_waiters)) {
246                         skb_queue_tail(&mux->rx_hold_queue, skb);
247                         continue;
248                 }
249
250                 kcm = list_first_entry(&mux->kcm_rx_waiters,
251                                        struct kcm_sock, wait_rx_list);
252
253                 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
254                         /* Should mean socket buffer full */
255                         list_del(&kcm->wait_rx_list);
256                         kcm->rx_wait = false;
257
258                         /* Commit rx_wait to read in kcm_free */
259                         smp_wmb();
260
261                         goto try_again;
262                 }
263         }
264 }
265
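/* Find a KCM socket to deliver a completed message to. If none is
 * waiting, park the message as psock->ready_rx_msg and put the psock on
 * the mux's ready list; reception on this psock stalls until the message
 * is consumed.
 */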
266 /* Lower sock lock held */
267 static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
268                                        struct sk_buff *head)
269 {
270         struct kcm_mux *mux = psock->mux;
271         struct kcm_sock *kcm;
272
273         WARN_ON(psock->ready_rx_msg);
274
275         if (psock->rx_kcm)
276                 return psock->rx_kcm;
277
278         spin_lock_bh(&mux->rx_lock);
279
280         if (psock->rx_kcm) {
281                 spin_unlock_bh(&mux->rx_lock);
282                 return psock->rx_kcm;
283         }
284
285         kcm_update_rx_mux_stats(mux, psock);
286
287         if (list_empty(&mux->kcm_rx_waiters)) {
288                 psock->ready_rx_msg = head;
289                 list_add_tail(&psock->psock_ready_list,
290                               &mux->psocks_ready);
291                 spin_unlock_bh(&mux->rx_lock);
292                 return NULL;
293         }
294
295         kcm = list_first_entry(&mux->kcm_rx_waiters,
296                                struct kcm_sock, wait_rx_list);
297         list_del(&kcm->wait_rx_list);
298         kcm->rx_wait = false;
299
300         psock->rx_kcm = kcm;
301         kcm->rx_psock = psock;
302
303         spin_unlock_bh(&mux->rx_lock);
304
305         return kcm;
306 }
307
308 static void kcm_done(struct kcm_sock *kcm);
309
310 static void kcm_done_work(struct work_struct *w)
311 {
312         kcm_done(container_of(w, struct kcm_sock, done_work));
313 }
314
315 /* Lower sock held */
316 static void unreserve_rx_kcm(struct kcm_psock *psock,
317                              bool rcv_ready)
318 {
319         struct kcm_sock *kcm = psock->rx_kcm;
320         struct kcm_mux *mux = psock->mux;
321
322         if (!kcm)
323                 return;
324
325         spin_lock_bh(&mux->rx_lock);
326
327         psock->rx_kcm = NULL;
328         kcm->rx_psock = NULL;
329
330         /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
331          * kcm_rfree
332          */
333         smp_mb();
334
335         if (unlikely(kcm->done)) {
336                 spin_unlock_bh(&mux->rx_lock);
337
338                 /* Need to run kcm_done in a task since we need to acquire
339                  * callback locks which may already be held here.
340                  */
341                 INIT_WORK(&kcm->done_work, kcm_done_work);
342                 schedule_work(&kcm->done_work);
343                 return;
344         }
345
346         if (unlikely(kcm->rx_disabled)) {
347                 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
348         } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
349                 /* Check for a race with rx_wait where all data was
350                  * dequeued (accounted for in kcm_rfree).
351                  */
352                 kcm_rcv_ready(kcm);
353         }
354         spin_unlock_bh(&mux->rx_lock);
355 }
356
357 static void kcm_start_rx_timer(struct kcm_psock *psock)
358 {
359         if (psock->sk->sk_rcvtimeo)
360                 mod_timer(&psock->rx_msg_timer, jiffies + psock->sk->sk_rcvtimeo);
361 }
362
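/* The attached BPF program is handed the message skb being assembled and
 * must return the full message length; returning 0 means more header
 * bytes are needed. An illustrative sketch (not part of this file) for a
 * 4-byte big-endian length field at offset 0, written as a
 * BPF_PROG_TYPE_SOCKET_FILTER program:
 *
 *	int kcm_parse(struct __sk_buff *skb)
 *	{
 *		return load_word(skb, 0);
 *	}
 */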
363 /* Macro to invoke filter function. */
364 #define KCM_RUN_FILTER(prog, ctx) \
365         (*prog->bpf_func)(ctx, prog->insnsi)
366
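/* recv_actor callback for tcp_read_sock(): clones the TCP skbs into the
 * message being assembled at psock->rx_skb_head, runs the attached BPF
 * program to determine the full message length, and hands completed
 * messages to a KCM socket via reserve_rx_kcm(). Returns the number of
 * bytes consumed from the TCP socket.
 */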
367 /* Lower socket lock held */
368 static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
369                         unsigned int orig_offset, size_t orig_len)
370 {
371         struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
372         struct kcm_rx_msg *rxm;
373         struct kcm_sock *kcm;
374         struct sk_buff *head, *skb;
375         size_t eaten = 0, cand_len;
376         ssize_t extra;
377         int err;
378         bool cloned_orig = false;
379
380         if (psock->ready_rx_msg)
381                 return 0;
382
383         head = psock->rx_skb_head;
384         if (head) {
385                 /* Message already in progress */
386
387                 rxm = kcm_rx_msg(head);
388                 if (unlikely(rxm->early_eaten)) {
389                         /* Some bytes of receive socket data were already
390                          * saved in rx_skb_head; just indicate that they are
391                          * consumed.
392                          */
393                         eaten = orig_len <= rxm->early_eaten ?
394                                 orig_len : rxm->early_eaten;
395                         rxm->early_eaten -= eaten;
396
397                         return eaten;
398                 }
399
400                 if (unlikely(orig_offset)) {
401                         /* Getting data with a non-zero offset when a message is
402                          * in progress is not expected. If it does happen, we
403                          * need to clone and pull since we can't deal with
404                          * offsets in the skbs for a message except in the head.
405                          */
406                         orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
407                         if (!orig_skb) {
408                                 KCM_STATS_INCR(psock->stats.rx_mem_fail);
409                                 desc->error = -ENOMEM;
410                                 return 0;
411                         }
412                         if (!pskb_pull(orig_skb, orig_offset)) {
413                                 KCM_STATS_INCR(psock->stats.rx_mem_fail);
414                                 kfree_skb(orig_skb);
415                                 desc->error = -ENOMEM;
416                                 return 0;
417                         }
418                         cloned_orig = true;
419                         orig_offset = 0;
420                 }
421
422                 if (!psock->rx_skb_nextp) {
423                         /* We are going to append to the frag_list of head.
424                          * Need to unshare the frag_list.
425                          */
426                         err = skb_unclone(head, GFP_ATOMIC);
427                         if (err) {
428                                 KCM_STATS_INCR(psock->stats.rx_mem_fail);
429                                 desc->error = err;
430                                 return 0;
431                         }
432
433                         if (unlikely(skb_shinfo(head)->frag_list)) {
434                                 /* We can't append to an sk_buff that already
435                                  * has a frag_list. We create a new head, point
436                                  * the frag_list of that to the old head, and
437                                  * then are able to use the old head->next for
438                                  * appending to the message.
439                                  */
440                                 if (WARN_ON(head->next)) {
441                                         desc->error = -EINVAL;
442                                         return 0;
443                                 }
444
445                                 skb = alloc_skb(0, GFP_ATOMIC);
446                                 if (!skb) {
447                                         KCM_STATS_INCR(psock->stats.rx_mem_fail);
448                                         desc->error = -ENOMEM;
449                                         return 0;
450                                 }
451                                 skb->len = head->len;
452                                 skb->data_len = head->len;
453                                 skb->truesize = head->truesize;
454                                 *kcm_rx_msg(skb) = *kcm_rx_msg(head);
455                                 psock->rx_skb_nextp = &head->next;
456                                 skb_shinfo(skb)->frag_list = head;
457                                 psock->rx_skb_head = skb;
458                                 head = skb;
459                         } else {
460                                 psock->rx_skb_nextp =
461                                     &skb_shinfo(head)->frag_list;
462                         }
463                 }
464         }
465
466         while (eaten < orig_len) {
467                 /* Always clone since we will consume something */
468                 skb = skb_clone(orig_skb, GFP_ATOMIC);
469                 if (!skb) {
470                         KCM_STATS_INCR(psock->stats.rx_mem_fail);
471                         desc->error = -ENOMEM;
472                         break;
473                 }
474
475                 cand_len = orig_len - eaten;
476
477                 head = psock->rx_skb_head;
478                 if (!head) {
479                         head = skb;
480                         psock->rx_skb_head = head;
481                         /* Will set rx_skb_nextp on next packet if needed */
482                         psock->rx_skb_nextp = NULL;
483                         rxm = kcm_rx_msg(head);
484                         memset(rxm, 0, sizeof(*rxm));
485                         rxm->offset = orig_offset + eaten;
486                 } else {
487                         /* Unclone since we may be appending to an skb that we
488                          * already share a frag_list with.
489                          */
490                         err = skb_unclone(skb, GFP_ATOMIC);
491                         if (err) {
492                                 KCM_STATS_INCR(psock->stats.rx_mem_fail);
493                                 desc->error = err;
494                                 break;
495                         }
496
497                         rxm = kcm_rx_msg(head);
498                         *psock->rx_skb_nextp = skb;
499                         psock->rx_skb_nextp = &skb->next;
500                         head->data_len += skb->len;
501                         head->len += skb->len;
502                         head->truesize += skb->truesize;
503                 }
504
505                 if (!rxm->full_len) {
506                         ssize_t len;
507
508                         len = KCM_RUN_FILTER(psock->bpf_prog, head);
509
510                         if (!len) {
511                                 /* Need more header to determine length */
512                                 if (!rxm->accum_len) {
513                                         /* Start RX timer for new message */
514                                         kcm_start_rx_timer(psock);
515                                 }
516                                 rxm->accum_len += cand_len;
517                                 eaten += cand_len;
518                                 KCM_STATS_INCR(psock->stats.rx_need_more_hdr);
519                                 WARN_ON(eaten != orig_len);
520                                 break;
521                         } else if (len > psock->sk->sk_rcvbuf) {
522                                 /* Message length exceeds maximum allowed */
523                                 KCM_STATS_INCR(psock->stats.rx_msg_too_big);
524                                 desc->error = -EMSGSIZE;
525                                 psock->rx_skb_head = NULL;
526                                 kcm_abort_rx_psock(psock, EMSGSIZE, head);
527                                 break;
528                         } else if (len <= (ssize_t)head->len -
529                                           skb->len - rxm->offset) {
530                                 /* Length must be into new skb (and also
531                                  * greater than zero)
532                                  */
533                                 KCM_STATS_INCR(psock->stats.rx_bad_hdr_len);
534                                 desc->error = -EPROTO;
535                                 psock->rx_skb_head = NULL;
536                                 kcm_abort_rx_psock(psock, EPROTO, head);
537                                 break;
538                         }
539
540                         rxm->full_len = len;
541                 }
542
543                 extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;
544
545                 if (extra < 0) {
546                         /* Message not complete yet. */
547                         if (rxm->full_len - rxm->accum_len >
548                             tcp_inq(psock->sk)) {
549                                 /* Don't have the whole message in the socket
550                                  * buffer. Set psock->rx_need_bytes to wait for
551                                  * the rest of the message. Also, set "early
552                                  * eaten" since we've already buffered the skb
553                                  * but don't consume yet per tcp_read_sock.
554                                  */
555
556                                 if (!rxm->accum_len) {
557                                         /* Start RX timer for new message */
558                                         kcm_start_rx_timer(psock);
559                                 }
560
561                                 psock->rx_need_bytes = rxm->full_len -
562                                                        rxm->accum_len;
563                                 rxm->accum_len += cand_len;
564                                 rxm->early_eaten = cand_len;
565                                 KCM_STATS_ADD(psock->stats.rx_bytes, cand_len);
566                                 desc->count = 0; /* Stop reading socket */
567                                 break;
568                         }
569                         rxm->accum_len += cand_len;
570                         eaten += cand_len;
571                         WARN_ON(eaten != orig_len);
572                         break;
573                 }
574
575                 /* Positive extra indicates more bytes than needed for the
576                  * message
577                  */
578
579                 WARN_ON(extra > cand_len);
580
581                 eaten += (cand_len - extra);
582
583                 /* Hurray, we have a new message! */
584                 del_timer(&psock->rx_msg_timer);
585                 psock->rx_skb_head = NULL;
586                 KCM_STATS_INCR(psock->stats.rx_msgs);
587
588 try_queue:
589                 kcm = reserve_rx_kcm(psock, head);
590                 if (!kcm) {
591                         /* Unable to reserve a KCM, message is held in psock. */
592                         break;
593                 }
594
595                 if (kcm_queue_rcv_skb(&kcm->sk, head)) {
596                         /* Should mean socket buffer full */
597                         unreserve_rx_kcm(psock, false);
598                         goto try_queue;
599                 }
600         }
601
602         if (cloned_orig)
603                 kfree_skb(orig_skb);
604
605         KCM_STATS_ADD(psock->stats.rx_bytes, eaten);
606
607         return eaten;
608 }
609
610 /* Called with lock held on lower socket */
611 static int psock_tcp_read_sock(struct kcm_psock *psock)
612 {
613         read_descriptor_t desc;
614
615         desc.arg.data = psock;
616         desc.error = 0;
617         desc.count = 1; /* give more than one skb per call */
618
619         /* sk should be locked here, so okay to do tcp_read_sock */
620         tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);
621
622         unreserve_rx_kcm(psock, true);
623
624         return desc.error;
625 }
626
627 /* Lower sock lock held */
628 static void psock_tcp_data_ready(struct sock *sk)
629 {
630         struct kcm_psock *psock;
631
632         read_lock_bh(&sk->sk_callback_lock);
633
634         psock = (struct kcm_psock *)sk->sk_user_data;
635         if (unlikely(!psock || psock->rx_stopped))
636                 goto out;
637
638         if (psock->ready_rx_msg)
639                 goto out;
640
641         if (psock->rx_need_bytes) {
642                 if (tcp_inq(sk) >= psock->rx_need_bytes)
643                         psock->rx_need_bytes = 0;
644                 else
645                         goto out;
646         }
647
648         if (psock_tcp_read_sock(psock) == -ENOMEM)
649                 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
650
651 out:
652         read_unlock_bh(&sk->sk_callback_lock);
653 }
654
655 static void do_psock_rx_work(struct kcm_psock *psock)
656 {
658         struct sock *csk = psock->sk;
659
660         /* We need the read lock to synchronize with psock_tcp_data_ready. We
661          * need the socket lock for calling tcp_read_sock.
662          */
663         lock_sock(csk);
664         read_lock_bh(&csk->sk_callback_lock);
665
666         if (unlikely(csk->sk_user_data != psock))
667                 goto out;
668
669         if (unlikely(psock->rx_stopped))
670                 goto out;
671
672         if (psock->ready_rx_msg)
673                 goto out;
674
677         if (psock_tcp_read_sock(psock) == -ENOMEM)
678                 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
679
680 out:
681         read_unlock_bh(&csk->sk_callback_lock);
682         release_sock(csk);
683 }
684
685 static void psock_rx_work(struct work_struct *w)
686 {
687         do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
688 }
689
690 static void psock_rx_delayed_work(struct work_struct *w)
691 {
692         do_psock_rx_work(container_of(w, struct kcm_psock,
693                                       rx_delayed_work.work));
694 }
695
696 static void psock_tcp_state_change(struct sock *sk)
697 {
698         /* TCP only does a POLLIN for a half close. Do a POLLHUP here
699          * since applications will normally not poll with POLLIN
700          * on the TCP sockets.
701          */
702
703         report_csk_error(sk, EPIPE);
704 }
705
706 static void psock_tcp_write_space(struct sock *sk)
707 {
708         struct kcm_psock *psock;
709         struct kcm_mux *mux;
710         struct kcm_sock *kcm;
711
712         read_lock_bh(&sk->sk_callback_lock);
713
714         psock = (struct kcm_psock *)sk->sk_user_data;
715         if (unlikely(!psock))
716                 goto out;
717
718         mux = psock->mux;
719
720         spin_lock_bh(&mux->lock);
721
722         /* Check if the psock is reserved; if so, a KCM socket is waiting to send. */
723         kcm = psock->tx_kcm;
724         if (kcm)
725                 queue_work(kcm_wq, &kcm->tx_work);
726
727         spin_unlock_bh(&mux->lock);
728 out:
729         read_unlock_bh(&sk->sk_callback_lock);
730 }
731
732 static void unreserve_psock(struct kcm_sock *kcm);
733
734 /* kcm sock is locked. */
735 static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
736 {
737         struct kcm_mux *mux = kcm->mux;
738         struct kcm_psock *psock;
739
740         psock = kcm->tx_psock;
741
742         smp_rmb(); /* Must read tx_psock before tx_wait */
743
744         if (psock) {
745                 WARN_ON(kcm->tx_wait);
746                 if (unlikely(psock->tx_stopped))
747                         unreserve_psock(kcm);
748                 else
749                         return kcm->tx_psock;
750         }
751
752         spin_lock_bh(&mux->lock);
753
754         /* Check again under lock to see if a psock was reserved for this
755          * kcm socket via psock_now_avail.
756          */
757         psock = kcm->tx_psock;
758         if (unlikely(psock)) {
759                 WARN_ON(kcm->tx_wait);
760                 spin_unlock_bh(&mux->lock);
761                 return kcm->tx_psock;
762         }
763
764         if (!list_empty(&mux->psocks_avail)) {
765                 psock = list_first_entry(&mux->psocks_avail,
766                                          struct kcm_psock,
767                                          psock_avail_list);
768                 list_del(&psock->psock_avail_list);
769                 if (kcm->tx_wait) {
770                         list_del(&kcm->wait_psock_list);
771                         kcm->tx_wait = false;
772                 }
773                 kcm->tx_psock = psock;
774                 psock->tx_kcm = kcm;
775                 KCM_STATS_INCR(psock->stats.reserved);
776         } else if (!kcm->tx_wait) {
777                 list_add_tail(&kcm->wait_psock_list,
778                               &mux->kcm_tx_waiters);
779                 kcm->tx_wait = true;
780         }
781
782         spin_unlock_bh(&mux->lock);
783
784         return psock;
785 }
786
787 /* mux lock held */
788 static void psock_now_avail(struct kcm_psock *psock)
789 {
790         struct kcm_mux *mux = psock->mux;
791         struct kcm_sock *kcm;
792
793         if (list_empty(&mux->kcm_tx_waiters)) {
794                 list_add_tail(&psock->psock_avail_list,
795                               &mux->psocks_avail);
796         } else {
797                 kcm = list_first_entry(&mux->kcm_tx_waiters,
798                                        struct kcm_sock,
799                                        wait_psock_list);
800                 list_del(&kcm->wait_psock_list);
801                 kcm->tx_wait = false;
802                 psock->tx_kcm = kcm;
803
804                 /* Commit before changing tx_psock since that is read in
805                  * reserve_psock before queuing work.
806                  */
807                 smp_mb();
808
809                 kcm->tx_psock = psock;
810                 KCM_STATS_INCR(psock->stats.reserved);
811                 queue_work(kcm_wq, &kcm->tx_work);
812         }
813 }
814
815 /* kcm sock is locked. */
816 static void unreserve_psock(struct kcm_sock *kcm)
817 {
818         struct kcm_psock *psock;
819         struct kcm_mux *mux = kcm->mux;
820
821         spin_lock_bh(&mux->lock);
822
823         psock = kcm->tx_psock;
824
825         if (WARN_ON(!psock)) {
826                 spin_unlock_bh(&mux->lock);
827                 return;
828         }
829
830         smp_rmb(); /* Read tx_psock before tx_wait */
831
832         kcm_update_tx_mux_stats(mux, psock);
833
834         WARN_ON(kcm->tx_wait);
835
836         kcm->tx_psock = NULL;
837         psock->tx_kcm = NULL;
838         KCM_STATS_INCR(psock->stats.unreserved);
839
840         if (unlikely(psock->tx_stopped)) {
841                 if (psock->done) {
842                         /* Deferred free */
843                         list_del(&psock->psock_list);
844                         mux->psocks_cnt--;
845                         sock_put(psock->sk);
846                         fput(psock->sk->sk_socket->file);
847                         kmem_cache_free(kcm_psockp, psock);
848                 }
849
850                 /* Don't put back on available list */
851
852                 spin_unlock_bh(&mux->lock);
853
854                 return;
855         }
856
857         psock_now_avail(psock);
858
859         spin_unlock_bh(&mux->lock);
860 }
861
862 static void kcm_report_tx_retry(struct kcm_sock *kcm)
863 {
864         struct kcm_mux *mux = kcm->mux;
865
866         spin_lock_bh(&mux->lock);
867         KCM_STATS_INCR(mux->stats.tx_retries);
868         spin_unlock_bh(&mux->lock);
869 }
870
871 /* Write any messages ready on the kcm socket.  Called with kcm sock lock
872  * held.  Return bytes actually sent or error.
873  */
874 static int kcm_write_msgs(struct kcm_sock *kcm)
875 {
876         struct sock *sk = &kcm->sk;
877         struct kcm_psock *psock;
878         struct sk_buff *skb, *head;
879         struct kcm_tx_msg *txm;
880         unsigned short fragidx, frag_offset;
881         unsigned int sent, total_sent = 0;
882         int ret = 0;
883
884         kcm->tx_wait_more = false;
885         psock = kcm->tx_psock;
886         if (unlikely(psock && psock->tx_stopped)) {
887                 /* A reserved psock was aborted asynchronously. Unreserve
888                  * it and we'll retry the message.
889                  */
890                 unreserve_psock(kcm);
891                 kcm_report_tx_retry(kcm);
892                 if (skb_queue_empty(&sk->sk_write_queue))
893                         return 0;
894
895                 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
896
897         } else if (skb_queue_empty(&sk->sk_write_queue)) {
898                 return 0;
899         }
900
901         head = skb_peek(&sk->sk_write_queue);
902         txm = kcm_tx_msg(head);
903
904         if (txm->sent) {
905                 /* Send of first skbuff in queue already in progress */
906                 if (WARN_ON(!psock)) {
907                         ret = -EINVAL;
908                         goto out;
909                 }
910                 sent = txm->sent;
911                 frag_offset = txm->frag_offset;
912                 fragidx = txm->fragidx;
913                 skb = txm->frag_skb;
914
915                 goto do_frag;
916         }
917
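/* No send currently in progress: reserve a psock and transmit the queued
 * messages frag by frag, walking each skb and then its frag_list.
 */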
918 try_again:
919         psock = reserve_psock(kcm);
920         if (!psock)
921                 goto out;
922
923         do {
924                 skb = head;
925                 txm = kcm_tx_msg(head);
926                 sent = 0;
927
928 do_frag_list:
929                 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
930                         ret = -EINVAL;
931                         goto out;
932                 }
933
934                 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
935                      fragidx++) {
936                         skb_frag_t *frag;
937
938                         frag_offset = 0;
939 do_frag:
940                         frag = &skb_shinfo(skb)->frags[fragidx];
941                         if (WARN_ON(!frag->size)) {
942                                 ret = -EINVAL;
943                                 goto out;
944                         }
945
946                         ret = kernel_sendpage(psock->sk->sk_socket,
947                                               frag->page.p,
948                                               frag->page_offset + frag_offset,
949                                               frag->size - frag_offset,
950                                               MSG_DONTWAIT);
951                         if (ret <= 0) {
952                                 if (ret == -EAGAIN) {
953                                         /* Save state to try again when there's
954                                          * write space on the socket
955                                          */
956                                         txm->sent = sent;
957                                         txm->frag_offset = frag_offset;
958                                         txm->fragidx = fragidx;
959                                         txm->frag_skb = skb;
960
961                                         ret = 0;
962                                         goto out;
963                                 }
964
965                                 /* Hard failure in sending message, abort this
966                                  * psock since it has lost framing
967                                  * synchronization and retry sending the
968                                  * message from the beginning.
969                                  */
970                                 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
971                                                    true);
972                                 unreserve_psock(kcm);
973
974                                 txm->sent = 0;
975                                 kcm_report_tx_retry(kcm);
976                                 ret = 0;
977
978                                 goto try_again;
979                         }
980
981                         sent += ret;
982                         frag_offset += ret;
983                         KCM_STATS_ADD(psock->stats.tx_bytes, ret);
984                         if (frag_offset < frag->size) {
985                                 /* Not finished with this frag */
986                                 goto do_frag;
987                         }
988                 }
989
990                 if (skb == head) {
991                         if (skb_has_frag_list(skb)) {
992                                 skb = skb_shinfo(skb)->frag_list;
993                                 goto do_frag_list;
994                         }
995                 } else if (skb->next) {
996                         skb = skb->next;
997                         goto do_frag_list;
998                 }
999
1000                 /* Successfully sent the whole packet, account for it. */
1001                 skb_dequeue(&sk->sk_write_queue);
1002                 kfree_skb(head);
1003                 sk->sk_wmem_queued -= sent;
1004                 total_sent += sent;
1005                 KCM_STATS_INCR(psock->stats.tx_msgs);
1006         } while ((head = skb_peek(&sk->sk_write_queue)));
1007 out:
1008         if (!head) {
1009                 /* Done with all queued messages. */
1010                 WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
1011                 unreserve_psock(kcm);
1012         }
1013
1014         /* Check if write space is available */
1015         sk->sk_write_space(sk);
1016
1017         return total_sent ? : ret;
1018 }
1019
1020 static void kcm_tx_work(struct work_struct *w)
1021 {
1022         struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
1023         struct sock *sk = &kcm->sk;
1024         int err;
1025
1026         lock_sock(sk);
1027
1028         /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
1029          * aborts
1030          */
1031         err = kcm_write_msgs(kcm);
1032         if (err < 0) {
1033                 /* Hard failure in write, report error on KCM socket */
1034                 pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
1035                 report_csk_error(&kcm->sk, -err);
1036                 goto out;
1037         }
1038
1039         /* Primarily for SOCK_SEQPACKET sockets */
1040         if (likely(sk->sk_socket) &&
1041             test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1042                 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1043                 sk->sk_write_space(sk);
1044         }
1045
1046 out:
1047         release_sock(sk);
1048 }
1049
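/* Flush messages that a previous send held back via MSG_BATCH
 * (tx_wait_more).
 */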
1050 static void kcm_push(struct kcm_sock *kcm)
1051 {
1052         if (kcm->tx_wait_more)
1053                 kcm_write_msgs(kcm);
1054 }
1055
1056 static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
1057                             int offset, size_t size, int flags)
1058
1059 {
1060         struct sock *sk = sock->sk;
1061         struct kcm_sock *kcm = kcm_sk(sk);
1062         struct sk_buff *skb = NULL, *head = NULL;
1063         long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1064         bool eor;
1065         int err = 0;
1066         int i;
1067
1068         if (flags & MSG_SENDPAGE_NOTLAST)
1069                 flags |= MSG_MORE;
1070
1071         /* No MSG_EOR from splice, only look at MSG_MORE */
1072         eor = !(flags & MSG_MORE);
1073
1074         lock_sock(sk);
1075
1076         sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1077
1078         err = -EPIPE;
1079         if (sk->sk_err)
1080                 goto out_error;
1081
1082         if (kcm->seq_skb) {
1083                 /* Previously opened message */
1084                 head = kcm->seq_skb;
1085                 skb = kcm_tx_msg(head)->last_skb;
1086                 i = skb_shinfo(skb)->nr_frags;
1087
1088                 if (skb_can_coalesce(skb, i, page, offset)) {
1089                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
1090                         skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1091                         goto coalesced;
1092                 }
1093
1094                 if (i >= MAX_SKB_FRAGS) {
1095                         struct sk_buff *tskb;
1096
1097                         tskb = alloc_skb(0, sk->sk_allocation);
1098                         while (!tskb) {
1099                                 kcm_push(kcm);
1100                                 err = sk_stream_wait_memory(sk, &timeo);
1101                                 if (err)
1102                                         goto out_error;
1103                         }
1104
1105                         if (head == skb)
1106                                 skb_shinfo(head)->frag_list = tskb;
1107                         else
1108                                 skb->next = tskb;
1109
1110                         skb = tskb;
1111                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1112                         i = 0;
1113                 }
1114         } else {
1115                 /* Call the sk_stream functions to manage the sndbuf mem. */
1116                 if (!sk_stream_memory_free(sk)) {
1117                         kcm_push(kcm);
1118                         set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1119                         err = sk_stream_wait_memory(sk, &timeo);
1120                         if (err)
1121                                 goto out_error;
1122                 }
1123
1124                 head = alloc_skb(0, sk->sk_allocation);
1125                 while (!head) {
1126                         kcm_push(kcm);
1127                         err = sk_stream_wait_memory(sk, &timeo);
1128                         if (err)
1129                                 goto out_error;
1130                 }
1131
1132                 skb = head;
1133                 i = 0;
1134         }
1135
1136         get_page(page);
1137         skb_fill_page_desc(skb, i, page, offset, size);
1138         skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1139
1140 coalesced:
1141         skb->len += size;
1142         skb->data_len += size;
1143         skb->truesize += size;
1144         sk->sk_wmem_queued += size;
1145         sk_mem_charge(sk, size);
1146
1147         if (head != skb) {
1148                 head->len += size;
1149                 head->data_len += size;
1150                 head->truesize += size;
1151         }
1152
1153         if (eor) {
1154                 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1155
1156                 /* Message complete, queue it on send buffer */
1157                 __skb_queue_tail(&sk->sk_write_queue, head);
1158                 kcm->seq_skb = NULL;
1159                 KCM_STATS_INCR(kcm->stats.tx_msgs);
1160
1161                 if (flags & MSG_BATCH) {
1162                         kcm->tx_wait_more = true;
1163                 } else if (kcm->tx_wait_more || not_busy) {
1164                         err = kcm_write_msgs(kcm);
1165                         if (err < 0) {
1166                                 /* We got a hard error in write_msgs but have
1167                                  * already queued this message. Report an error
1168                                  * in the socket, but don't affect return value
1169                                  * from sendmsg
1170                                  */
1171                                 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1172                                 report_csk_error(&kcm->sk, -err);
1173                         }
1174                 }
1175         } else {
1176                 /* Message not complete, save state */
1177                 kcm->seq_skb = head;
1178                 kcm_tx_msg(head)->last_skb = skb;
1179         }
1180
1181         KCM_STATS_ADD(kcm->stats.tx_bytes, size);
1182
1183         release_sock(sk);
1184         return size;
1185
1186 out_error:
1187         kcm_push(kcm);
1188
1189         err = sk_stream_error(sk, flags, err);
1190
1191         /* make sure we wake any epoll edge trigger waiter */
1192         if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1193                 sk->sk_write_space(sk);
1194
1195         release_sock(sk);
1196         return err;
1197 }
1198
1199 static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1200 {
1201         struct sock *sk = sock->sk;
1202         struct kcm_sock *kcm = kcm_sk(sk);
1203         struct sk_buff *skb = NULL, *head = NULL;
1204         size_t copy, copied = 0;
1205         long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1206         int eor = (sock->type == SOCK_DGRAM) ?
1207                   !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
1208         int err = -EPIPE;
1209
1210         lock_sock(sk);
1211
1212         /* Per tcp_sendmsg this should be in poll */
1213         sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1214
1215         if (sk->sk_err)
1216                 goto out_error;
1217
1218         if (kcm->seq_skb) {
1219                 /* Previously opened message */
1220                 head = kcm->seq_skb;
1221                 skb = kcm_tx_msg(head)->last_skb;
1222                 goto start;
1223         }
1224
1225         /* Call the sk_stream functions to manage the sndbuf mem. */
1226         if (!sk_stream_memory_free(sk)) {
1227                 kcm_push(kcm);
1228                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1229                 err = sk_stream_wait_memory(sk, &timeo);
1230                 if (err)
1231                         goto out_error;
1232         }
1233
1234         /* New message, alloc head skb */
1235         head = alloc_skb(0, sk->sk_allocation);
1236         while (!head) {
1237                 kcm_push(kcm);
1238                 err = sk_stream_wait_memory(sk, &timeo);
1239                 if (err)
1240                         goto out_error;
1241
1242                 head = alloc_skb(0, sk->sk_allocation);
1243         }
1244
1245         skb = head;
1246
1247         /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
1248          * csum_and_copy_from_iter from skb_do_copy_data_nocache.
1249          */
1250         skb->ip_summed = CHECKSUM_UNNECESSARY;
1251
1252 start:
1253         while (msg_data_left(msg)) {
1254                 bool merge = true;
1255                 int i = skb_shinfo(skb)->nr_frags;
1256                 struct page_frag *pfrag = sk_page_frag(sk);
1257
1258                 if (!sk_page_frag_refill(sk, pfrag))
1259                         goto wait_for_memory;
1260
1261                 if (!skb_can_coalesce(skb, i, pfrag->page,
1262                                       pfrag->offset)) {
1263                         if (i == MAX_SKB_FRAGS) {
1264                                 struct sk_buff *tskb;
1265
1266                                 tskb = alloc_skb(0, sk->sk_allocation);
1267                                 if (!tskb)
1268                                         goto wait_for_memory;
1269
1270                                 if (head == skb)
1271                                         skb_shinfo(head)->frag_list = tskb;
1272                                 else
1273                                         skb->next = tskb;
1274
1275                                 skb = tskb;
1276                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1277                                 continue;
1278                         }
1279                         merge = false;
1280                 }
1281
1282                 copy = min_t(int, msg_data_left(msg),
1283                              pfrag->size - pfrag->offset);
1284
1285                 if (!sk_wmem_schedule(sk, copy))
1286                         goto wait_for_memory;
1287
1288                 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1289                                                pfrag->page,
1290                                                pfrag->offset,
1291                                                copy);
1292                 if (err)
1293                         goto out_error;
1294
1295                 /* Update the skb. */
1296                 if (merge) {
1297                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1298                 } else {
1299                         skb_fill_page_desc(skb, i, pfrag->page,
1300                                            pfrag->offset, copy);
1301                         get_page(pfrag->page);
1302                 }
1303
1304                 pfrag->offset += copy;
1305                 copied += copy;
1306                 if (head != skb) {
1307                         head->len += copy;
1308                         head->data_len += copy;
1309                 }
1310
1311                 continue;
1312
1313 wait_for_memory:
1314                 kcm_push(kcm);
1315                 err = sk_stream_wait_memory(sk, &timeo);
1316                 if (err)
1317                         goto out_error;
1318         }
1319
1320         if (eor) {
1321                 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1322
1323                 /* Message complete, queue it on send buffer */
1324                 __skb_queue_tail(&sk->sk_write_queue, head);
1325                 kcm->seq_skb = NULL;
1326                 KCM_STATS_INCR(kcm->stats.tx_msgs);
1327
1328                 if (msg->msg_flags & MSG_BATCH) {
1329                         kcm->tx_wait_more = true;
1330                 } else if (kcm->tx_wait_more || not_busy) {
1331                         err = kcm_write_msgs(kcm);
1332                         if (err < 0) {
1333                                 /* We got a hard error in write_msgs but have
1334                                  * already queued this message. Report an error
1335                                  * in the socket, but don't affect return value
1336                                  * from sendmsg
1337                                  */
1338                                 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1339                                 report_csk_error(&kcm->sk, -err);
1340                         }
1341                 }
1342         } else {
1343                 /* Message not complete, save state */
1344 partial_message:
1345                 kcm->seq_skb = head;
1346                 kcm_tx_msg(head)->last_skb = skb;
1347         }
1348
1349         KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1350
1351         release_sock(sk);
1352         return copied;
1353
1354 out_error:
1355         kcm_push(kcm);
1356
1357         if (copied && sock->type == SOCK_SEQPACKET) {
1358                 /* Wrote some bytes before encountering an
1359                  * error, return partial success.
1360                  */
1361                 goto partial_message;
1362         }
1363
1364         if (head != kcm->seq_skb)
1365                 kfree_skb(head);
1366
1367         err = sk_stream_error(sk, msg->msg_flags, err);
1368
1369         /* make sure we wake any epoll edge trigger waiter */
1370         if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1371                 sk->sk_write_space(sk);
1372
1373         release_sock(sk);
1374         return err;
1375 }
1376
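/* Block until at least one message is on the receive queue, honoring
 * MSG_DONTWAIT, the receive timeout, and pending signals. Returns NULL
 * and sets *err on failure.
 */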
1377 static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1378                                      long timeo, int *err)
1379 {
1380         struct sk_buff *skb;
1381
1382         while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1383                 if (sk->sk_err) {
1384                         *err = sock_error(sk);
1385                         return NULL;
1386                 }
1387
1388                 if (sock_flag(sk, SOCK_DONE))
1389                         return NULL;
1390
1391                 if ((flags & MSG_DONTWAIT) || !timeo) {
1392                         *err = -EAGAIN;
1393                         return NULL;
1394                 }
1395
1396                 sk_wait_data(sk, &timeo, NULL);
1397
1398                 /* Handle signals */
1399                 if (signal_pending(current)) {
1400                         *err = sock_intr_errno(timeo);
1401                         return NULL;
1402                 }
1403         }
1404
1405         return skb;
1406 }
1407
1408 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1409                        size_t len, int flags)
1410 {
1411         struct sock *sk = sock->sk;
1412         struct kcm_sock *kcm = kcm_sk(sk);
1413         int err = 0;
1414         long timeo;
1415         struct kcm_rx_msg *rxm;
1416         int copied = 0;
1417         struct sk_buff *skb;
1418
1419         timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1420
1421         lock_sock(sk);
1422
1423         skb = kcm_wait_data(sk, flags, timeo, &err);
1424         if (!skb)
1425                 goto out;
1426
1427         /* Okay, have a message on the receive queue */
1428
1429         rxm = kcm_rx_msg(skb);
1430
1431         if (len > rxm->full_len)
1432                 len = rxm->full_len;
1433
1434         err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
1435         if (err < 0)
1436                 goto out;
1437
1438         copied = len;
1439         if (likely(!(flags & MSG_PEEK))) {
1440                 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1441                 if (copied < rxm->full_len) {
1442                         if (sock->type == SOCK_DGRAM) {
1443                                 /* Truncated message */
1444                                 msg->msg_flags |= MSG_TRUNC;
1445                                 goto msg_finished;
1446                         }
1447                         rxm->offset += copied;
1448                         rxm->full_len -= copied;
1449                 } else {
1450 msg_finished:
1451                         /* Finished with message */
1452                         msg->msg_flags |= MSG_EOR;
1453                         KCM_STATS_INCR(kcm->stats.rx_msgs);
1454                         skb_unlink(skb, &sk->sk_receive_queue);
1455                         kfree_skb(skb);
1456                 }
1457         }
1458
1459 out:
1460         release_sock(sk);
1461
1462         return copied ? : err;
1463 }
1464
1465 static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1466                                struct pipe_inode_info *pipe, size_t len,
1467                                unsigned int flags)
1468 {
1469         struct sock *sk = sock->sk;
1470         struct kcm_sock *kcm = kcm_sk(sk);
1471         long timeo;
1472         struct kcm_rx_msg *rxm;
1473         int err = 0;
1474         ssize_t copied;
1475         struct sk_buff *skb;
1476
1477         /* Only support splice for SOCK_SEQPACKET */
1478
1479         timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1480
1481         lock_sock(sk);
1482
1483         skb = kcm_wait_data(sk, flags, timeo, &err);
1484         if (!skb)
1485                 goto err_out;
1486
1487         /* Okay, have a message on the receive queue */
1488
1489         rxm = kcm_rx_msg(skb);
1490
1491         if (len > rxm->full_len)
1492                 len = rxm->full_len;
1493
1494         copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags);
1495         if (copied < 0) {
1496                 err = copied;
1497                 goto err_out;
1498         }
1499
1500         KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1501
1502         rxm->offset += copied;
1503         rxm->full_len -= copied;
1504
1505         /* We have no way to return MSG_EOR. If all the bytes have been
1506          * read we still leave the message in the receive socket buffer.
1507          * A subsequent recvmsg needs to be done to return MSG_EOR and
1508          * finish reading the message.
1509          */
1510
1511         release_sock(sk);
1512
1513         return copied;
1514
1515 err_out:
1516         release_sock(sk);
1517
1518         return err;
1519 }
1520
1521 /* kcm sock lock held */
1522 static void kcm_recv_disable(struct kcm_sock *kcm)
1523 {
1524         struct kcm_mux *mux = kcm->mux;
1525
1526         if (kcm->rx_disabled)
1527                 return;
1528
1529         spin_lock_bh(&mux->rx_lock);
1530
1531         kcm->rx_disabled = 1;
1532
1533         /* If a psock is reserved we'll do cleanup in unreserve */
1534         if (!kcm->rx_psock) {
1535                 if (kcm->rx_wait) {
1536                         list_del(&kcm->wait_rx_list);
1537                         kcm->rx_wait = false;
1538                 }
1539
1540                 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1541         }
1542
1543         spin_unlock_bh(&mux->rx_lock);
1544 }
1545
1546 /* kcm sock lock held */
1547 static void kcm_recv_enable(struct kcm_sock *kcm)
1548 {
1549         struct kcm_mux *mux = kcm->mux;
1550
1551         if (!kcm->rx_disabled)
1552                 return;
1553
1554         spin_lock_bh(&mux->rx_lock);
1555
1556         kcm->rx_disabled = 0;
1557         kcm_rcv_ready(kcm);
1558
1559         spin_unlock_bh(&mux->rx_lock);
1560 }
1561
1562 static int kcm_setsockopt(struct socket *sock, int level, int optname,
1563                           char __user *optval, unsigned int optlen)
1564 {
1565         struct kcm_sock *kcm = kcm_sk(sock->sk);
1566         int val, valbool;
1567         int err = 0;
1568
1569         if (level != SOL_KCM)
1570                 return -ENOPROTOOPT;
1571
1572         if (optlen < sizeof(int))
1573                 return -EINVAL;
1574
1575         if (get_user(val, (int __user *)optval))
1576                 return -EFAULT;
1577
1578         valbool = val ? 1 : 0;
1579
1580         switch (optname) {
1581         case KCM_RECV_DISABLE:
1582                 lock_sock(&kcm->sk);
1583                 if (valbool)
1584                         kcm_recv_disable(kcm);
1585                 else
1586                         kcm_recv_enable(kcm);
1587                 release_sock(&kcm->sk);
1588                 break;
1589         default:
1590                 err = -ENOPROTOOPT;
1591         }
1592
1593         return err;
1594 }
1595
1596 static int kcm_getsockopt(struct socket *sock, int level, int optname,
1597                           char __user *optval, int __user *optlen)
1598 {
1599         struct kcm_sock *kcm = kcm_sk(sock->sk);
1600         int val, len;
1601
1602         if (level != SOL_KCM)
1603                 return -ENOPROTOOPT;
1604
1605         if (get_user(len, optlen))
1606                 return -EFAULT;
1607
1608         if (len < 0)
1609                 return -EINVAL;
1610         len = min_t(unsigned int, len, sizeof(int));
1611
1612         switch (optname) {
1613         case KCM_RECV_DISABLE:
1614                 val = kcm->rx_disabled;
1615                 break;
1616         default:
1617                 return -ENOPROTOOPT;
1618         }
1619
1620         if (put_user(len, optlen))
1621                 return -EFAULT;
1622         if (copy_to_user(optval, &val, len))
1623                 return -EFAULT;
1624         return 0;
1625 }
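
     /* Hypothetical userspace sketch for the socket option handled above:
      * KCM_RECV_DISABLE pauses message delivery to one KCM socket while the
      * other sockets on the same mux keep receiving.
      *
      *	#include <sys/socket.h>
      *	#include <linux/kcm.h>
      *
      *	static int kcm_set_rx_disable(int kcmfd, int on)
      *	{
      *		return setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE,
      *				  &on, sizeof(on));
      *	}
      */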
1626
1627 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1628 {
1629         struct kcm_sock *tkcm;
1630         struct list_head *head;
1631         int index = 0;
1632
1633         /* For the SOCK_SEQPACKET sock type, datagram_poll checks sk_state, so
1634          * we set sk_state here; otherwise epoll_wait always returns immediately
1635          * with POLLHUP.
1636          */
1637         kcm->sk.sk_state = TCP_ESTABLISHED;
1638
1639         /* Add to mux's kcm sockets list */
1640         kcm->mux = mux;
1641         spin_lock_bh(&mux->lock);
1642
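             /* Find the lowest unused index (e.g. with indices 0, 1 and 3 in
              * use the new socket gets index 2) and insert in index order.
              */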
1643         head = &mux->kcm_socks;
1644         list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1645                 if (tkcm->index != index)
1646                         break;
1647                 head = &tkcm->kcm_sock_list;
1648                 index++;
1649         }
1650
1651         list_add(&kcm->kcm_sock_list, head);
1652         kcm->index = index;
1653
1654         mux->kcm_socks_cnt++;
1655         spin_unlock_bh(&mux->lock);
1656
1657         INIT_WORK(&kcm->tx_work, kcm_tx_work);
1658
1659         spin_lock_bh(&mux->rx_lock);
1660         kcm_rcv_ready(kcm);
1661         spin_unlock_bh(&mux->rx_lock);
1662 }
1663
1664 static void kcm_rx_msg_timeout(unsigned long arg)
1665 {
1666         struct kcm_psock *psock = (struct kcm_psock *)arg;
1667
1668         /* Message assembly timed out */
1669         KCM_STATS_INCR(psock->stats.rx_msg_timeouts);
1670         kcm_abort_rx_psock(psock, ETIMEDOUT, NULL);
1671 }
1672
1673 static int kcm_attach(struct socket *sock, struct socket *csock,
1674                       struct bpf_prog *prog)
1675 {
1676         struct kcm_sock *kcm = kcm_sk(sock->sk);
1677         struct kcm_mux *mux = kcm->mux;
1678         struct sock *csk;
1679         struct kcm_psock *psock = NULL, *tpsock;
1680         struct list_head *head;
1681         int index = 0;
1682
1683         if (csock->ops->family != PF_INET &&
1684             csock->ops->family != PF_INET6)
1685                 return -EINVAL;
1686
1687         csk = csock->sk;
1688         if (!csk)
1689                 return -EINVAL;
1690
1691         /* Only support TCP for now */
1692         if (csk->sk_protocol != IPPROTO_TCP)
1693                 return -EINVAL;
1694
1695         psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1696         if (!psock)
1697                 return -ENOMEM;
1698
1699         psock->mux = mux;
1700         psock->sk = csk;
1701         psock->bpf_prog = prog;
1702
1703         setup_timer(&psock->rx_msg_timer, kcm_rx_msg_timeout,
1704                     (unsigned long)psock);
1705
1706         INIT_WORK(&psock->rx_work, psock_rx_work);
1707         INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work);
1708
1709         sock_hold(csk);
1710
1711         write_lock_bh(&csk->sk_callback_lock);

             /* Check whether sk_user_data is already claimed by KCM or some
              * other user of the lower socket; this must be tested under the
              * callback lock to avoid racing with a concurrent attach.
              */
             if (csk->sk_user_data) {
                     write_unlock_bh(&csk->sk_callback_lock);
                     sock_put(csk);
                     kmem_cache_free(kcm_psockp, psock);
                     return -EALREADY;
             }
1712         psock->save_data_ready = csk->sk_data_ready;
1713         psock->save_write_space = csk->sk_write_space;
1714         psock->save_state_change = csk->sk_state_change;
1715         csk->sk_user_data = psock;
1716         csk->sk_data_ready = psock_tcp_data_ready;
1717         csk->sk_write_space = psock_tcp_write_space;
1718         csk->sk_state_change = psock_tcp_state_change;
1719         write_unlock_bh(&csk->sk_callback_lock);
1720
1721         /* Finished initialization, now add the psock to the MUX. */
1722         spin_lock_bh(&mux->lock);
1723         head = &mux->psocks;
1724         list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1725                 if (tpsock->index != index)
1726                         break;
1727                 head = &tpsock->psock_list;
1728                 index++;
1729         }
1730
1731         list_add(&psock->psock_list, head);
1732         psock->index = index;
1733
1734         KCM_STATS_INCR(mux->stats.psock_attach);
1735         mux->psocks_cnt++;
1736         psock_now_avail(psock);
1737         spin_unlock_bh(&mux->lock);
1738
1739         /* Schedule RX work in case there are already bytes queued */
1740         queue_work(kcm_wq, &psock->rx_work);
1741
1742         return 0;
1743 }
1744
1745 static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1746 {
1747         struct socket *csock;
1748         struct bpf_prog *prog;
1749         int err;
1750
1751         csock = sockfd_lookup(info->fd, &err);
1752         if (!csock)
1753                 return -ENOENT;
1754
1755         prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1756         if (IS_ERR(prog)) {
1757                 err = PTR_ERR(prog);
1758                 goto out;
1759         }
1760
1761         err = kcm_attach(sock, csock, prog);
1762         if (err) {
1763                 bpf_prog_put(prog);
1764                 goto out;
1765         }
1766
1767         /* Keep the sockfd_lookup() reference on csock's file; it is
1768          * dropped later in kcm_unattach(). */
1769         return 0;
1770 out:
1771         fput(csock->file);
1772         return err;
1773 }
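
     /* Hypothetical userspace sketch of the attach path: hand a connected
      * TCP socket and a framing BPF program (both set up by the caller) to
      * the KCM socket. The program fd is assumed to refer to a loaded
      * BPF_PROG_TYPE_SOCKET_FILTER program that returns the message length.
      *
      *	#include <sys/ioctl.h>
      *	#include <linux/kcm.h>
      *
      *	static int kcm_do_attach(int kcmfd, int tcpfd, int bpf_prog_fd)
      *	{
      *		struct kcm_attach attach = {
      *			.fd = tcpfd,
      *			.bpf_fd = bpf_prog_fd,
      *		};
      *
      *		return ioctl(kcmfd, SIOCKCMATTACH, &attach);
      *	}
      */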
1774
1775 static void kcm_unattach(struct kcm_psock *psock)
1776 {
1777         struct sock *csk = psock->sk;
1778         struct kcm_mux *mux = psock->mux;
1779
1780         /* Stop getting callbacks from TCP socket. After this there should
1781          * be no way to reserve a kcm for this psock.
1782          */
1783         write_lock_bh(&csk->sk_callback_lock);
1784         csk->sk_user_data = NULL;
1785         csk->sk_data_ready = psock->save_data_ready;
1786         csk->sk_write_space = psock->save_write_space;
1787         csk->sk_state_change = psock->save_state_change;
1788         psock->rx_stopped = 1;
1789
1790         if (WARN_ON(psock->rx_kcm)) {
1791                 write_unlock_bh(&csk->sk_callback_lock);
1792                 return;
1793         }
1794
1795         spin_lock_bh(&mux->rx_lock);
1796
1797         /* Stop receiver activities. After this point psock should not be
1798          * able to get onto ready list either through callbacks or work.
1799          */
1800         if (psock->ready_rx_msg) {
1801                 list_del(&psock->psock_ready_list);
1802                 kfree_skb(psock->ready_rx_msg);
1803                 psock->ready_rx_msg = NULL;
1804                 KCM_STATS_INCR(mux->stats.rx_ready_drops);
1805         }
1806
1807         spin_unlock_bh(&mux->rx_lock);
1808
1809         write_unlock_bh(&csk->sk_callback_lock);
1810
1811         del_timer_sync(&psock->rx_msg_timer);
1812         cancel_work_sync(&psock->rx_work);
1813         cancel_delayed_work_sync(&psock->rx_delayed_work);
1814
1815         bpf_prog_put(psock->bpf_prog);
1816
1817         kfree_skb(psock->rx_skb_head);
1818         psock->rx_skb_head = NULL;
1819
1820         spin_lock_bh(&mux->lock);
1821
1822         aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1823
1824         KCM_STATS_INCR(mux->stats.psock_unattach);
1825
1826         if (psock->tx_kcm) {
1827                 /* psock was reserved. Just mark it finished and we will clean
1828                  * up in the kcm paths; we need the kcm lock, which cannot be
1829                  * acquired here.
1830                  */
1831                 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1832                 spin_unlock_bh(&mux->lock);
1833
1834                 /* We are unattaching a socket that is reserved. Abort the
1835                  * socket since we may be out of sync in sending on it. We need
1836                  * to do this without the mux lock.
1837                  */
1838                 kcm_abort_tx_psock(psock, EPIPE, false);
1839
1840                 spin_lock_bh(&mux->lock);
1841                 if (!psock->tx_kcm) {
1842                         /* psock was unreserved in the window the mux was unlocked */
1843                         goto no_reserved;
1844                 }
1845                 psock->done = 1;
1846
1847                 /* Commit done before queuing work to process it */
1848                 smp_mb();
1849
1850                 /* Queue tx work to make sure psock->done is handled */
1851                 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1852                 spin_unlock_bh(&mux->lock);
1853         } else {
1854 no_reserved:
1855                 if (!psock->tx_stopped)
1856                         list_del(&psock->psock_avail_list);
1857                 list_del(&psock->psock_list);
1858                 mux->psocks_cnt--;
1859                 spin_unlock_bh(&mux->lock);
1860
1861                 sock_put(csk);
1862                 fput(csk->sk_socket->file);
1863                 kmem_cache_free(kcm_psockp, psock);
1864         }
1865 }
1866
1867 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1868 {
1869         struct kcm_sock *kcm = kcm_sk(sock->sk);
1870         struct kcm_mux *mux = kcm->mux;
1871         struct kcm_psock *psock;
1872         struct socket *csock;
1873         struct sock *csk;
1874         int err;
1875
1876         csock = sockfd_lookup(info->fd, &err);
1877         if (!csock)
1878                 return -ENOENT;
1879
1880         csk = csock->sk;
1881         if (!csk) {
1882                 err = -EINVAL;
1883                 goto out;
1884         }
1885
1886         err = -ENOENT;
1887
1888         spin_lock_bh(&mux->lock);
1889
1890         list_for_each_entry(psock, &mux->psocks, psock_list) {
1891                 if (psock->sk != csk)
1892                         continue;
1893
1894                 /* Found the matching psock */
1895
1896                 if (psock->unattaching || WARN_ON(psock->done)) {
1897                         err = -EALREADY;
1898                         break;
1899                 }
1900
1901                 psock->unattaching = 1;
1902
1903                 spin_unlock_bh(&mux->lock);
1904
1905                 kcm_unattach(psock);
1906
1907                 err = 0;
1908                 goto out;
1909         }
1910
1911         spin_unlock_bh(&mux->lock);
1912
1913 out:
1914         fput(csock->file);
1915         return err;
1916 }
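
     /* Matching userspace sketch (hypothetical) for the unattach path:
      *
      *	static int kcm_do_unattach(int kcmfd, int tcpfd)
      *	{
      *		struct kcm_unattach unattach = { .fd = tcpfd };
      *
      *		return ioctl(kcmfd, SIOCKCMUNATTACH, &unattach);
      *	}
      */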
1917
1918 static struct proto kcm_proto = {
1919         .name   = "KCM",
1920         .owner  = THIS_MODULE,
1921         .obj_size = sizeof(struct kcm_sock),
1922 };
1923
1924 /* Clone a kcm socket. */
1925 static int kcm_clone(struct socket *osock, struct kcm_clone *info,
1926                      struct socket **newsockp)
1927 {
1928         struct socket *newsock;
1929         struct sock *newsk;
1930         struct file *newfile;
1931         int err, newfd;
1932
1933         err = -ENFILE;
1934         newsock = sock_alloc();
1935         if (!newsock)
1936                 goto out;
1937
1938         newsock->type = osock->type;
1939         newsock->ops = osock->ops;
1940
1941         __module_get(newsock->ops->owner);
1942
1943         newfd = get_unused_fd_flags(0);
1944         if (unlikely(newfd < 0)) {
1945                 err = newfd;
1946                 goto out_fd_fail;
1947         }
1948
1949         newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1950         if (unlikely(IS_ERR(newfile))) {
1951                 err = PTR_ERR(newfile);
1952                 goto out_sock_alloc_fail;
1953         }
1954
1955         newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1956                          &kcm_proto, true);
1957         if (!newsk) {
1958                 err = -ENOMEM;
1959                 goto out_sk_alloc_fail;
1960         }
1961
1962         sock_init_data(newsock, newsk);
1963         init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1964
1965         fd_install(newfd, newfile);
1966         *newsockp = newsock;
1967         info->fd = newfd;
1968
1969         return 0;
1970
1971 out_sk_alloc_fail:
1972         put_unused_fd(newfd);
             /* fput() also releases newsock; do not fall through to
              * sock_release() below or the socket is released twice.
              */
             fput(newfile);
             return err;
1973 out_sock_alloc_fail:
1974         put_unused_fd(newfd);
1975 out_fd_fail:
1976         sock_release(newsock);
1977 out:
1978         return err;
1979 }
1980
1981 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1982 {
1983         int err;
1984
1985         switch (cmd) {
1986         case SIOCKCMATTACH: {
1987                 struct kcm_attach info;
1988
1989                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1990                         return -EFAULT;
1991
1992                 err = kcm_attach_ioctl(sock, &info);
1993
1994                 break;
1995         }
1996         case SIOCKCMUNATTACH: {
1997                 struct kcm_unattach info;
1998
1999                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2000                         return -EFAULT;
2001
2002                 err = kcm_unattach_ioctl(sock, &info);
2003
2004                 break;
2005         }
2006         case SIOCKCMCLONE: {
2007                 struct kcm_clone info;
2008                 struct socket *newsock = NULL;
2009
2010                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2011                         return -EFAULT;
2012
2013                 err = kcm_clone(sock, &info, &newsock);
2014
2015                 if (!err) {
2016                         if (copy_to_user((void __user *)arg, &info,
2017                                          sizeof(info))) {
2018                                 err = -EFAULT;
2019                                 sys_close(info.fd);
2020                         }
2021                 }
2022
2023                 break;
2024         }
2025         default:
2026                 err = -ENOIOCTLCMD;
2027                 break;
2028         }
2029
2030         return err;
2031 }
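
     /* Hypothetical userspace sketch of SIOCKCMCLONE as handled above: the
      * kernel fills in info.fd with a new KCM socket on the same mux.
      *
      *	static int kcm_do_clone(int kcmfd)
      *	{
      *		struct kcm_clone info = { 0 };
      *
      *		if (ioctl(kcmfd, SIOCKCMCLONE, &info) < 0)
      *			return -1;
      *		return info.fd;
      *	}
      */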
2032
2033 static void free_mux(struct rcu_head *rcu)
2034 {
2035         struct kcm_mux *mux = container_of(rcu,
2036             struct kcm_mux, rcu);
2037
2038         kmem_cache_free(kcm_muxp, mux);
2039 }
2040
2041 static void release_mux(struct kcm_mux *mux)
2042 {
2043         struct kcm_net *knet = mux->knet;
2044         struct kcm_psock *psock, *tmp_psock;
2045
2046         /* Release psocks */
2047         list_for_each_entry_safe(psock, tmp_psock,
2048                                  &mux->psocks, psock_list) {
2049                 if (!WARN_ON(psock->unattaching))
2050                         kcm_unattach(psock);
2051         }
2052
2053         if (WARN_ON(mux->psocks_cnt))
2054                 return;
2055
2056         __skb_queue_purge(&mux->rx_hold_queue);
2057
2058         mutex_lock(&knet->mutex);
2059         aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
2060         aggregate_psock_stats(&mux->aggregate_psock_stats,
2061                               &knet->aggregate_psock_stats);
2062         list_del_rcu(&mux->kcm_mux_list);
2063         knet->count--;
2064         mutex_unlock(&knet->mutex);
2065
2066         call_rcu(&mux->rcu, free_mux);
2067 }
2068
2069 static void kcm_done(struct kcm_sock *kcm)
2070 {
2071         struct kcm_mux *mux = kcm->mux;
2072         struct sock *sk = &kcm->sk;
2073         int socks_cnt;
2074
2075         spin_lock_bh(&mux->rx_lock);
2076         if (kcm->rx_psock) {
2077                 /* Cleanup in unreserve_rx_kcm */
2078                 WARN_ON(kcm->done);
2079                 kcm->rx_disabled = 1;
2080                 kcm->done = 1;
2081                 spin_unlock_bh(&mux->rx_lock);
2082                 return;
2083         }
2084
2085         if (kcm->rx_wait) {
2086                 list_del(&kcm->wait_rx_list);
2087                 kcm->rx_wait = false;
2088         }
2089         /* Move any pending receive messages to other kcm sockets */
2090         requeue_rx_msgs(mux, &sk->sk_receive_queue);
2091
2092         spin_unlock_bh(&mux->rx_lock);
2093
2094         if (WARN_ON(sk_rmem_alloc_get(sk)))
2095                 return;
2096
2097         /* Detach from MUX */
2098         spin_lock_bh(&mux->lock);
2099
2100         list_del(&kcm->kcm_sock_list);
2101         mux->kcm_socks_cnt--;
2102         socks_cnt = mux->kcm_socks_cnt;
2103
2104         spin_unlock_bh(&mux->lock);
2105
2106         if (!socks_cnt) {
2107                 /* We are done with the mux now. */
2108                 release_mux(mux);
2109         }
2110
2111         WARN_ON(kcm->rx_wait);
2112
2113         sock_put(&kcm->sk);
2114 }
2115
2116 /* Called by kcm_release to close a KCM socket.
2117  * If this is the last KCM socket on the MUX, destroy the MUX.
2118  */
2119 static int kcm_release(struct socket *sock)
2120 {
2121         struct sock *sk = sock->sk;
2122         struct kcm_sock *kcm;
2123         struct kcm_mux *mux;
2124         struct kcm_psock *psock;
2125
2126         if (!sk)
2127                 return 0;
2128
2129         kcm = kcm_sk(sk);
2130         mux = kcm->mux;
2131
2132         sock_orphan(sk);
2133         kfree_skb(kcm->seq_skb);
2134
2135         lock_sock(sk);
2136         /* Purge queue under lock to avoid race condition with tx_work trying
2137          * to act when queue is nonempty. If tx_work runs after this point
2138          * it will just return.
2139          */
2140         __skb_queue_purge(&sk->sk_write_queue);
2141         release_sock(sk);
2142
2143         spin_lock_bh(&mux->lock);
2144         if (kcm->tx_wait) {
2145                 /* Take off the tx_wait list; after this point there should be no way
2146                  * that a psock will be assigned to this kcm.
2147                  */
2148                 list_del(&kcm->wait_psock_list);
2149                 kcm->tx_wait = false;
2150         }
2151         spin_unlock_bh(&mux->lock);
2152
2153         /* Cancel work. After this point there should be no outside references
2154          * to the kcm socket.
2155          */
2156         cancel_work_sync(&kcm->tx_work);
2157
2158         lock_sock(sk);
2159         psock = kcm->tx_psock;
2160         if (psock) {
2161                 /* A psock was reserved, so we need to kill it since it
2162                  * may already have some bytes queued from a message. We
2163                  * need to do this after removing kcm from tx_wait list.
2164                  */
2165                 kcm_abort_tx_psock(psock, EPIPE, false);
2166                 unreserve_psock(kcm);
2167         }
2168         release_sock(sk);
2169
2170         WARN_ON(kcm->tx_wait);
2171         WARN_ON(kcm->tx_psock);
2172
2173         sock->sk = NULL;
2174
2175         kcm_done(kcm);
2176
2177         return 0;
2178 }
2179
2180 static const struct proto_ops kcm_dgram_ops = {
2181         .family =       PF_KCM,
2182         .owner =        THIS_MODULE,
2183         .release =      kcm_release,
2184         .bind =         sock_no_bind,
2185         .connect =      sock_no_connect,
2186         .socketpair =   sock_no_socketpair,
2187         .accept =       sock_no_accept,
2188         .getname =      sock_no_getname,
2189         .poll =         datagram_poll,
2190         .ioctl =        kcm_ioctl,
2191         .listen =       sock_no_listen,
2192         .shutdown =     sock_no_shutdown,
2193         .setsockopt =   kcm_setsockopt,
2194         .getsockopt =   kcm_getsockopt,
2195         .sendmsg =      kcm_sendmsg,
2196         .recvmsg =      kcm_recvmsg,
2197         .mmap =         sock_no_mmap,
2198         .sendpage =     kcm_sendpage,
2199 };
2200
2201 static const struct proto_ops kcm_seqpacket_ops = {
2202         .family =       PF_KCM,
2203         .owner =        THIS_MODULE,
2204         .release =      kcm_release,
2205         .bind =         sock_no_bind,
2206         .connect =      sock_no_connect,
2207         .socketpair =   sock_no_socketpair,
2208         .accept =       sock_no_accept,
2209         .getname =      sock_no_getname,
2210         .poll =         datagram_poll,
2211         .ioctl =        kcm_ioctl,
2212         .listen =       sock_no_listen,
2213         .shutdown =     sock_no_shutdown,
2214         .setsockopt =   kcm_setsockopt,
2215         .getsockopt =   kcm_getsockopt,
2216         .sendmsg =      kcm_sendmsg,
2217         .recvmsg =      kcm_recvmsg,
2218         .mmap =         sock_no_mmap,
2219         .sendpage =     kcm_sendpage,
2220         .splice_read =  kcm_splice_read,
2221 };
2222
2223 /* Create proto operation for kcm sockets */
2224 static int kcm_create(struct net *net, struct socket *sock,
2225                       int protocol, int kern)
2226 {
2227         struct kcm_net *knet = net_generic(net, kcm_net_id);
2228         struct sock *sk;
2229         struct kcm_mux *mux;
2230
2231         switch (sock->type) {
2232         case SOCK_DGRAM:
2233                 sock->ops = &kcm_dgram_ops;
2234                 break;
2235         case SOCK_SEQPACKET:
2236                 sock->ops = &kcm_seqpacket_ops;
2237                 break;
2238         default:
2239                 return -ESOCKTNOSUPPORT;
2240         }
2241
2242         if (protocol != KCMPROTO_CONNECTED)
2243                 return -EPROTONOSUPPORT;
2244
2245         sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
2246         if (!sk)
2247                 return -ENOMEM;
2248
2249         /* Allocate a kcm mux, shared between KCM sockets */
2250         mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
2251         if (!mux) {
2252                 sk_free(sk);
2253                 return -ENOMEM;
2254         }
2255
2256         spin_lock_init(&mux->lock);
2257         spin_lock_init(&mux->rx_lock);
2258         INIT_LIST_HEAD(&mux->kcm_socks);
2259         INIT_LIST_HEAD(&mux->kcm_rx_waiters);
2260         INIT_LIST_HEAD(&mux->kcm_tx_waiters);
2261
2262         INIT_LIST_HEAD(&mux->psocks);
2263         INIT_LIST_HEAD(&mux->psocks_ready);
2264         INIT_LIST_HEAD(&mux->psocks_avail);
2265
2266         mux->knet = knet;
2267
2268         /* Add new MUX to list */
2269         mutex_lock(&knet->mutex);
2270         list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
2271         knet->count++;
2272         mutex_unlock(&knet->mutex);
2273
2274         skb_queue_head_init(&mux->rx_hold_queue);
2275
2276         /* Init KCM socket */
2277         sock_init_data(sock, sk);
2278         init_kcm_sock(kcm_sk(sk), mux);
2279
2280         return 0;
2281 }
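
     /* Hypothetical userspace sketch of socket creation as validated above:
      * only KCMPROTO_CONNECTED is accepted, and of the two supported types
      * only SOCK_SEQPACKET gets splice_read support.
      *
      *	#include <sys/socket.h>
      *	#include <linux/kcm.h>
      *
      *	int kcmfd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
      */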
2282
2283 static struct net_proto_family kcm_family_ops = {
2284         .family = PF_KCM,
2285         .create = kcm_create,
2286         .owner  = THIS_MODULE,
2287 };
2288
2289 static __net_init int kcm_init_net(struct net *net)
2290 {
2291         struct kcm_net *knet = net_generic(net, kcm_net_id);
2292
2293         INIT_LIST_HEAD_RCU(&knet->mux_list);
2294         mutex_init(&knet->mutex);
2295
2296         return 0;
2297 }
2298
2299 static __net_exit void kcm_exit_net(struct net *net)
2300 {
2301         struct kcm_net *knet = net_generic(net, kcm_net_id);
2302
2303         /* All KCM sockets should be closed at this point, which should mean
2304          * that all multiplexors and psocks have been destroyed.
2305          */
2306         WARN_ON(!list_empty(&knet->mux_list));
2307 }
2308
2309 static struct pernet_operations kcm_net_ops = {
2310         .init = kcm_init_net,
2311         .exit = kcm_exit_net,
2312         .id   = &kcm_net_id,
2313         .size = sizeof(struct kcm_net),
2314 };
2315
2316 static int __init kcm_init(void)
2317 {
2318         int err = -ENOMEM;
2319
2320         kcm_muxp = kmem_cache_create("kcm_mux_cache",
2321                                      sizeof(struct kcm_mux), 0,
2322                                      SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2323         if (!kcm_muxp)
2324                 goto fail;
2325
2326         kcm_psockp = kmem_cache_create("kcm_psock_cache",
2327                                        sizeof(struct kcm_psock), 0,
2328                                         SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2329         if (!kcm_psockp)
2330                 goto fail;
2331
2332         kcm_wq = create_singlethread_workqueue("kkcmd");
2333         if (!kcm_wq)
2334                 goto fail;
2335
2336         err = proto_register(&kcm_proto, 1);
2337         if (err)
2338                 goto fail;
2339
2340         err = sock_register(&kcm_family_ops);
2341         if (err)
2342                 goto sock_register_fail;
2343
2344         err = register_pernet_device(&kcm_net_ops);
2345         if (err)
2346                 goto net_ops_fail;
2347
2348         err = kcm_proc_init();
2349         if (err)
2350                 goto proc_init_fail;
2351
2352         return 0;
2353
2354 proc_init_fail:
2355         unregister_pernet_device(&kcm_net_ops);
2356
2357 net_ops_fail:
2358         sock_unregister(PF_KCM);
2359
2360 sock_register_fail:
2361         proto_unregister(&kcm_proto);
2362
2363 fail:
2364         kmem_cache_destroy(kcm_muxp);
2365         kmem_cache_destroy(kcm_psockp);
2366
2367         if (kcm_wq)
2368                 destroy_workqueue(kcm_wq);
2369
2370         return err;
2371 }
2372
2373 static void __exit kcm_exit(void)
2374 {
2375         kcm_proc_exit();
2376         unregister_pernet_device(&kcm_net_ops);
2377         sock_unregister(PF_KCM);
2378         proto_unregister(&kcm_proto);
2379         destroy_workqueue(kcm_wq);
2380
2381         kmem_cache_destroy(kcm_muxp);
2382         kmem_cache_destroy(kcm_psockp);
2383 }
2384
2385 module_init(kcm_init);
2386 module_exit(kcm_exit);
2387
2388 MODULE_LICENSE("GPL");
2389 MODULE_ALIAS_NETPROTO(PF_KCM);
2390