/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
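
/*
 * Editor's note (illustrative, not part of the original source): with the
 * PRMDATA convention described at iucv_msg_length() below, iprm_shutdown
 * carries PRMDATA[7] = 0x01, i.e. a pseudo data length of 0xff - 0x01 = 0xfe.
 * Any length value greater than 7 marks a message as a special notification
 * rather than ordinary socket data, which is how the peer recognizes a
 * shutdown request (see iucv_process_message()).
 */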

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
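
/*
 * Editor's note (illustrative, not part of the original source): the start
 * of skb->cb is laid out as two adjacent fields,
 *
 *	offset 0          .. CB_TAG_LEN-1                 iucv message tag
 *	offset CB_TAG_LEN .. CB_TAG_LEN+CB_TRGCLS_LEN-1   iucv target class
 *
 * so storing and retrieving the target class of an skb looks like:
 *
 *	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
 *	memcpy(&trgcls, CB_TRGCLS(skb), CB_TRGCLS_LEN);
 */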

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
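
/*
 * Editor's note (illustrative, not part of the original source): the 16-byte
 * IUCV path user data is composed of two 8-byte names; high_nmcpy() fills
 * bytes 0..7 and low_nmcpy() fills bytes 8..15. iucv_sock_close(), for
 * example, places dst_name in bytes 0..7 and src_name in bytes 8..15 and
 * then converts the whole field to EBCDIC with ASCEBC().
 */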

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further, if the socket
 * data length is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
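
/*
 * Editor's note (illustrative, not part of the original source): a worked
 * example of the PRMDATA length convention. For a 5-byte payload the sender
 * stores PRMDATA[7] = 0xff - 5 = 0xfa and iucv_msg_length() recovers
 * 0xff - 0xfa = 5. For iprm_shutdown, PRMDATA[7] = 0x01 yields 0xfe (> 7),
 * so the function returns 8 and the receive path treats the message as a
 * special notification instead of socket data.
 */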

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		err = -ECONNREFUSED;
	}

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
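
/*
 * Editor's note (illustrative, not part of the original source): for an skb
 * with skb->len == 3 and data "abc", prmdata becomes "abc" in bytes 0..2 and
 * prmdata[7] = 0xff - 3 = 0xfc; bytes 3..6 remain unused. The receiver then
 * recovers the length as 0xff - 0xfc = 3 via iucv_msg_length().
 */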

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	char user_id[9];
	char appl_id[9];
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		/* initialize defaults */
		cmsg_done   = 0;	/* check for duplicate headers */
		txmsg.class = 0;

		/* iterate over control messages */
		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
			cmsg = CMSG_NXTHDR(msg, cmsg)) {

			if (!CMSG_OK(msg, cmsg)) {
				err = -EINVAL;
				goto out;
			}

			if (cmsg->cmsg_level != SOL_IUCV)
				continue;

			if (cmsg->cmsg_type & cmsg_done) {
				err = -EINVAL;
				goto out;
			}
			cmsg_done |= cmsg->cmsg_type;

			switch (cmsg->cmsg_type) {
			case SCM_IUCV_TRGCLS:
				if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
					err = -EINVAL;
					goto out;
				}

				/* set iucv message target class */
				memcpy(&txmsg.class,
					(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
				break;

			default:
				err = -EINVAL;
				goto out;
			}
		}

		/* allocate one skb for each iucv message:
		 * this is fine for SOCK_SEQPACKET (unless we want to support
		 * segmented records using the MSG_EOR flag), but
		 * for SOCK_STREAM we might want to improve it in future */
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		/* increment and save iucv message tag for msg_completion cbk */
		txmsg.tag = iucv->send_tag++;
		memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
		    && skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback
			 * for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the
			 * IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				iucv_path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else {
			err = iucv_message_send(iucv->path, &txmsg, 0, 0,
						(void *) skb->data, skb->len);
			if (err) {
				if (err == 3) {
					user_id[8] = 0;
					memcpy(user_id, iucv->dst_user_id, 8);
					appl_id[8] = 0;
					memcpy(appl_id, iucv->dst_name, 8);
					pr_err("Application %s on z/VM guest %s"
					       " exceeds message limit\n",
					       appl_id, user_id);
				}
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		}
	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
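
/*
 * Editor's note (illustrative, not part of the original source): from user
 * space, the iucv message target class is passed to sendmsg() as ancillary
 * data, e.g. (error handling omitted):
 *
 *	char ctl[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = ctl,
 *			    .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *	__u32 trgcls = 1;
 *
 *	c->cmsg_level = SOL_IUCV;
 *	c->cmsg_type  = SCM_IUCV_TRGCLS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(c), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &m, 0);
 */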

/* iucv_fragment_skb() - Fragment a big iucv message into multiple skbs */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;
		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
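
/*
 * Editor's note (illustrative, not part of the original source): messages
 * are chopped into fragments of at most sk_rcvbuf / 4 bytes. For example, a
 * 10000-byte message received on a socket with sk_rcvbuf == 16384 becomes
 * three backlog skbs of 4096, 4096 and 1808 bytes, each carrying a copy of
 * the original message's target class in its control buffer.
 */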

static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target;
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
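
/*
 * Editor's note (illustrative, not part of the original source): the target
 * class stored with put_cmsg() above can be read back in user space by
 * walking the control messages returned by recvmsg():
 *
 *	struct cmsghdr *c;
 *	__u32 trgcls;
 *
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_IUCV &&
 *		    c->cmsg_type == SCM_IUCV_TRGCLS)
 *			memcpy(&trgcls, CMSG_DATA(c), sizeof(trgcls));
 */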

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
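
/*
 * Editor's note (illustrative, not part of the original source): shutdown()
 * passes how as SHUT_RD (0), SHUT_WR (1) or SHUT_RDWR (2); the how++ above
 * maps these onto the kernel masks RCV_SHUTDOWN (1), SEND_SHUTDOWN (2) and
 * SHUTDOWN_MASK (3). A shutdown(fd, SHUT_WR) therefore sends the
 * iprm_shutdown notification to the peer, while SHUT_RD quiesces the path
 * and purges the receive queue.
 */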

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
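
/*
 * Editor's note (illustrative, not part of the original source): enabling
 * IPRM data transfer from user space, so that payloads of up to 7 bytes
 * travel in the message parameter list instead of a separate buffer:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &on, sizeof(on));
 *
 * The send path additionally requires the IUCV path to have the
 * IUCV_IPRMDATA flag set (see iucv_sock_sendmsg()).
 */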

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	spin_lock(&iucv->message_q.lock);
	iucv_process_message(sk, skb, path, msg);
	spin_unlock(&iucv->message_q.lock);

	return;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		return;
	save_msg->path = path;
	save_msg->msg = *msg;

	spin_lock(&iucv->message_q.lock);
	list_add_tail(&save_msg->list, &iucv->message_q.list);
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		kfree_skb(this);
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
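
/*
 * Editor's note (illustrative, not part of the original source): the tag
 * matching above is the counterpart of iucv_sock_sendmsg(), which stores
 * the outgoing message tag in CB_TAG(skb) before queueing the skb on
 * send_skb_q; the completion callback compares msg->tag against each queued
 * skb's control buffer to find and free the skb that completed.
 */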

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);