/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
#define VERSION "1.1"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
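
/*
 * Note: the sentinel relies on the IPRM length convention described at
 * iucv_msg_length() below: PRMDATA[7] = 0x01 encodes a socket data length
 * of 0xff - 0x01 = 0xfe, which is greater than 7 and therefore cannot
 * collide with regular inline socket data (at most 7 bytes).
 */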
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
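
/*
 * The 16 bytes of IUCV user data are assembled from two 8-byte z/VM names:
 * high_nmcpy() fills bytes 0..7 and low_nmcpy() fills bytes 8..15.
 * iucv_sock_close(), for example, places the peer name (dst_name) in
 * bytes 0..7 and the local name (src_name) in bytes 8..15, then converts
 * the whole block to EBCDIC with ASCEBC() before handing it to CP.
 */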
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value (PRMDATA[7]) from 0xff.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the function
 * returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
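
/*
 * Worked example: an IPRM message carrying 5 bytes of socket data has
 * PRMDATA[7] = 0xff - 5 = 0xfa, so datalen = 0xff - 0xfa = 5 and a 5-byte
 * skb suffices.  The iprm_shutdown sentinel has PRMDATA[7] = 0x01, giving
 * datalen = 0xfe, which the function caps at 8.
 */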
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	/* IUCV addresses data buffers below 2 GB, hence GFP_DMA on s390 */
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN)
		err = -ECONNREFUSED;

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}
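
/*
 * Connect handshake: iucv_path_connect() triggers the peer's path_pending
 * callback (iucv_callback_connreq); once the peer accepts the path, this
 * side's path_complete callback (iucv_callback_connack) moves the socket
 * to IUCV_CONNECTED and wakes the iucv_sock_wait_state() call above.
 */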
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	/* zero the buffer first; otherwise the unused slots between
	 * skb->len and index 7 would leak stack bytes to the peer */
	memset(prmdata, 0, sizeof(prmdata));
	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
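
/*
 * Example: sending "ok\n" (3 bytes) as IPRM data yields
 *	prmdata[] = { 'o', 'k', '\n', 0x00, 0x00, 0x00, 0x00, 0xfc }
 * since 0xff - 3 = 0xfc; the receiver recovers the length in
 * iucv_msg_length() as 0xff - 0xfc = 3.
 */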
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	char user_id[9];
	char appl_id[9];
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		/* save the message tag in skb->cb so the transmit
		 * completion (iucv_callback_txdone) can match it */
		txmsg.class = 0;
		memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
		      && skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback
			 * for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the
			 * IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				iucv_path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else
			err = iucv_message_send(iucv->path, &txmsg, 0, 0,
						(void *) skb->data, skb->len);
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s"
				       " exceeds message limit\n",
				       appl_id, user_id);
			}
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
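
/*
 * Lifecycle of a sent skb: it stays on send_skb_q until the IUCV
 * message_complete callback (iucv_callback_txdone) matches msg->tag
 * against the tag saved in skb->cb and frees it.  IPRM messages are the
 * exception: they complete synchronously and are unlinked right above.
 */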
/* Fragment a received skb into rcvbuf-sized pieces on the backlog queue */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
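
/*
 * Example: with sk_rcvbuf = 64 KiB the chunk size is 16 KiB, so a 40 KiB
 * message is queued on backlog_skb_q as 16 KiB + 16 KiB + 8 KiB skbs,
 * which iucv_sock_recvmsg() later feeds into the receive queue.
 */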
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}
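
/*
 * Receive-side queue hierarchy: skbs are consumed from sk_receive_queue
 * first; once an skb is fully read, backlog_skb_q is drained into the
 * receive queue, and only when the backlog is empty are deferred messages
 * from message_q (saved in iucv_callback_rx) processed.  This keeps
 * messages in arrival order.
 */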
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;	/* map SHUT_RD/WR/RDWR (0..2) to RCV/SEND/SHUTDOWN_MASK (1..3) */

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err)
			err = -ENOTCONN;
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
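
/*
 * Note: SEND_SHUTDOWN is announced to the peer in-band by sending the
 * iprm_shutdown sentinel as an IPRMDATA message, which the peer
 * recognizes in iucv_process_message().  RCV_SHUTDOWN additionally
 * quiesces the path, which triggers the peer's path_quiesced callback
 * (iucv_callback_shutdown) and disables its send direction.
 */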
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
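
/*
 * Usage sketch (userspace; assumes SOL_IUCV and SO_IPRMDATA_MSG are
 * visible through the af_iucv headers):
 *	int one = 1;
 *	setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &one, sizeof(one));
 * This enables inline PRMDATA transfer for messages of up to 7 bytes,
 * provided the path was established with IUCV_IPRMDATA (see the check
 * in iucv_sock_sendmsg()).
 */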
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	spin_lock(&iucv->message_q.lock);
	iucv_process_message(sk, skb, path, msg);
	spin_unlock(&iucv->message_q.lock);

	return;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		return;
	save_msg->path = path;
	save_msg->msg = *msg;

	spin_lock(&iucv->message_q.lock);
	list_add_tail(&save_msg->list, &iucv->message_q.list);
	spin_unlock(&iucv->message_q.lock);
}
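
/*
 * Deferred delivery: if earlier data is still pending on backlog_skb_q or
 * message_q, or the receive buffer is full, or no atomic skb can be
 * allocated, the message is parked on message_q and fetched later by
 * iucv_process_message_q() once iucv_sock_recvmsg() has drained the
 * backlog, preserving message order.
 */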
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		/* find the skb whose saved tag matches the completed msg */
		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, list_skb->cb, 4)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		kfree_skb(this);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);