Merge git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 60f2938..b733306 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -32,7 +32,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -69,7 +68,7 @@
 #define IUCV_IPNORPY   0x10
 #define IUCV_IPALL     0x80
 
-static int iucv_bus_match (struct device *dev, struct device_driver *drv)
+static int iucv_bus_match(struct device *dev, struct device_driver *drv)
 {
        return 0;
 }
@@ -78,8 +77,11 @@ struct bus_type iucv_bus = {
        .name = "iucv",
        .match = iucv_bus_match,
 };
+EXPORT_SYMBOL(iucv_bus);
 
 struct device *iucv_root;
+EXPORT_SYMBOL(iucv_root);
+
 static int iucv_available;
 
 /* General IUCV interrupt structure */
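The EXPORT_SYMBOL() statements move from the block formerly collected at the end of the file (removed in the final hunk below) to sit directly after each definition, the usual kernel convention. A minimal sketch of the pattern, with a made-up symbol name:

    #include <linux/module.h>
    #include <linux/device.h>

    /* Hypothetical example: export a symbol right where it is
     * defined instead of in a list at the end of the file. */
    struct bus_type demo_bus = {
            .name = "demo",
    };
    EXPORT_SYMBOL(demo_bus);
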
@@ -90,20 +92,43 @@ struct iucv_irq_data {
        u32 res2[8];
 };
 
-struct iucv_work {
+struct iucv_irq_list {
        struct list_head list;
        struct iucv_irq_data data;
 };
 
-static LIST_HEAD(iucv_work_queue);
-static DEFINE_SPINLOCK(iucv_work_lock);
-
 static struct iucv_irq_data *iucv_irq_data;
 static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
 static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
 
-static void iucv_tasklet_handler(unsigned long);
-static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler,0);
+/*
+ * Queue of interrupt buffers for delivery via the tasklet
+ * (fast but can't call smp_call_function).
+ */
+static LIST_HEAD(iucv_task_queue);
+
+/*
+ * The tasklet for fast delivery of iucv interrupts.
+ */
+static void iucv_tasklet_fn(unsigned long);
+static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn, 0);
+
+/*
+ * Queue of interrupt buffers for delivery via a work queue
+ * (slower but can call smp_call_function).
+ */
+static LIST_HEAD(iucv_work_queue);
+
+/*
+ * The work element to deliver path pending interrupts.
+ */
+static void iucv_work_fn(struct work_struct *work);
+static DECLARE_WORK(iucv_work, iucv_work_fn);
+
+/*
+ * Spinlock protecting the task and work queues.
+ */
+static DEFINE_SPINLOCK(iucv_queue_lock);
 
 enum iucv_command_codes {
        IUCV_QUERY = 0,
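The comments above describe the core of this patch: interrupt buffers are now split across two queues. Most interrupt types are delivered from a tasklet, which runs in softirq context and is fast but may not call smp_call_function(); path pending interrupts are delivered from a work queue, which runs in process context where smp_call_function() is allowed. A hedged sketch of the same arrangement, using hypothetical demo_* names and the 2.6.20-era declaration macros this file uses:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    static LIST_HEAD(demo_task_queue);   /* drained in softirq context */
    static LIST_HEAD(demo_work_queue);   /* drained in process context */
    static DEFINE_SPINLOCK(demo_queue_lock);

    static void demo_tasklet_fn(unsigned long ignored)
    {
            /* Softirq context: must not sleep and must not
             * call smp_call_function(). */
    }
    static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

    static void demo_work_fn(struct work_struct *work)
    {
            /* Process context: may sleep and may call
             * smp_call_function(). */
    }
    static DECLARE_WORK(demo_work, demo_work_fn);
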
@@ -147,10 +172,10 @@ static unsigned long iucv_max_pathid;
 static DEFINE_SPINLOCK(iucv_table_lock);
 
 /*
- * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
- * Needed for iucv_path_sever called from tasklet.
+ * iucv_active_cpu: contains the number of the cpu executing the tasklet
+ * or the work handler. Needed for iucv_path_sever called from either.
  */
-static int iucv_tasklet_cpu = -1;
+static int iucv_active_cpu = -1;
 
 /*
  * Mutex and wait queue for iucv_register/iucv_unregister.
@@ -382,7 +407,7 @@ static void iucv_declare_cpu(void *data)
        rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
        if (rc) {
                char *err = "Unknown";
-               switch(rc) {
+               switch (rc) {
                case 0x03:
                        err = "Directory error";
                        break;
@@ -449,17 +474,19 @@ static void iucv_setmask_mp(void)
 {
        int cpu;
 
+       preempt_disable();
        for_each_online_cpu(cpu)
                /* Enable all cpus with a declared buffer. */
                if (cpu_isset(cpu, iucv_buffer_cpumask) &&
                    !cpu_isset(cpu, iucv_irq_cpumask))
                        smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+       preempt_enable();
 }
 
 /**
  * iucv_setmask_up
  *
- * Allow iucv interrupts on a single cpus.
+ * Allow iucv interrupts on a single cpu.
  */
 static void iucv_setmask_up(void)
 {
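The new preempt_disable()/preempt_enable() pair keeps the caller pinned to one cpu while it walks the online map; on these kernels cpu offlining goes through stop_machine, which cannot proceed while any cpu runs with preemption disabled, so the loop cannot race with a cpu going away. The same idiom, sketched with a hypothetical per-cpu callout standing in for smp_call_function_on():

    #include <linux/preempt.h>
    #include <linux/cpumask.h>

    /* demo_poke_cpu() is hypothetical; the point is only the
     * preemption bracket around the online-cpu loop. */
    static void demo_poke_each_online(void (*demo_poke_cpu)(int))
    {
            int cpu;

            preempt_disable();
            for_each_online_cpu(cpu)
                    demo_poke_cpu(cpu);
            preempt_enable();
    }
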
@@ -493,8 +520,10 @@ static int iucv_enable(void)
                goto out;
        /* Declare per cpu buffers. */
        rc = -EIO;
+       preempt_disable();
        for_each_online_cpu(cpu)
                smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+       preempt_enable();
        if (cpus_empty(iucv_buffer_cpumask))
                /* No cpu could declare an iucv buffer. */
                goto out_path;
@@ -527,6 +556,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                if (!percpu_populate(iucv_irq_data,
                                     sizeof(struct iucv_irq_data),
                                     GFP_KERNEL|GFP_DMA, cpu))
@@ -538,15 +568,20 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                }
                break;
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                percpu_depopulate(iucv_param, cpu);
                percpu_depopulate(iucv_irq_data, cpu);
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
        case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
                break;
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
                cpumask = iucv_buffer_cpumask;
                cpu_clear(cpu, cpumask);
                if (cpus_empty(cpumask))
@@ -561,7 +596,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static struct notifier_block iucv_cpu_notifier = {
+static struct notifier_block __cpuinitdata iucv_cpu_notifier = {
        .notifier_call = iucv_cpu_notify,
 };
 
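The *_FROZEN action codes are the suspend/resume variants of the hotplug notifications introduced in 2.6.21; treating each one exactly like its runtime counterpart keeps the per-cpu buffers correct across a suspend cycle. Marking the notifier block __cpuinitdata lets it be discarded when cpu hotplug support is configured out. A condensed sketch of the fall-through pattern, with hypothetical demo_* names:

    #include <linux/cpu.h>
    #include <linux/notifier.h>
    #include <linux/init.h>

    static int __cpuinit demo_cpu_notify(struct notifier_block *self,
                                         unsigned long action, void *hcpu)
    {
            switch (action) {
            case CPU_UP_PREPARE:
            case CPU_UP_PREPARE_FROZEN:
                    /* allocate per-cpu resources */
                    break;
            case CPU_DEAD:
            case CPU_DEAD_FROZEN:
                    /* release per-cpu resources */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block __cpuinitdata demo_cpu_notifier = {
            .notifier_call = demo_cpu_notify,
    };
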
@@ -584,48 +619,49 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
        return iucv_call_b2f0(IUCV_SEVER, parm);
 }
 
+#ifdef CONFIG_SMP
 /**
- * __iucv_cleanup_pathid
+ * __iucv_cleanup_queue
  * @dummy: unused dummy argument
  *
  * Nop function called via smp_call_function to force work items from
  * pending external iucv interrupts to the work queue.
  */
-static void __iucv_cleanup_pathid(void *dummy)
+static void __iucv_cleanup_queue(void *dummy)
 {
 }
+#endif
 
 /**
- * iucv_cleanup_pathid
- * @pathid: 16 bit pathid
+ * iucv_cleanup_queue
  *
  * Function called after a path has been severed to find all remaining
  * work items for the now stale pathid. The caller needs to hold the
  * iucv_table_lock.
  */
-static void iucv_cleanup_pathid(u16 pathid)
+static void iucv_cleanup_queue(void)
 {
-       struct iucv_work *p, *n;
+       struct iucv_irq_list *p, *n;
 
        /*
-        * Path is severed, the pathid can be reused immediatly on
-        * a iucv connect or a connection pending interrupt.
-        * iucv_path_connect and connection pending interrupt will
-        * wait until the iucv_table_lock is released before the
-        * recycled pathid enters the system.
-        * Force remaining interrupts to the work queue, then
-        * scan the work queue for items of this path.
+        * When a path is severed, the pathid can be reused immediately
+        * on an iucv connect or a connection pending interrupt. Remove
+        * all entries from the task queue that refer to a stale pathid
+        * (iucv_path_table[ix] == NULL). Only then do the iucv connect
+        * or deliver the connection pending interrupt. To get all the
+        * pending interrupts force them to the work queue by calling
+        * an empty function on all cpus.
         */
-       smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
-       spin_lock_irq(&iucv_work_lock);
-       list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
-               /* Remove work items for pathid except connection pending */
-               if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
+       smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+       spin_lock_irq(&iucv_queue_lock);
+       list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
+               /* Remove stale work items from the task queue. */
+               if (iucv_path_table[p->data.ippathid] == NULL) {
                        list_del(&p->list);
                        kfree(p);
                }
        }
-       spin_unlock_irq(&iucv_work_lock);
+       spin_unlock_irq(&iucv_queue_lock);
 }
 
 /**
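The rewritten cleanup no longer matches on a particular pathid. Running an empty function on every other cpu via smp_call_function() guarantees that each cpu has finished delivering any external interrupt it had in flight, so every outstanding buffer is on the queue by the time the list walk starts; entries whose path table slot is NULL are then stale by definition and can be freed. The drain-then-purge idiom, sketched with hypothetical demo_* names (note the 2.6-era four-argument smp_call_function()):

    #include <linux/smp.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_entry {
            struct list_head list;
            u16 pathid;
    };

    static void demo_nop(void *dummy)
    {
    }

    static void demo_purge_stale(struct list_head *queue, spinlock_t *lock,
                                 int (*demo_stale)(struct demo_entry *))
    {
            struct demo_entry *p, *n;

            /* Force in-flight interrupts onto the queue first. */
            smp_call_function(demo_nop, NULL, 0, 1);
            spin_lock_irq(lock);
            list_for_each_entry_safe(p, n, queue, list)
                    if (demo_stale(p)) {
                            list_del(&p->list);
                            kfree(p);
                    }
            spin_unlock_irq(lock);
    }
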
@@ -663,6 +699,7 @@ out_mutex:
        mutex_unlock(&iucv_register_mutex);
        return rc;
 }
+EXPORT_SYMBOL(iucv_register);
 
 /**
  * iucv_unregister
@@ -684,7 +721,6 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
                iucv_sever_pathid(p->pathid, NULL);
                iucv_path_table[p->pathid] = NULL;
                list_del(&p->list);
-               iucv_cleanup_pathid(p->pathid);
                iucv_path_free(p);
        }
        spin_unlock_bh(&iucv_table_lock);
@@ -696,6 +732,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
                iucv_setmask_mp();
        mutex_unlock(&iucv_register_mutex);
 }
+EXPORT_SYMBOL(iucv_unregister);
 
 /**
  * iucv_path_accept
@@ -734,6 +771,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_path_accept);
 
 /**
  * iucv_path_connect
@@ -757,9 +795,9 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
        union iucv_param *parm;
        int rc;
 
-       preempt_disable();
-       if (iucv_tasklet_cpu != smp_processor_id())
-               spin_lock_bh(&iucv_table_lock);
+       BUG_ON(in_atomic());
+       spin_lock_bh(&iucv_table_lock);
+       iucv_cleanup_queue();
        parm = percpu_ptr(iucv_param, smp_processor_id());
        memset(parm, 0, sizeof(union iucv_param));
        parm->ctrl.ipmsglim = path->msglim;
@@ -794,11 +832,10 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
                        rc = -EIO;
                }
        }
-       if (iucv_tasklet_cpu != smp_processor_id())
-               spin_unlock_bh(&iucv_table_lock);
-       preempt_enable();
+       spin_unlock_bh(&iucv_table_lock);
        return rc;
 }
+EXPORT_SYMBOL(iucv_path_connect);
 
 /**
  * iucv_path_quiesce:
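In the hunks above, iucv_path_connect drops the old conditional "skip the lock if we are the tasklet cpu" dance: connect is never called from a delivery handler, so it can assert process context and take the table lock unconditionally, purging stale queue entries before a pathid is recycled. A hedged sketch of that shape, with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/hardirq.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_connect_lock);

    static int demo_connect(void)
    {
            /* Connect is only legal from process context. */
            BUG_ON(in_atomic());
            spin_lock_bh(&demo_connect_lock);
            /* ... purge stale queue entries, then reuse a pathid ... */
            spin_unlock_bh(&demo_connect_lock);
            return 0;
    }
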
@@ -825,6 +862,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_path_quiesce);
 
 /**
  * iucv_path_resume:
@@ -865,21 +903,20 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 {
        int rc;
 
-
        preempt_disable();
-       if (iucv_tasklet_cpu != smp_processor_id())
+       if (iucv_active_cpu != smp_processor_id())
                spin_lock_bh(&iucv_table_lock);
        rc = iucv_sever_pathid(path->pathid, userdata);
        if (!rc) {
                iucv_path_table[path->pathid] = NULL;
                list_del_init(&path->list);
-               iucv_cleanup_pathid(path->pathid);
        }
-       if (iucv_tasklet_cpu != smp_processor_id())
+       if (iucv_active_cpu != smp_processor_id())
                spin_unlock_bh(&iucv_table_lock);
        preempt_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_path_sever);
 
 /**
  * iucv_message_purge
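iucv_path_sever, by contrast, may be called from a handler callback, i.e. from inside the tasklet or the work function while iucv_table_lock is already held on this cpu; the iucv_active_cpu marker (set by both delivery functions below) lets the code detect that case and skip retaking the lock instead of deadlocking on itself. The idiom in isolation, with hypothetical demo_* names:

    #include <linux/spinlock.h>
    #include <linux/smp.h>
    #include <linux/preempt.h>

    static DEFINE_SPINLOCK(demo_table_lock);
    static int demo_active_cpu = -1;  /* cpu running a delivery handler */

    static void demo_table_op(void)
    {
            preempt_disable();
            if (demo_active_cpu != smp_processor_id())
                    spin_lock_bh(&demo_table_lock);
            /* ... modify the table ... */
            if (demo_active_cpu != smp_processor_id())
                    spin_unlock_bh(&demo_table_lock);
            preempt_enable();
    }
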
@@ -912,6 +949,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_message_purge);
 
 /**
  * iucv_message_receive
@@ -982,6 +1020,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_message_receive);
 
 /**
  * iucv_message_reject
@@ -1010,6 +1049,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_message_reject);
 
 /**
  * iucv_message_reply
@@ -1053,6 +1093,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_message_reply);
 
 /**
  * iucv_message_send
@@ -1101,6 +1142,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_message_send);
 
 /**
  * iucv_message_send2way
@@ -1157,6 +1199,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
        local_bh_enable();
        return rc;
 }
+EXPORT_SYMBOL(iucv_message_send2way);
 
 /**
  * iucv_path_pending
@@ -1244,8 +1287,7 @@ static void iucv_path_complete(struct iucv_irq_data *data)
        struct iucv_path_complete *ipc = (void *) data;
        struct iucv_path *path = iucv_path_table[ipc->ippathid];
 
-       BUG_ON(!path || !path->handler);
-       if (path->handler->path_complete)
+       if (path && path->handler && path->handler->path_complete)
                path->handler->path_complete(path, ipc->ipuser);
 }
 
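With deferred delivery an interrupt buffer can legitimately outlive its path: the path may be severed and its table slot cleared before the tasklet runs. This and the following hunks therefore stop treating a missing path as a bug and simply drop such buffers. The defensive dispatch shape, with hypothetical types:

    #include <linux/types.h>

    struct demo_path;

    struct demo_handler {
            void (*path_complete)(struct demo_path *, u8 *);
    };

    struct demo_path {
            struct demo_handler *handler;
    };

    static struct demo_path *demo_path_table[16];

    static void demo_path_complete(u16 pathid, u8 *user)
    {
            struct demo_path *path = demo_path_table[pathid];

            /* A NULL slot just means the path is already gone. */
            if (path && path->handler && path->handler->path_complete)
                    path->handler->path_complete(path, user);
    }
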
@@ -1273,14 +1315,14 @@ static void iucv_path_severed(struct iucv_irq_data *data)
        struct iucv_path_severed *ips = (void *) data;
        struct iucv_path *path = iucv_path_table[ips->ippathid];
 
-       BUG_ON(!path || !path->handler);
+       if (!path || !path->handler)    /* Already severed */
+               return;
        if (path->handler->path_severed)
                path->handler->path_severed(path, ips->ipuser);
        else {
                iucv_sever_pathid(path->pathid, NULL);
                iucv_path_table[path->pathid] = NULL;
                list_del_init(&path->list);
-               iucv_cleanup_pathid(path->pathid);
                iucv_path_free(path);
        }
 }
@@ -1309,8 +1351,7 @@ static void iucv_path_quiesced(struct iucv_irq_data *data)
        struct iucv_path_quiesced *ipq = (void *) data;
        struct iucv_path *path = iucv_path_table[ipq->ippathid];
 
-       BUG_ON(!path || !path->handler);
-       if (path->handler->path_quiesced)
+       if (path && path->handler && path->handler->path_quiesced)
                path->handler->path_quiesced(path, ipq->ipuser);
 }
 
@@ -1338,8 +1379,7 @@ static void iucv_path_resumed(struct iucv_irq_data *data)
        struct iucv_path_resumed *ipr = (void *) data;
        struct iucv_path *path = iucv_path_table[ipr->ippathid];
 
-       BUG_ON(!path || !path->handler);
-       if (path->handler->path_resumed)
+       if (path && path->handler && path->handler->path_resumed)
                path->handler->path_resumed(path, ipr->ipuser);
 }
 
@@ -1371,8 +1411,7 @@ static void iucv_message_complete(struct iucv_irq_data *data)
        struct iucv_path *path = iucv_path_table[imc->ippathid];
        struct iucv_message msg;
 
-       BUG_ON(!path || !path->handler);
-       if (path->handler->message_complete) {
+       if (path && path->handler && path->handler->message_complete) {
                msg.flags = imc->ipflags1;
                msg.id = imc->ipmsgid;
                msg.audit = imc->ipaudit;
@@ -1417,8 +1456,7 @@ static void iucv_message_pending(struct iucv_irq_data *data)
        struct iucv_path *path = iucv_path_table[imp->ippathid];
        struct iucv_message msg;
 
-       BUG_ON(!path || !path->handler);
-       if (path->handler->message_pending) {
+       if (path && path->handler && path->handler->message_pending) {
                msg.flags = imp->ipflags1;
                msg.id = imp->ipmsgid;
                msg.class = imp->iptrgcls;
@@ -1433,17 +1471,16 @@ static void iucv_message_pending(struct iucv_irq_data *data)
 }
 
 /**
- * iucv_tasklet_handler:
+ * iucv_tasklet_fn:
  *
  * This tasklet loops over the queue of irq buffers created by
  * iucv_external_interrupt, calls the appropriate action handler
  * and then frees the buffer.
  */
-static void iucv_tasklet_handler(unsigned long ignored)
+static void iucv_tasklet_fn(unsigned long ignored)
 {
        typedef void iucv_irq_fn(struct iucv_irq_data *);
        static iucv_irq_fn *irq_fn[] = {
-               [0x01] = iucv_path_pending,
                [0x02] = iucv_path_complete,
                [0x03] = iucv_path_severed,
                [0x04] = iucv_path_quiesced,
@@ -1453,38 +1490,70 @@ static void iucv_tasklet_handler(unsigned long ignored)
                [0x08] = iucv_message_pending,
                [0x09] = iucv_message_pending,
        };
-       struct iucv_work *p;
+       struct list_head task_queue = LIST_HEAD_INIT(task_queue);
+       struct iucv_irq_list *p, *n;
 
        /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
        spin_lock(&iucv_table_lock);
-       iucv_tasklet_cpu = smp_processor_id();
+       iucv_active_cpu = smp_processor_id();
+
+       spin_lock_irq(&iucv_queue_lock);
+       list_splice_init(&iucv_task_queue, &task_queue);
+       spin_unlock_irq(&iucv_queue_lock);
 
-       spin_lock_irq(&iucv_work_lock);
-       while (!list_empty(&iucv_work_queue)) {
-               p = list_entry(iucv_work_queue.next, struct iucv_work, list);
+       list_for_each_entry_safe(p, n, &task_queue, list) {
                list_del_init(&p->list);
-               spin_unlock_irq(&iucv_work_lock);
                irq_fn[p->data.iptype](&p->data);
                kfree(p);
-               spin_lock_irq(&iucv_work_lock);
        }
-       spin_unlock_irq(&iucv_work_lock);
 
-       iucv_tasklet_cpu = -1;
+       iucv_active_cpu = -1;
        spin_unlock(&iucv_table_lock);
 }
 
+/**
+ * iucv_work_fn:
+ *
+ * This work function loops over the queue of path pending irq blocks
+ * created by iucv_external_interrupt, calls the appropriate action
+ * handler and then frees the buffer.
+ */
+static void iucv_work_fn(struct work_struct *work)
+{
+       typedef void iucv_irq_fn(struct iucv_irq_data *);
+       struct list_head work_queue = LIST_HEAD_INIT(work_queue);
+       struct iucv_irq_list *p, *n;
+
+       /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
+       spin_lock_bh(&iucv_table_lock);
+       iucv_active_cpu = smp_processor_id();
+
+       spin_lock_irq(&iucv_queue_lock);
+       list_splice_init(&iucv_work_queue, &work_queue);
+       spin_unlock_irq(&iucv_queue_lock);
+
+       iucv_cleanup_queue();
+       list_for_each_entry_safe(p, n, &work_queue, list) {
+               list_del_init(&p->list);
+               iucv_path_pending(&p->data);
+               kfree(p);
+       }
+
+       iucv_active_cpu = -1;
+       spin_unlock_bh(&iucv_table_lock);
+}
+
 /**
  * iucv_external_interrupt
  * @code: irq code
  *
  * Handles external interrupts coming in from CP.
- * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
+ * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn() or iucv_work_fn().
  */
 static void iucv_external_interrupt(u16 code)
 {
        struct iucv_irq_data *p;
-       struct iucv_work *work;
+       struct iucv_irq_list *work;
 
        p = percpu_ptr(iucv_irq_data, smp_processor_id());
        if (p->ippathid >= iucv_max_pathid) {
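Both delivery functions above use the same drain strategy: splice the whole shared queue onto a private list while holding the queue lock, then walk the private list with the lock dropped, so the lock is taken once per batch rather than once per entry. The idiom on its own, with hypothetical demo_* names:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct demo_entry {
            struct list_head list;
    };

    static LIST_HEAD(demo_queue);
    static DEFINE_SPINLOCK(demo_lock);

    static void demo_drain(void (*handle)(struct demo_entry *))
    {
            struct list_head batch = LIST_HEAD_INIT(batch);
            struct demo_entry *p, *n;

            spin_lock_irq(&demo_lock);
            list_splice_init(&demo_queue, &batch);  /* grab the batch */
            spin_unlock_irq(&demo_lock);

            list_for_each_entry_safe(p, n, &batch, list) {
                    list_del_init(&p->list);
                    handle(p);
                    kfree(p);
            }
    }
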
@@ -1498,16 +1567,23 @@ static void iucv_external_interrupt(u16 code)
                printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n");
                return;
        }
-       work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC);
+       work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
        if (!work) {
                printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
                return;
        }
        memcpy(&work->data, p, sizeof(work->data));
-       spin_lock(&iucv_work_lock);
-       list_add_tail(&work->list, &iucv_work_queue);
-       spin_unlock(&iucv_work_lock);
-       tasklet_schedule(&iucv_tasklet);
+       spin_lock(&iucv_queue_lock);
+       if (p->iptype == 0x01) {
+               /* Path pending interrupt. */
+               list_add_tail(&work->list, &iucv_work_queue);
+               schedule_work(&iucv_work);
+       } else {
+               /* The other interrupts. */
+               list_add_tail(&work->list, &iucv_task_queue);
+               tasklet_schedule(&iucv_tasklet);
+       }
+       spin_unlock(&iucv_queue_lock);
 }
 
 /**
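On the producer side the external interrupt handler copies the per-cpu buffer and routes it by type under the queue lock: path pending (0x01) goes to the work queue, everything else to the task queue. A sketch of that routing decision, reusing the hypothetical demo_* declarations from the earlier sketches in this diff:

    /* Route a queued buffer by interrupt type; 0x01 is the path
     * pending type, as in the hunk above. Runs in the external
     * interrupt handler, so a plain spin_lock suffices here. */
    static void demo_route(struct demo_entry *e, u8 iptype)
    {
            spin_lock(&demo_queue_lock);
            if (iptype == 0x01) {
                    list_add_tail(&e->list, &demo_work_queue);
                    schedule_work(&demo_work);
            } else {
                    list_add_tail(&e->list, &demo_task_queue);
                    tasklet_schedule(&demo_tasklet);
            }
            spin_unlock(&demo_queue_lock);
    }
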
@@ -1515,7 +1591,7 @@ static void iucv_external_interrupt(u16 code)
  *
  * Allocates and initializes various data structures.
  */
-static int iucv_init(void)
+static int __init iucv_init(void)
 {
        int rc;
 
@@ -1526,7 +1602,7 @@ static int iucv_init(void)
        rc = iucv_query_maxconn();
        if (rc)
                goto out;
-       rc = register_external_interrupt (0x4000, iucv_external_interrupt);
+       rc = register_external_interrupt(0x4000, iucv_external_interrupt);
        if (rc)
                goto out;
        rc = bus_register(&iucv_bus);
@@ -1537,7 +1613,7 @@ static int iucv_init(void)
                rc = PTR_ERR(iucv_root);
                goto out_bus;
        }
-       /* Note: GFP_DMA used used to get memory below 2G */
+       /* Note: GFP_DMA used to get memory below 2G */
        iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
                                     GFP_KERNEL|GFP_DMA);
        if (!iucv_irq_data) {
@@ -1575,14 +1651,16 @@ out:
  *
  * Frees everything allocated from iucv_init.
  */
-static void iucv_exit(void)
+static void __exit iucv_exit(void)
 {
-       struct iucv_work *p, *n;
+       struct iucv_irq_list *p, *n;
 
-       spin_lock_irq(&iucv_work_lock);
+       spin_lock_irq(&iucv_queue_lock);
+       list_for_each_entry_safe(p, n, &iucv_task_queue, list)
+               kfree(p);
        list_for_each_entry_safe(p, n, &iucv_work_queue, list)
                kfree(p);
-       spin_unlock_irq(&iucv_work_lock);
+       spin_unlock_irq(&iucv_queue_lock);
        unregister_hotcpu_notifier(&iucv_cpu_notifier);
        percpu_free(iucv_param);
        percpu_free(iucv_irq_data);
@@ -1594,24 +1672,6 @@ static void iucv_exit(void)
 subsys_initcall(iucv_init);
 module_exit(iucv_exit);
 
-/**
- * Export all public stuff
- */
-EXPORT_SYMBOL (iucv_bus);
-EXPORT_SYMBOL (iucv_root);
-EXPORT_SYMBOL (iucv_register);
-EXPORT_SYMBOL (iucv_unregister);
-EXPORT_SYMBOL (iucv_path_accept);
-EXPORT_SYMBOL (iucv_path_connect);
-EXPORT_SYMBOL (iucv_path_quiesce);
-EXPORT_SYMBOL (iucv_path_sever);
-EXPORT_SYMBOL (iucv_message_purge);
-EXPORT_SYMBOL (iucv_message_receive);
-EXPORT_SYMBOL (iucv_message_reject);
-EXPORT_SYMBOL (iucv_message_reply);
-EXPORT_SYMBOL (iucv_message_send);
-EXPORT_SYMBOL (iucv_message_send2way);
-
 MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
 MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
 MODULE_LICENSE("GPL");