static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
-static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
+ u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
iucv_sock_clear_timer(sk);
lock_sock(sk);
- switch(sk->sk_state) {
+ switch (sk->sk_state) {
case IUCV_LISTEN:
iucv_sock_cleanup_listen(sk);
break;
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
- if(!skb_queue_empty(&iucv->send_skb_q)) {
+ if (!skb_queue_empty(&iucv->send_skb_q)) {
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
timeo = sk->sk_lingertime;
else
struct iucv_sock *isk, *n;
struct sock *sk;
- list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+ list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
sk = (struct sock *) isk;
lock_sock(sk);
/* Wait for an incoming connection */
add_wait_queue_exclusive(sk->sk_sleep, &wait);
- while (!(nsk = iucv_accept_dequeue(sk, newsock))){
+ while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
goto out;
}
- if (sk->sk_state == IUCV_CONNECTED){
- if(!(skb = sock_alloc_send_skb(sk, len,
- msg->msg_flags & MSG_DONTWAIT,
- &err)))
+ if (sk->sk_state == IUCV_CONNECTED) {
+ if (!(skb = sock_alloc_send_skb(sk, len,
+ msg->msg_flags & MSG_DONTWAIT,
+ &err)))
goto out;
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto fail;
}
/* Queue backlog skbs */
rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
- while(rskb) {
+ while (rskb) {
if (sock_queue_rcv_skb(sk, rskb)) {
skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
rskb);
struct iucv_sock *isk, *n;
struct sock *sk;
- list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+ list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
sk = (struct sock *) isk;
if (sk->sk_state == IUCV_CONNECTED)
mask |= POLLHUP;
if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
if (sk->sk_state == IUCV_CLOSED)
return -EINVAL;
lock_sock(sk);
- switch(sk->sk_state) {
+ switch (sk->sk_state) {
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
(void *) prmmsg, 8);
if (err) {
- switch(err) {
+ switch (err) {
case 1:
err = -ENOTCONN;
break;
/* Create the new socket */
nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
- if (!nsk){
+ if (!nsk) {
err = iucv_path_sever(path, user_data);
goto fail;
}
path->msglim = IUCV_QUEUELEN_DEFAULT;
err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
- if (err){
+ if (err) {
err = iucv_path_sever(path, user_data);
goto fail;
}
struct sk_buff *nskb;
dataleft = len;
- while(dataleft) {
+ while (dataleft) {
if (dataleft >= sk->sk_rcvbuf / 4)
size = sk->sk_rcvbuf / 4;
else
return 0;
}
+
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
struct sock *sk = path->private;
}
/* Queue the fragmented skb */
fskb = skb_dequeue(&fragmented_skb_q);
- while(fskb) {
+ while (fskb) {
if (!skb_queue_empty(&iucv->backlog_skb_q))
skb_queue_tail(&iucv->backlog_skb_q, fskb);
else if (sock_queue_rcv_skb(sk, fskb))
kfree_skb(this);
}
- if (sk->sk_state == IUCV_CLOSING){
+ if (sk->sk_state == IUCV_CLOSING) {
if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
.create = iucv_sock_create,
};
-static int afiucv_init(void)
+static int __init afiucv_init(void)
{
int err;
#include <linux/module.h>
#include <linux/moduleparam.h>
-
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#define IUCV_IPNORPY 0x10
#define IUCV_IPALL 0x80
-static int iucv_bus_match (struct device *dev, struct device_driver *drv)
+static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
return 0;
}
.name = "iucv",
.match = iucv_bus_match,
};
+EXPORT_SYMBOL(iucv_bus);
struct device *iucv_root;
+EXPORT_SYMBOL(iucv_root);
+
static int iucv_available;
/* General IUCV interrupt structure */
rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
if (rc) {
char *err = "Unknown";
- switch(rc) {
+ switch (rc) {
case 0x03:
err = "Directory error";
break;
return NOTIFY_OK;
}
-static struct notifier_block iucv_cpu_notifier = {
+static struct notifier_block __cpuinitdata iucv_cpu_notifier = {
.notifier_call = iucv_cpu_notify,
};
mutex_unlock(&iucv_register_mutex);
return rc;
}
+EXPORT_SYMBOL(iucv_register);
/**
* iucv_unregister
iucv_setmask_mp();
mutex_unlock(&iucv_register_mutex);
}
+EXPORT_SYMBOL(iucv_unregister);
/**
* iucv_path_accept
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_path_accept);
/**
* iucv_path_connect
spin_unlock_bh(&iucv_table_lock);
return rc;
}
+EXPORT_SYMBOL(iucv_path_connect);
/**
* iucv_path_quiesce:
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_path_quiesce);
/**
* iucv_path_resume:
{
int rc;
-
preempt_disable();
if (iucv_active_cpu != smp_processor_id())
spin_lock_bh(&iucv_table_lock);
preempt_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_path_sever);
/**
* iucv_message_purge
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_message_purge);
/**
* iucv_message_receive
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_message_receive);
/**
* iucv_message_reject
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_message_reject);
/**
* iucv_message_reply
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_message_reply);
/**
* iucv_message_send
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_message_send);
/**
* iucv_message_send2way
local_bh_enable();
return rc;
}
+EXPORT_SYMBOL(iucv_message_send2way);
/**
* iucv_path_pending
*
* Allocates and initializes various data structures.
*/
-static int iucv_init(void)
+static int __init iucv_init(void)
{
int rc;
rc = iucv_query_maxconn();
if (rc)
goto out;
- rc = register_external_interrupt (0x4000, iucv_external_interrupt);
+ rc = register_external_interrupt(0x4000, iucv_external_interrupt);
if (rc)
goto out;
rc = bus_register(&iucv_bus);
rc = PTR_ERR(iucv_root);
goto out_bus;
}
- /* Note: GFP_DMA used used to get memory below 2G */
+ /* Note: GFP_DMA used to get memory below 2G */
iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
GFP_KERNEL|GFP_DMA);
if (!iucv_irq_data) {
*
* Frees everything allocated from iucv_init.
*/
-static void iucv_exit(void)
+static void __exit iucv_exit(void)
{
struct iucv_irq_list *p, *n;
subsys_initcall(iucv_init);
module_exit(iucv_exit);
-/**
- * Export all public stuff
- */
-EXPORT_SYMBOL (iucv_bus);
-EXPORT_SYMBOL (iucv_root);
-EXPORT_SYMBOL (iucv_register);
-EXPORT_SYMBOL (iucv_unregister);
-EXPORT_SYMBOL (iucv_path_accept);
-EXPORT_SYMBOL (iucv_path_connect);
-EXPORT_SYMBOL (iucv_path_quiesce);
-EXPORT_SYMBOL (iucv_path_sever);
-EXPORT_SYMBOL (iucv_message_purge);
-EXPORT_SYMBOL (iucv_message_receive);
-EXPORT_SYMBOL (iucv_message_reject);
-EXPORT_SYMBOL (iucv_message_reply);
-EXPORT_SYMBOL (iucv_message_send);
-EXPORT_SYMBOL (iucv_message_send2way);
-
MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");