/*
 * IUCV network driver
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *	Original netiucv driver:
 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *	Sysfs integration and all bugs therein:
 *		Cornelia Huck (cornelia.huck@de.ibm.com)
 *	PM functions:
 *		Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Documentation used:
 *  the source of the original IUCV driver by:
 *    Stefan Hegewald <hegewald@de.ibm.com>
 *    Hartmut Penner <hpenner@de.ibm.com>
 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define KMSG_COMPONENT "netiucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#undef DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>

#include <net/iucv/iucv.h>
#include "fsm.h"

MODULE_AUTHOR
    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");

/**
 * Debug Facility stuff
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3

#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/* Allow to sort out low debug levels early to avoid wasted sprints */
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
{
	return (level <= dbf_grp->level);
}

#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(__buf, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)

#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
		debug_sprintf_event(iucv_dbf_trace, level, text ); \
	} while (0)

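/*
 * Typical usage of the debug macros above, as found later in this
 * driver (shown here purely as an illustration):
 *
 *	IUCV_DBF_TEXT(trace, 3, __func__);
 *	IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
 *
 * IUCV_DBF_TEXT_() formats into a per-CPU buffer first, so the level
 * check in iucv_dbf_passes() avoids the sprintf() when the message
 * would be filtered out anyway.
 */
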
/**
 * some more debug stuff
 */
#define IUCV_HEXDUMP16(importance,header,ptr) \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
		   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
		   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
		   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
		   *(((char*)ptr)+12),*(((char*)ptr)+13), \
		   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
		   *(((char*)ptr)+30),*(((char*)ptr)+31));

#define PRINTK_HEADER " iucv: "       /* for debugging */

/* dummy device to make sure netiucv_pm functions are called */
static struct device *netiucv_dev;

static int netiucv_pm_prepare(struct device *);
static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);

static const struct dev_pm_ops netiucv_pm_ops = {
	.prepare = netiucv_pm_prepare,
	.complete = netiucv_pm_complete,
	.freeze = netiucv_pm_freeze,
	.thaw = netiucv_pm_restore_thaw,
	.restore = netiucv_pm_restore_thaw,
};

static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name = "netiucv",
	.bus  = &iucv_bus,
	.pm = &netiucv_pm_ops,
};

static int netiucv_callback_connreq(struct iucv_path *,
				    u8 ipvmid[8], u8 ipuser[16]);
static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};

/**
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long txlen;
	unsigned long tx_time;
	struct timespec send_stamp;
	unsigned long tx_pending;
	unsigned long tx_max_pending;
};

/**
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;
	struct iucv_path	  *path;
	struct sk_buff		  *rx_buff;
	struct sk_buff		  *tx_buff;
	struct sk_buff_head	  collect_queue;
	struct sk_buff_head	  commit_queue;
	spinlock_t		  collect_lock;
	int			  collect_len;
	int			  max_buffsize;
	fsm_timer		  timer;
	fsm_instance		  *fsm;
	struct net_device	  *netdev;
	struct connection_profile prof;
	char			  userid[9];
	char			  userdata[17];
};

/**
 * Linked list of all connection structs.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/**
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;
	void			*data;
};

/**
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;
	unsigned long		tbusy;
	fsm_instance		*fsm;
	struct iucv_connection	*conn;
	struct device		*dev;
	int			 pm_state;
};

/**
 * Link level header for a packet.
 */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 65537
#define NETIUCV_BUFSIZE_DEFAULT	 NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX		 (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT	 9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC	 5000

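/*
 * Note: every packet inside an IUCV buffer is preceded by a struct
 * ll_header, so the usable MTU is always smaller than the negotiated
 * buffer size (see netiucv_unpack_skb() and netiucv_transmit_skb()
 * below for how these headers are written and parsed).
 */
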
/**
 * Compatibility macros for busy handling
 * of network devices.
 */
static inline void netiucv_clear_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	clear_bit(0, &priv->tbusy);
	netif_wake_queue(dev);
}

static inline int netiucv_test_and_set_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	netif_stop_queue(dev);
	return test_and_set_bit(0, &priv->tbusy);
}

static u8 iucvMagic_ascii[16] = {
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};

static u8 iucvMagic_ebcdic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};

/**
 * Convert an iucv userId to its printable
 * form (strip whitespace at end).
 *
 * @param An iucv userId
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name, int len)
{
	static char tmp[17];
	char *p = tmp;
	memcpy(tmp, name, len);
	tmp[len] = '\0';
	while (*p && ((p - tmp) < len) && (!isspace(*p)))
		p++;
	*p = '\0';
	return tmp;
}

static char *netiucv_printuser(struct iucv_connection *conn)
{
	static char tmp_uid[9];
	static char tmp_udat[17];
	static char buf[100];

	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
		tmp_uid[8] = '\0';
		tmp_udat[16] = '\0';
		memcpy(tmp_uid, conn->userid, 8);
		memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
		memcpy(tmp_udat, conn->userdata, 16);
		EBCASC(tmp_udat, 16);
		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
		return buf;
	} else
		return netiucv_printname(conn->userid, 8);
}

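/*
 * Example (illustrative only): for a connection with userid "USER1   "
 * and the default iucvMagic user data, netiucv_printuser() falls back
 * to netiucv_printname() and returns "USER1"; with non-default user
 * data it returns "USER1.<userdata>".  Both helpers hand out pointers
 * to static buffers, so the result is only valid until the next call.
 */
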
/**
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/**
	 * MUST always be the last element!!
	 */
	NR_DEV_STATES
};

static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};

/**
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,
	DEV_EVENT_CONDOWN,
	/**
	 * MUST always be the last element!!
	 */
	NR_DEV_EVENTS
};

static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};

/**
 * Events of the connection statemachine
 */
enum conn_events {
	/**
	 * Events representing callbacks from
	 * the lowlevel iucv layer
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,

	/**
	 * Events representing error return codes from
	 * calls to the lowlevel iucv layer
	 */

	/**
	 * Event representing timer expiry.
	 */
	CONN_EVENT_TIMER,

	/**
	 * Events representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/**
	 * MUST always be the last element!!
	 */
	NR_CONN_EVENTS,
};

static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",

	"Timer",

	"Start",
	"Stop",
};

/**
 * States of the connection statemachine.
 */
enum conn_states {
	/**
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,

	/**
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,

	/**
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/**
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/**
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,

	/**
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,

	/**
	 * Error during registration.
	 */
	CONN_STATE_REGERR,

	/**
	 * Error during connection setup.
	 */
	CONN_STATE_CONNERR,

	/**
	 * MUST always be the last element!!
	 */
	NR_CONN_STATES,
};

static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Terminating",
	"Registration error",
	"Connect error",
};

/**
 * Debug Facility Stuff
 */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);

static void iucv_unregister_dbf_views(void)
{
	if (iucv_dbf_setup)
		debug_unregister(iucv_dbf_setup);
	if (iucv_dbf_data)
		debug_unregister(iucv_dbf_data);
	if (iucv_dbf_trace)
		debug_unregister(iucv_dbf_trace);
}

static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}

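/*
 * The three debug logs registered above (iucv_setup, iucv_data and
 * iucv_trace) are s390 debug feature entries with hex_ascii views;
 * they are typically inspected under the s390dbf directory of debugfs
 * (the exact path is an assumption and depends on where debugfs is
 * mounted on the system).
 */
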
/*
 * Callback-wrappers, called from lowlevel iucv layer.
 */

static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}

static void netiucv_callback_txdone(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}

static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}

static int netiucv_callback_connreq(struct iucv_path *path,
				    u8 ipvmid[8], u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	static char tmp_user[9];
	static char tmp_udat[17];
	int rc;

	rc = -EINVAL;
	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
	memcpy(tmp_udat, ipuser, 16);
	EBCASC(tmp_udat, 16);
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8) ||
		    strncmp(ipuser, conn->userdata, 16))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
		       tmp_user, netiucv_printname(tmp_udat, 16));
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}

static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}

/**
 * NOP action for statemachines
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions of the connection statemachine
 */

/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = ntohs(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}

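/*
 * Layout of the receive buffer unpacked above (mirrored on the sending
 * side by netiucv_transmit_skb()/conn_action_txdone()): a sequence of
 * [ll_header][packet data] pairs, where each header's "next" field
 * holds the offset of the following header counted from the start of
 * the buffer, and a final header with next == 0 terminates the chain.
 */
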
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}

static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	txmsg.class = 0;
	txmsg.tag = 0;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}

static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}

static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

	IUCV_DBF_TEXT(trace, 3, __func__);
	iucv_path_sever(path, NULL);
}

static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}

static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}

static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
			       "connection\n", netiucv_printuser(conn));
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}

static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, CONN_STATE_STARTWAIT);

	/*
	 * We must set the state before calling iucv_connect because the
	 * callback handler could be called at any point after the connection
	 * request is sent
	 */

	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
		netdev->name, netiucv_printuser(conn));

	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, conn->userdata, conn);
	switch (rc) {
	case 0:
		netdev->tx_queue_len = conn->path->msglim;
		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
			     CONN_EVENT_TIMER, conn);
		return;
	case 11:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to z/VM guest %s\n",
			netiucv_printname(conn->userid, 8));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 12:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to the peer on z/VM"
			" guest %s\n", netiucv_printname(conn->userid, 8));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 13:
		dev_err(privptr->dev,
			"Connecting the IUCV device would exceed the maximum"
			" number of IUCV connections\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 14:
		dev_err(privptr->dev,
			"z/VM guest %s has too many IUCV connections"
			" to connect with the IUCV device\n",
			netiucv_printname(conn->userid, 8));
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 15:
		dev_err(privptr->dev,
			"The IUCV device cannot connect to a z/VM guest with no"
			" IUCV authorization\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	default:
		dev_err(privptr->dev,
			"Connecting the IUCV device failed with error %d\n",
			rc);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	}
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
	kfree(conn->path);
	conn->path = NULL;
}

static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}

static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		       netdev->name, conn->userid);
}

static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);

/*
 * Actions for interface - statemachine.
 */

/**
 * dev_action_start
 * @fi: An instance of an interface statemachine.
 * @event: The event, just happened.
 * @arg: Generic pointer, cast from struct net_device * upon call.
 *
 * Startup connection by sending CONN_EVENT_START to it.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}

/**
 * Shutdown connection by sending CONN_EVENT_STOP to it.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
 */
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}

/**
 * Called from connection statemachine
 * when a connection is up and running.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
 */
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT:
		fsm_newstate(fi, DEV_STATE_RUNNING);
		dev_info(privptr->dev,
			"The IUCV device has been connected"
			" successfully to %s\n",
			netiucv_printuser(privptr->conn));
		IUCV_DBF_TEXT(setup, 3,
			"connection is up and running\n");
		break;
	case DEV_STATE_STOPWAIT:
		IUCV_DBF_TEXT(data, 2,
			"dev_action_connup: in DEV_STATE_STOPWAIT\n");
		break;
	}
}

/**
 * Called from connection statemachine
 * when a connection has been shutdown.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
 */
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		fsm_newstate(fi, DEV_STATE_STARTWAIT);
		break;
	case DEV_STATE_STOPWAIT:
		fsm_newstate(fi, DEV_STATE_STOPPED);
		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
		break;
	}
}

static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
};

static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1177
1178/**
1179 * Transmit a packet.
1180 * This is a helper function for netiucv_tx().
1181 *
1182 * @param conn Connection to be used for sending.
1183 * @param skb Pointer to struct sk_buff of packet to send.
1184 * The linklevel header has already been set up
1185 * by netiucv_tx().
1186 *
1187 * @return 0 on success, -ERRNO on failure. (Never fails.)
1188 */
eebce385
MS
1189static int netiucv_transmit_skb(struct iucv_connection *conn,
1190 struct sk_buff *skb)
1191{
1192 struct iucv_message msg;
1da177e4 1193 unsigned long saveflags;
eebce385
MS
1194 struct ll_header header;
1195 int rc;
1da177e4
LT
1196
1197 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1198 int l = skb->len + NETIUCV_HDRLEN;
1199
1200 spin_lock_irqsave(&conn->collect_lock, saveflags);
1201 if (conn->collect_len + l >
1202 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1203 rc = -EBUSY;
1204 IUCV_DBF_TEXT(data, 2,
eebce385 1205 "EBUSY from netiucv_transmit_skb\n");
1da177e4
LT
1206 } else {
1207 atomic_inc(&skb->users);
1208 skb_queue_tail(&conn->collect_queue, skb);
1209 conn->collect_len += l;
eebce385 1210 rc = 0;
1da177e4
LT
1211 }
1212 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1213 } else {
1214 struct sk_buff *nskb = skb;
1215 /**
1216 * Copy the skb to a new allocated skb in lowmem only if the
1217 * data is located above 2G in memory or tailroom is < 2.
1218 */
27a884dc
ACM
1219 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1220 NETIUCV_HDRLEN)) >> 31;
1da177e4
LT
1221 int copied = 0;
1222 if (hi || (skb_tailroom(skb) < 2)) {
1223 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1224 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1225 if (!nskb) {
1da177e4
LT
1226 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1227 rc = -ENOMEM;
1228 return rc;
1229 } else {
1230 skb_reserve(nskb, NETIUCV_HDRLEN);
1231 memcpy(skb_put(nskb, skb->len),
1232 skb->data, skb->len);
1233 }
1234 copied = 1;
1235 }
1236 /**
1237 * skb now is below 2G and has enough room. Add headers.
1238 */
1239 header.next = nskb->len + NETIUCV_HDRLEN;
1240 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1241 header.next = 0;
1242 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1243
1244 fsm_newstate(conn->fsm, CONN_STATE_TX);
2c6b47de 1245 conn->prof.send_stamp = current_kernel_time();
e82b0f2c 1246
eebce385
MS
1247 msg.tag = 1;
1248 msg.class = 0;
1249 rc = iucv_message_send(conn->path, &msg, 0, 0,
1250 nskb->data, nskb->len);
1da177e4
LT
1251 conn->prof.doios_single++;
1252 conn->prof.txlen += skb->len;
1253 conn->prof.tx_pending++;
1254 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1255 conn->prof.tx_max_pending = conn->prof.tx_pending;
1256 if (rc) {
1257 struct netiucv_priv *privptr;
1258 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1259 conn->prof.tx_pending--;
eebce385 1260 privptr = netdev_priv(conn->netdev);
1da177e4
LT
1261 if (privptr)
1262 privptr->stats.tx_errors++;
1263 if (copied)
1264 dev_kfree_skb(nskb);
1265 else {
1266 /**
1267 * Remove our headers. They get added
1268 * again on retransmit.
1269 */
1270 skb_pull(skb, NETIUCV_HDRLEN);
1271 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1272 }
1da177e4
LT
1273 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1274 } else {
1275 if (copied)
1276 dev_kfree_skb(skb);
1277 atomic_inc(&nskb->users);
1278 skb_queue_tail(&conn->commit_queue, nskb);
1279 }
1280 }
1281
1282 return rc;
1283}
e82b0f2c 1284
eebce385 1285/*
1da177e4 1286 * Interface API for upper network layers
eebce385 1287 */
1da177e4
LT
1288
1289/**
1290 * Open an interface.
1291 * Called from generic network layer when ifconfig up is run.
1292 *
1293 * @param dev Pointer to interface struct.
1294 *
1295 * @return 0 on success, -ERRNO on failure. (Never fails.)
1296 */
eebce385
MS
1297static int netiucv_open(struct net_device *dev)
1298{
1299 struct netiucv_priv *priv = netdev_priv(dev);
1300
1301 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1da177e4
LT
1302 return 0;
1303}
1304
1305/**
1306 * Close an interface.
1307 * Called from generic network layer when ifconfig down is run.
1308 *
1309 * @param dev Pointer to interface struct.
1310 *
1311 * @return 0 on success, -ERRNO on failure. (Never fails.)
1312 */
eebce385
MS
1313static int netiucv_close(struct net_device *dev)
1314{
1315 struct netiucv_priv *priv = netdev_priv(dev);
1316
1317 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1da177e4
LT
1318 return 0;
1319}
1320
1175b257
UB
1321static int netiucv_pm_prepare(struct device *dev)
1322{
1323 IUCV_DBF_TEXT(trace, 3, __func__);
1324 return 0;
1325}
1326
1327static void netiucv_pm_complete(struct device *dev)
1328{
1329 IUCV_DBF_TEXT(trace, 3, __func__);
1330 return;
1331}
1332
1333/**
1334 * netiucv_pm_freeze() - Freeze PM callback
1335 * @dev: netiucv device
1336 *
1337 * close open netiucv interfaces
1338 */
1339static int netiucv_pm_freeze(struct device *dev)
1340{
4f0076f7 1341 struct netiucv_priv *priv = dev_get_drvdata(dev);
1175b257
UB
1342 struct net_device *ndev = NULL;
1343 int rc = 0;
1344
1345 IUCV_DBF_TEXT(trace, 3, __func__);
1346 if (priv && priv->conn)
1347 ndev = priv->conn->netdev;
1348 if (!ndev)
1349 goto out;
1350 netif_device_detach(ndev);
1351 priv->pm_state = fsm_getstate(priv->fsm);
1352 rc = netiucv_close(ndev);
1353out:
1354 return rc;
1355}
1356
1357/**
1358 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1359 * @dev: netiucv device
1360 *
1361 * re-open netiucv interfaces closed during freeze
1362 */
1363static int netiucv_pm_restore_thaw(struct device *dev)
1364{
4f0076f7 1365 struct netiucv_priv *priv = dev_get_drvdata(dev);
1175b257
UB
1366 struct net_device *ndev = NULL;
1367 int rc = 0;
1368
1369 IUCV_DBF_TEXT(trace, 3, __func__);
1370 if (priv && priv->conn)
1371 ndev = priv->conn->netdev;
1372 if (!ndev)
1373 goto out;
1374 switch (priv->pm_state) {
1375 case DEV_STATE_RUNNING:
1376 case DEV_STATE_STARTWAIT:
1377 rc = netiucv_open(ndev);
1378 break;
1379 default:
1380 break;
1381 }
1382 netif_device_attach(ndev);
1383out:
1384 return rc;
1385}
1386
1da177e4
LT
1387/**
1388 * Start transmission of a packet.
1389 * Called from generic network device layer.
1390 *
1391 * @param skb Pointer to buffer containing the packet.
1392 * @param dev Pointer to interface struct.
1393 *
1394 * @return 0 if packet consumed, !0 if packet rejected.
1395 * Note: If we return !0, then the packet is free'd by
1396 * the generic network layer.
1397 */
1398static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1399{
eebce385
MS
1400 struct netiucv_priv *privptr = netdev_priv(dev);
1401 int rc;
1da177e4 1402
2a2cf6b1 1403 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1404 /**
1405 * Some sanity checks ...
1406 */
1407 if (skb == NULL) {
1da177e4
LT
1408 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1409 privptr->stats.tx_dropped++;
ec634fe3 1410 return NETDEV_TX_OK;
1da177e4
LT
1411 }
1412 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1da177e4
LT
1413 IUCV_DBF_TEXT(data, 2,
1414 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1415 dev_kfree_skb(skb);
1416 privptr->stats.tx_dropped++;
ec634fe3 1417 return NETDEV_TX_OK;
1da177e4
LT
1418 }
1419
1420 /**
1421 * If connection is not running, try to restart it
e82b0f2c 1422 * and throw away packet.
1da177e4
LT
1423 */
1424 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1da177e4
LT
1425 dev_kfree_skb(skb);
1426 privptr->stats.tx_dropped++;
1427 privptr->stats.tx_errors++;
1428 privptr->stats.tx_carrier_errors++;
ec634fe3 1429 return NETDEV_TX_OK;
1da177e4
LT
1430 }
1431
1432 if (netiucv_test_and_set_busy(dev)) {
1433 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
4e584d66 1434 return NETDEV_TX_BUSY;
1da177e4
LT
1435 }
1436 dev->trans_start = jiffies;
5b548140 1437 rc = netiucv_transmit_skb(privptr->conn, skb);
1da177e4 1438 netiucv_clear_busy(dev);
5b548140 1439 return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1da177e4
LT
1440}
1441
1442/**
eebce385
MS
1443 * netiucv_stats
1444 * @dev: Pointer to interface struct.
1da177e4 1445 *
eebce385 1446 * Returns interface statistics of a device.
1da177e4 1447 *
eebce385 1448 * Returns pointer to stats struct of this interface.
1da177e4 1449 */
eebce385 1450static struct net_device_stats *netiucv_stats (struct net_device * dev)
1da177e4 1451{
eebce385
MS
1452 struct netiucv_priv *priv = netdev_priv(dev);
1453
2a2cf6b1 1454 IUCV_DBF_TEXT(trace, 5, __func__);
eebce385 1455 return &priv->stats;
1da177e4
LT
1456}
1457
1458/**
eebce385
MS
1459 * netiucv_change_mtu
1460 * @dev: Pointer to interface struct.
1461 * @new_mtu: The new MTU to use for this interface.
1da177e4 1462 *
eebce385 1463 * Sets MTU of an interface.
1da177e4 1464 *
eebce385 1465 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1da177e4
LT
1466 * (valid range is 576 .. NETIUCV_MTU_MAX).
1467 */
eebce385 1468static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1da177e4 1469{
2a2cf6b1 1470 IUCV_DBF_TEXT(trace, 3, __func__);
eebce385 1471 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1da177e4
LT
1472 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1473 return -EINVAL;
1474 }
1475 dev->mtu = new_mtu;
1476 return 0;
1477}
1478
eebce385 1479/*
1da177e4 1480 * attributes in sysfs
eebce385 1481 */
1da177e4 1482
eebce385
MS
1483static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1484 char *buf)
1da177e4 1485{
dff59b64 1486 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1487
2a2cf6b1 1488 IUCV_DBF_TEXT(trace, 5, __func__);
08e3356c 1489 return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1da177e4
LT
1490}
1491
08e3356c
UB
1492static int netiucv_check_user(const char *buf, size_t count, char *username,
1493 char *userdata)
1da177e4 1494{
08e3356c
UB
1495 const char *p;
1496 int i;
1da177e4 1497
08e3356c
UB
1498 p = strchr(buf, '.');
1499 if ((p && ((count > 26) ||
1500 ((p - buf) > 8) ||
1501 (buf + count - p > 18))) ||
1502 (!p && (count > 9))) {
1503 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1da177e4
LT
1504 return -EINVAL;
1505 }
1506
08e3356c
UB
1507 for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1508 if (isalnum(*p) || *p == '$') {
1509 username[i] = toupper(*p);
eebce385
MS
1510 continue;
1511 }
08e3356c 1512 if (*p == '\n')
1da177e4
LT
1513 /* trailing lf, grr */
1514 break;
eebce385 1515 IUCV_DBF_TEXT_(setup, 2,
08e3356c 1516 "conn_write: invalid character %02x\n", *p);
eebce385 1517 return -EINVAL;
1da177e4 1518 }
eebce385 1519 while (i < 8)
1da177e4 1520 username[i++] = ' ';
16a83b30 1521 username[8] = '\0';
1da177e4 1522
08e3356c
UB
1523 if (*p == '.') {
1524 p++;
1525 for (i = 0; i < 16 && *p; i++, p++) {
1526 if (*p == '\n')
1527 break;
1528 userdata[i] = toupper(*p);
1529 }
1530 while (i > 0 && i < 16)
1531 userdata[i++] = ' ';
1532 } else
1533 memcpy(userdata, iucvMagic_ascii, 16);
1534 userdata[16] = '\0';
1535 ASCEBC(userdata, 16);
1536
1537 return 0;
1538}
1539
1540static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1541 const char *buf, size_t count)
1542{
1543 struct netiucv_priv *priv = dev_get_drvdata(dev);
1544 struct net_device *ndev = priv->conn->netdev;
1545 char username[9];
1546 char userdata[17];
1547 int rc;
1548 struct iucv_connection *cp;
1549
1550 IUCV_DBF_TEXT(trace, 3, __func__);
1551 rc = netiucv_check_user(buf, count, username, userdata);
1552 if (rc)
1553 return rc;
1554
eebce385
MS
1555 if (memcmp(username, priv->conn->userid, 9) &&
1556 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1557 /* username changed while the interface is active. */
eebce385 1558 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
f082bcae 1559 return -EPERM;
eebce385
MS
1560 }
1561 read_lock_bh(&iucv_connection_rwlock);
1562 list_for_each_entry(cp, &iucv_connection_list, list) {
08e3356c
UB
1563 if (!strncmp(username, cp->userid, 9) &&
1564 !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
eebce385 1565 read_unlock_bh(&iucv_connection_rwlock);
08e3356c
UB
1566 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1567 "already exists\n", netiucv_printuser(cp));
eebce385 1568 return -EEXIST;
1da177e4
LT
1569 }
1570 }
eebce385 1571 read_unlock_bh(&iucv_connection_rwlock);
1da177e4 1572 memcpy(priv->conn->userid, username, 9);
08e3356c 1573 memcpy(priv->conn->userdata, userdata, 17);
1da177e4 1574 return count;
1da177e4
LT
1575}
1576
1577static DEVICE_ATTR(user, 0644, user_show, user_write);
1578
eebce385
MS
1579static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1580 char *buf)
dff59b64
GKH
1581{
1582 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1583
2a2cf6b1 1584 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1585 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1586}
1587
eebce385
MS
1588static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1589 const char *buf, size_t count)
1da177e4 1590{
dff59b64 1591 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4
LT
1592 struct net_device *ndev = priv->conn->netdev;
1593 char *e;
1594 int bs1;
1595
2a2cf6b1 1596 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1597 if (count >= 39)
1598 return -EINVAL;
1599
1600 bs1 = simple_strtoul(buf, &e, 0);
1601
1602 if (e && (!isspace(*e))) {
08e3356c
UB
1603 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
1604 *e);
1da177e4
LT
1605 return -EINVAL;
1606 }
1607 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1da177e4
LT
1608 IUCV_DBF_TEXT_(setup, 2,
1609 "buffer_write: buffer size %d too large\n",
1610 bs1);
1611 return -EINVAL;
1612 }
1613 if ((ndev->flags & IFF_RUNNING) &&
1614 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1da177e4
LT
1615 IUCV_DBF_TEXT_(setup, 2,
1616 "buffer_write: buffer size %d too small\n",
1617 bs1);
1618 return -EINVAL;
1619 }
1620 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1da177e4
LT
1621 IUCV_DBF_TEXT_(setup, 2,
1622 "buffer_write: buffer size %d too small\n",
1623 bs1);
1624 return -EINVAL;
1625 }
1626
1627 priv->conn->max_buffsize = bs1;
1628 if (!(ndev->flags & IFF_RUNNING))
1629 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1630
1631 return count;
1632
1633}
1634
1635static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1636
eebce385
MS
1637static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1638 char *buf)
1da177e4 1639{
dff59b64 1640 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1641
2a2cf6b1 1642 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1643 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1644}
1645
1646static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1647
eebce385
MS
1648static ssize_t conn_fsm_show (struct device *dev,
1649 struct device_attribute *attr, char *buf)
1da177e4 1650{
dff59b64 1651 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1652
2a2cf6b1 1653 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1654 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1655}
1656
1657static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1658
eebce385
MS
1659static ssize_t maxmulti_show (struct device *dev,
1660 struct device_attribute *attr, char *buf)
1da177e4 1661{
dff59b64 1662 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1663
2a2cf6b1 1664 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1665 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1666}
1667
eebce385
MS
1668static ssize_t maxmulti_write (struct device *dev,
1669 struct device_attribute *attr,
1670 const char *buf, size_t count)
1da177e4 1671{
dff59b64 1672 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1673
2a2cf6b1 1674 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1675 priv->conn->prof.maxmulti = 0;
1676 return count;
1677}
1678
1679static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1680
eebce385
MS
1681static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1682 char *buf)
1da177e4 1683{
dff59b64 1684 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1685
2a2cf6b1 1686 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1687 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1688}
1689
eebce385
MS
1690static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1691 const char *buf, size_t count)
1da177e4 1692{
dff59b64 1693 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1694
2a2cf6b1 1695 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1696 priv->conn->prof.maxcqueue = 0;
1697 return count;
1698}
1699
1700static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1701
eebce385
MS
1702static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1703 char *buf)
1da177e4 1704{
dff59b64 1705 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1706
2a2cf6b1 1707 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1708 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1709}
1710
eebce385
MS
1711static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1712 const char *buf, size_t count)
1da177e4 1713{
dff59b64 1714 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1715
2a2cf6b1 1716 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1717 priv->conn->prof.doios_single = 0;
1718 return count;
1719}
1720
1721static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1722
static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}

static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	priv->conn->prof.doios_multi = 0;
	return count;
}

static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);

static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};
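
/*
 * The counters above are exported in a per-interface "stats" subdirectory
 * of the companion sysfs device. Reading an attribute returns the current
 * value of the corresponding profiling counter; writing anything to it
 * resets the counter to zero (the written value itself is ignored).
 * A usage sketch, assuming an interface named iucv0 whose companion
 * device was registered as "netiucv0" (paths shown for illustration only):
 *
 *   cat /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 *   echo 0 > /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */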

static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};

static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (dev) {
		dev_set_name(dev, "net%s", ndev->name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->groups = netiucv_attr_groups;
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
		dev->driver = &netiucv_driver;
	} else
		return -ENOMEM;

	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}
	priv->dev = dev;
	dev_set_drvdata(dev, priv);
	return 0;
}

static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	device_unregister(dev);
}

/**
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections.
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username,
						      char *userdata)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (userdata)
		memcpy(conn->userdata, userdata, 17);
	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}

/**
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}

/**
 * Release all resources of a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (!dev)
		return;

	if (privptr) {
		if (privptr->conn)
			netiucv_remove_connection(privptr->conn);
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		privptr->conn = NULL;
		privptr->fsm = NULL;
		/* privptr gets freed by free_netdev() */
	}
	free_netdev(dev);
}

static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
	.ndo_change_mtu		= netiucv_change_mtu,
};

/**
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu = NETIUCV_MTU_DEFAULT;
	dev->destructor = netiucv_free_netdevice;
	dev->hard_header_len = NETIUCV_HDRLEN;
	dev->addr_len = 0;
	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops = &netiucv_netdev_ops;
}

/**
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username, userdata);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	rtnl_unlock();
	free_netdev(dev);
	return NULL;
}

static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	char username[9];
	char userdata[17];
	int rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		rtnl_unlock();
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	rtnl_unlock();
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
			    "successfully\n",
		 netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);

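/*
 * Writing a peer z/VM user ID to this driver attribute creates a new
 * netiucv interface connected to that peer (see conn_write() above).
 * A hedged usage sketch; the sysfs path follows from the registration
 * on the iucv bus, and the user ID "TESTVM1" is only an example:
 *
 *   echo TESTVM1 > /sys/bus/iucv/drivers/netiucv/connection
 *
 * On success a new network interface (e.g. iucv0) appears and can be
 * configured with the usual tools.
 */
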
static ssize_t remove_write (struct device_driver *drv,
			     const char *buf, size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				" to %s and cannot be removed\n",
				priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}

static DRIVER_ATTR(remove, 0200, NULL, remove_write);

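/*
 * Writing an interface name to this attribute removes the matching
 * netiucv interface again, provided it is not up (see remove_write()
 * above). Usage sketch, with the path assumed as for "connection" and
 * the interface name only an example:
 *
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 */
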
static struct attribute *netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};

static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}

static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}

	device_unregister(netiucv_dev);
	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
}

static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}
	/* establish dummy device */
	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!netiucv_dev) {
		rc = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(netiucv_dev, "netiucv");
	netiucv_dev->bus = &iucv_bus;
	netiucv_dev->parent = iucv_root;
	netiucv_dev->release = (void (*)(struct device *))kfree;
	netiucv_dev->driver = &netiucv_driver;
	rc = device_register(netiucv_dev);
	if (rc) {
		put_device(netiucv_dev);
		goto out_driver;
	}
	netiucv_banner();
	return rc;

out_driver:
	driver_unregister(&netiucv_driver);
out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}

module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");