Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * IUCV network driver
3 *
4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 *
7 * Sysfs integration and all bugs therein by Cornelia Huck
8 * (cornelia.huck@de.ibm.com)
9 *
10 * Documentation used:
11 * the source of the original IUCV driver by:
12 * Stefan Hegewald <hegewald@de.ibm.com>
13 * Hartmut Penner <hpenner@de.ibm.com>
14 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15 * Martin Schwidefsky (schwidefsky@de.ibm.com)
16 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 */
33
34#undef DEBUG
35
36#include <linux/module.h>
37#include <linux/init.h>
38#include <linux/kernel.h>
39#include <linux/slab.h>
40#include <linux/errno.h>
41#include <linux/types.h>
42#include <linux/interrupt.h>
43#include <linux/timer.h>
44#include <linux/bitops.h>
45
46#include <linux/signal.h>
47#include <linux/string.h>
48#include <linux/device.h>
49
50#include <linux/ip.h>
51#include <linux/if_arp.h>
52#include <linux/tcp.h>
53#include <linux/skbuff.h>
54#include <linux/ctype.h>
55#include <net/dst.h>
56
57#include <asm/io.h>
58#include <asm/uaccess.h>
59
60#include <net/iucv/iucv.h>
61#include "fsm.h"
62
63MODULE_AUTHOR
64 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
65MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
66
67/**
68 * Debug Facility stuff
69 */
70#define IUCV_DBF_SETUP_NAME "iucv_setup"
71#define IUCV_DBF_SETUP_LEN 32
72#define IUCV_DBF_SETUP_PAGES 2
73#define IUCV_DBF_SETUP_NR_AREAS 1
74#define IUCV_DBF_SETUP_LEVEL 3
75
76#define IUCV_DBF_DATA_NAME "iucv_data"
77#define IUCV_DBF_DATA_LEN 128
78#define IUCV_DBF_DATA_PAGES 2
79#define IUCV_DBF_DATA_NR_AREAS 1
80#define IUCV_DBF_DATA_LEVEL 2
81
82#define IUCV_DBF_TRACE_NAME "iucv_trace"
83#define IUCV_DBF_TRACE_LEN 16
84#define IUCV_DBF_TRACE_PAGES 4
85#define IUCV_DBF_TRACE_NR_AREAS 1
86#define IUCV_DBF_TRACE_LEVEL 3
87
88#define IUCV_DBF_TEXT(name,level,text) \
89 do { \
90 debug_text_event(iucv_dbf_##name,level,text); \
91 } while (0)
92
93#define IUCV_DBF_HEX(name,level,addr,len) \
94 do { \
95 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
96 } while (0)
97
98DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
99
100#define IUCV_DBF_TEXT_(name,level,text...) \
101 do { \
102 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
103 sprintf(iucv_dbf_txt_buf, text); \
104 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
105 put_cpu_var(iucv_dbf_txt_buf); \
106 } while (0)
107
/*
 * Emit a printf-style formatted entry into the trace debug feature.
 *
 * Note: "name" is accepted for symmetry with the other IUCV_DBF_*
 * macros, but all output goes to iucv_dbf_trace.
 *
 * Fix vs. original: the event was logged twice (two identical
 * debug_sprintf_event() calls); the duplicate has been removed.
 */
#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
	} while (0)
113
114/**
115 * some more debug stuff
116 */
/*
 * Dump the 32 bytes at "ptr" as two printk lines of 16 hex byte values
 * each, via the PRINT_<importance> wrappers.
 *
 * Fix vs. original: the macro expanded to two separate statements,
 * which misbehaves when used as the body of an unbraced if/else; it is
 * now wrapped in the standard do { } while (0).
 */
#define IUCV_HEXDUMP16(importance, header, ptr) \
do { \
	PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
			   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
			   *(((char *)ptr)), *(((char *)ptr) + 1), \
			   *(((char *)ptr) + 2), *(((char *)ptr) + 3), \
			   *(((char *)ptr) + 4), *(((char *)ptr) + 5), \
			   *(((char *)ptr) + 6), *(((char *)ptr) + 7), \
			   *(((char *)ptr) + 8), *(((char *)ptr) + 9), \
			   *(((char *)ptr) + 10), *(((char *)ptr) + 11), \
			   *(((char *)ptr) + 12), *(((char *)ptr) + 13), \
			   *(((char *)ptr) + 14), *(((char *)ptr) + 15)); \
	PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
			   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
			   *(((char *)ptr) + 16), *(((char *)ptr) + 17), \
			   *(((char *)ptr) + 18), *(((char *)ptr) + 19), \
			   *(((char *)ptr) + 20), *(((char *)ptr) + 21), \
			   *(((char *)ptr) + 22), *(((char *)ptr) + 23), \
			   *(((char *)ptr) + 24), *(((char *)ptr) + 25), \
			   *(((char *)ptr) + 26), *(((char *)ptr) + 27), \
			   *(((char *)ptr) + 28), *(((char *)ptr) + 29), \
			   *(((char *)ptr) + 30), *(((char *)ptr) + 31)); \
} while (0)
136
137#define PRINTK_HEADER " iucv: " /* for debugging */
138
139static struct device_driver netiucv_driver = {
140 .name = "netiucv",
141 .bus = &iucv_bus,
142};
143
144static int netiucv_callback_connreq(struct iucv_path *,
145 u8 ipvmid[8], u8 ipuser[16]);
146static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
147static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
148static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
149static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
150static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
151static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
152
153static struct iucv_handler netiucv_handler = {
154 .path_pending = netiucv_callback_connreq,
155 .path_complete = netiucv_callback_connack,
156 .path_severed = netiucv_callback_connrej,
157 .path_quiesced = netiucv_callback_connsusp,
158 .path_resumed = netiucv_callback_connres,
159 .message_pending = netiucv_callback_rx,
160 .message_complete = netiucv_callback_txdone
161};
162
/**
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;		/* high-water mark of collect_len, i.e.
					 * most bytes merged into one multi-send */
	unsigned long maxcqueue;	/* max. number of skbs merged into one
					 * multi-send */
	unsigned long doios_single;	/* # of single-skb sends (updated by the
					 * tx path, not visible in this chunk —
					 * confirm) */
	unsigned long doios_multi;	/* # of multi-skb sends */
	unsigned long txlen;		/* total bytes handed to iucv_message_send */
	unsigned long tx_time;		/* not updated in this chunk — TODO
					 * confirm where/if it is used */
	struct timespec send_stamp;	/* timestamp of the last send issued */
	unsigned long tx_pending;	/* sends awaiting CONN_EVENT_TXDONE */
	unsigned long tx_max_pending;	/* high-water mark of tx_pending */
};
177
/**
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;		/* entry in iucv_connection_list */
	struct iucv_path	  *path;	/* underlying iucv path; NULL while
						 * no path is established */
	struct sk_buff		  *rx_buff;	/* assembly buffer for received
						 * messages */
	struct sk_buff		  *tx_buff;	/* staging buffer for multi-sends */
	struct sk_buff_head	  collect_queue; /* skbs waiting to be sent */
	struct sk_buff_head	  commit_queue;	/* skbs sent, awaiting TXDONE */
	spinlock_t		  collect_lock;	/* protects collect_queue and
						 * collect_len */
	int			  collect_len;	/* bytes queued, incl. headers */
	int			  max_buffsize; /* max. iucv message size accepted */
	fsm_timer		  timer;	/* connection setup timeout */
	fsm_instance		  *fsm;		/* connection state machine */
	struct net_device	  *netdev;	/* net device served by this
						 * connection */
	struct connection_profile prof;		/* profiling counters */
	char			  userid[9];	/* peer VM userid, NUL-terminated */
};
197
198/**
199 * Linked list of all connection structs.
200 */
201static LIST_HEAD(iucv_connection_list);
202static DEFINE_RWLOCK(iucv_connection_rwlock);
203
/**
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;	/* connection the event refers to */
	void *data;			/* event payload: an iucv_message or
					 * iucv_path, depending on the event;
					 * may be left unset (e.g. STOP) */
};
212
/**
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;	/* interface statistics */
	unsigned long		tbusy;	/* bit 0: transmitter-busy flag, used
					 * by netiucv_*_busy() helpers */
	fsm_instance		*fsm;	/* device (interface) state machine */
	struct iucv_connection	*conn;	/* connection bound to this device */
	struct device		*dev;	/* associated struct device */
};
223
224/**
225 * Link level header for a packet.
226 */
227struct ll_header {
228 u16 next;
229};
230
231#define NETIUCV_HDRLEN (sizeof(struct ll_header))
232#define NETIUCV_BUFSIZE_MAX 32768
233#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
234#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
235#define NETIUCV_MTU_DEFAULT 9216
236#define NETIUCV_QUEUELEN_DEFAULT 50
237#define NETIUCV_TIMEOUT_5SEC 5000
238
239/**
240 * Compatibility macros for busy handling
241 * of network devices.
242 */
243static inline void netiucv_clear_busy(struct net_device *dev)
244{
245 struct netiucv_priv *priv = netdev_priv(dev);
246 clear_bit(0, &priv->tbusy);
247 netif_wake_queue(dev);
248}
249
250static inline int netiucv_test_and_set_busy(struct net_device *dev)
251{
252 struct netiucv_priv *priv = netdev_priv(dev);
253 netif_stop_queue(dev);
254 return test_and_set_bit(0, &priv->tbusy);
255}
256
/*
 * 16 bytes of "magic" user data: sent as ipuser with every outgoing
 * connection request (conn_action_start) and required to match on
 * incoming requests (netiucv_callback_connreq).
 * (0xF0/0x40 look like EBCDIC '0' and blank — TODO confirm intent.)
 */
static u8 iucvMagic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
261
/**
 * Convert an iucv userId to its printable form by truncating it at the
 * first whitespace character (userids are blank-padded to 8 bytes).
 *
 * @param name An iucv userId: at least 8 bytes, need not be
 *             NUL-terminated.
 *
 * @returns The printable string. The result lives in a static buffer:
 *          not reentrant, and each call overwrites the previous result.
 */
static char *netiucv_printname(char *name)
{
	static char tmp[9];
	char *p = tmp;

	memcpy(tmp, name, 8);
	tmp[8] = '\0';
	/* Cast to unsigned char: passing a possibly negative plain char
	 * to isspace() is undefined behavior (CERT STR37-C). */
	while (*p && !isspace((unsigned char)*p))
		p++;
	*p = '\0';
	return tmp;
}
281
282/**
283 * States of the interface statemachine.
284 */
285enum dev_states {
286 DEV_STATE_STOPPED,
287 DEV_STATE_STARTWAIT,
288 DEV_STATE_STOPWAIT,
289 DEV_STATE_RUNNING,
290 /**
291 * MUST be always the last element!!
292 */
293 NR_DEV_STATES
294};
295
296static const char *dev_state_names[] = {
297 "Stopped",
298 "StartWait",
299 "StopWait",
300 "Running",
301};
302
303/**
304 * Events of the interface statemachine.
305 */
306enum dev_events {
307 DEV_EVENT_START,
308 DEV_EVENT_STOP,
309 DEV_EVENT_CONUP,
310 DEV_EVENT_CONDOWN,
311 /**
312 * MUST be always the last element!!
313 */
314 NR_DEV_EVENTS
315};
316
317static const char *dev_event_names[] = {
318 "Start",
319 "Stop",
320 "Connection up",
321 "Connection down",
322};
323
324/**
325 * Events of the connection statemachine
326 */
327enum conn_events {
328 /**
329 * Events, representing callbacks from
330 * lowlevel iucv layer)
331 */
332 CONN_EVENT_CONN_REQ,
333 CONN_EVENT_CONN_ACK,
334 CONN_EVENT_CONN_REJ,
335 CONN_EVENT_CONN_SUS,
336 CONN_EVENT_CONN_RES,
337 CONN_EVENT_RX,
338 CONN_EVENT_TXDONE,
339
340 /**
341 * Events, representing errors return codes from
342 * calls to lowlevel iucv layer
343 */
344
345 /**
346 * Event, representing timer expiry.
347 */
348 CONN_EVENT_TIMER,
349
350 /**
351 * Events, representing commands from upper levels.
352 */
353 CONN_EVENT_START,
354 CONN_EVENT_STOP,
355
356 /**
357 * MUST be always the last element!!
358 */
359 NR_CONN_EVENTS,
360};
361
362static const char *conn_event_names[] = {
363 "Remote connection request",
364 "Remote connection acknowledge",
365 "Remote connection reject",
366 "Connection suspended",
367 "Connection resumed",
368 "Data received",
369 "Data sent",
370
371 "Timer",
372
373 "Start",
374 "Stop",
375};
376
377/**
378 * States of the connection statemachine.
379 */
380enum conn_states {
381 /**
382 * Connection not assigned to any device,
383 * initial state, invalid
384 */
385 CONN_STATE_INVALID,
386
387 /**
388 * Userid assigned but not operating
389 */
390 CONN_STATE_STOPPED,
391
392 /**
393 * Connection registered,
394 * no connection request sent yet,
395 * no connection request received
396 */
397 CONN_STATE_STARTWAIT,
398
399 /**
400 * Connection registered and connection request sent,
401 * no acknowledge and no connection request received yet.
402 */
403 CONN_STATE_SETUPWAIT,
404
405 /**
406 * Connection up and running idle
407 */
408 CONN_STATE_IDLE,
409
410 /**
411 * Data sent, awaiting CONN_EVENT_TXDONE
412 */
413 CONN_STATE_TX,
414
415 /**
416 * Error during registration.
417 */
418 CONN_STATE_REGERR,
419
420 /**
421 * Error during registration.
422 */
423 CONN_STATE_CONNERR,
424
425 /**
426 * MUST be always the last element!!
427 */
428 NR_CONN_STATES,
429};
430
/*
 * Printable names of the connection states, indexed by enum
 * conn_states.
 *
 * Fix vs. original: the stale "Terminating" entry was removed. The
 * enum has no terminating state, so the extra string shifted the
 * names for CONN_STATE_REGERR and CONN_STATE_CONNERR by one.
 */
static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Registration error",
	"Connect error",
};
442
443
444/**
445 * Debug Facility Stuff
446 */
447static debug_info_t *iucv_dbf_setup = NULL;
448static debug_info_t *iucv_dbf_data = NULL;
449static debug_info_t *iucv_dbf_trace = NULL;
450
451DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
452
453static void iucv_unregister_dbf_views(void)
454{
455 if (iucv_dbf_setup)
456 debug_unregister(iucv_dbf_setup);
457 if (iucv_dbf_data)
458 debug_unregister(iucv_dbf_data);
459 if (iucv_dbf_trace)
460 debug_unregister(iucv_dbf_trace);
461}
/*
 * Register the setup, data and trace debug feature areas and attach a
 * hex/ascii view plus a default level to each.
 *
 * Returns 0 on success or -ENOMEM if any debug_register() failed; in
 * that case all areas registered so far are unregistered again.
 * (The return values of debug_register_view() are ignored here.)
 */
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}
493
494/*
495 * Callback-wrappers, called from lowlevel iucv layer.
496 */
497
498static void netiucv_callback_rx(struct iucv_path *path,
499 struct iucv_message *msg)
500{
501 struct iucv_connection *conn = path->private;
502 struct iucv_event ev;
503
504 ev.conn = conn;
505 ev.data = msg;
506 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
507}
508
509static void netiucv_callback_txdone(struct iucv_path *path,
510 struct iucv_message *msg)
511{
512 struct iucv_connection *conn = path->private;
513 struct iucv_event ev;
514
515 ev.conn = conn;
516 ev.data = msg;
517 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
518}
519
520static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
521{
522 struct iucv_connection *conn = path->private;
523
524 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
525}
526
527static int netiucv_callback_connreq(struct iucv_path *path,
528 u8 ipvmid[8], u8 ipuser[16])
529{
530 struct iucv_connection *conn = path->private;
531 struct iucv_event ev;
532 int rc;
533
534 if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
535 /* ipuser must match iucvMagic. */
536 return -EINVAL;
537 rc = -EINVAL;
538 read_lock_bh(&iucv_connection_rwlock);
539 list_for_each_entry(conn, &iucv_connection_list, list) {
540 if (strncmp(ipvmid, conn->userid, 8))
541 continue;
542 /* Found a matching connection for this path. */
543 conn->path = path;
544 ev.conn = conn;
545 ev.data = path;
546 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
547 rc = 0;
548 }
549 read_unlock_bh(&iucv_connection_rwlock);
550 return rc;
551}
552
553static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
554{
555 struct iucv_connection *conn = path->private;
556
557 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
558}
559
560static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
561{
562 struct iucv_connection *conn = path->private;
563
564 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
565}
566
567static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
568{
569 struct iucv_connection *conn = path->private;
570
571 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
572}
573
/**
 * Dummy NOP action for all statemachines
 *
 * Handler for state/event combinations that are valid but need no work
 * (e.g. DEV_EVENT_CONUP while already in DEV_STATE_RUNNING).
 */
static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
{
	/* intentionally empty */
}
580
581/*
582 * Actions of the connection statemachine
583 */
584
/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 *
 * The receive buffer contains a sequence of packets, each preceded by
 * a struct ll_header whose "next" field holds the offset of the next
 * header in the buffer; a "next" of 0 terminates the sequence. Each
 * packet is copied into a freshly allocated skb and passed up the
 * stack.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = ntohs(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		/* "next" == 0 marks the end of the packet sequence. */
		if (!header->next)
			break;

		/* Convert the absolute "next" offset into the length of
		 * this packet's payload. */
		skb_pull(pskb, NETIUCV_HDRLEN);
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			PRINT_WARN("%s: Illegal next field in iucv header: "
				   "%d > %d\n",
				   dev->name, header->next, skb_tailroom(pskb));
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				       header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
				   dev->name);
			IUCV_DBF_TEXT(data, 2,
				      "Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): ip_summed is set on pskb (the assembly
		 * buffer), not on the skb just delivered — looks
		 * suspicious; confirm intended target. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		dev->last_rx = jiffies;
		/* Advance to the next header in the buffer. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
653
654static void conn_action_rx(fsm_instance *fi, int event, void *arg)
655{
656 struct iucv_event *ev = arg;
657 struct iucv_connection *conn = ev->conn;
658 struct iucv_message *msg = ev->data;
659 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
660 int rc;
661
662 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
663
664 if (!conn->netdev) {
665 iucv_message_reject(conn->path, msg);
666 PRINT_WARN("Received data for unlinked connection\n");
667 IUCV_DBF_TEXT(data, 2,
668 "Received data for unlinked connection\n");
669 return;
670 }
671 if (msg->length > conn->max_buffsize) {
672 iucv_message_reject(conn->path, msg);
673 privptr->stats.rx_dropped++;
674 PRINT_WARN("msglen %d > max_buffsize %d\n",
675 msg->length, conn->max_buffsize);
676 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
677 msg->length, conn->max_buffsize);
678 return;
679 }
680 conn->rx_buff->data = conn->rx_buff->head;
681 skb_reset_tail_pointer(conn->rx_buff);
682 conn->rx_buff->len = 0;
683 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
684 msg->length, NULL);
685 if (rc || msg->length < 5) {
686 privptr->stats.rx_errors++;
687 PRINT_WARN("iucv_receive returned %08x\n", rc);
688 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
689 return;
690 }
691 netiucv_unpack_skb(conn, conn->rx_buff);
692}
693
694static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
695{
696 struct iucv_event *ev = arg;
697 struct iucv_connection *conn = ev->conn;
698 struct iucv_message *msg = ev->data;
699 struct iucv_message txmsg;
700 struct netiucv_priv *privptr = NULL;
701 u32 single_flag = msg->tag;
702 u32 txbytes = 0;
703 u32 txpackets = 0;
704 u32 stat_maxcq = 0;
705 struct sk_buff *skb;
706 unsigned long saveflags;
707 struct ll_header header;
708 int rc;
709
710 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
711
712 if (conn && conn->netdev)
713 privptr = netdev_priv(conn->netdev);
714 conn->prof.tx_pending--;
715 if (single_flag) {
716 if ((skb = skb_dequeue(&conn->commit_queue))) {
717 atomic_dec(&skb->users);
718 dev_kfree_skb_any(skb);
719 if (privptr) {
720 privptr->stats.tx_packets++;
721 privptr->stats.tx_bytes +=
722 (skb->len - NETIUCV_HDRLEN
723 - NETIUCV_HDRLEN);
724 }
725 }
726 }
727 conn->tx_buff->data = conn->tx_buff->head;
728 skb_reset_tail_pointer(conn->tx_buff);
729 conn->tx_buff->len = 0;
730 spin_lock_irqsave(&conn->collect_lock, saveflags);
731 while ((skb = skb_dequeue(&conn->collect_queue))) {
732 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
733 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
734 NETIUCV_HDRLEN);
735 skb_copy_from_linear_data(skb,
736 skb_put(conn->tx_buff, skb->len),
737 skb->len);
738 txbytes += skb->len;
739 txpackets++;
740 stat_maxcq++;
741 atomic_dec(&skb->users);
742 dev_kfree_skb_any(skb);
743 }
744 if (conn->collect_len > conn->prof.maxmulti)
745 conn->prof.maxmulti = conn->collect_len;
746 conn->collect_len = 0;
747 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
748 if (conn->tx_buff->len == 0) {
749 fsm_newstate(fi, CONN_STATE_IDLE);
750 return;
751 }
752
753 header.next = 0;
754 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
755 conn->prof.send_stamp = current_kernel_time();
756 txmsg.class = 0;
757 txmsg.tag = 0;
758 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
759 conn->tx_buff->data, conn->tx_buff->len);
760 conn->prof.doios_multi++;
761 conn->prof.txlen += conn->tx_buff->len;
762 conn->prof.tx_pending++;
763 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
764 conn->prof.tx_max_pending = conn->prof.tx_pending;
765 if (rc) {
766 conn->prof.tx_pending--;
767 fsm_newstate(fi, CONN_STATE_IDLE);
768 if (privptr)
769 privptr->stats.tx_errors += txpackets;
770 PRINT_WARN("iucv_send returned %08x\n", rc);
771 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
772 } else {
773 if (privptr) {
774 privptr->stats.tx_packets += txpackets;
775 privptr->stats.tx_bytes += txbytes;
776 }
777 if (stat_maxcq > conn->prof.maxcqueue)
778 conn->prof.maxcqueue = stat_maxcq;
779 }
780}
781
/*
 * Accept an incoming path (CONN_EVENT_CONN_REQ while waiting for a
 * connection): attach the path to the connection, accept it at the
 * iucv layer, go to CONN_STATE_IDLE and tell the device state machine
 * the connection is up. On accept failure the state is left unchanged.
 */
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
	if (rc) {
		PRINT_WARN("%s: IUCV accept failed with error %d\n",
			   netdev->name, rc);
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Limit the device's tx queue to the path's message limit. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
807
808static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
809{
810 struct iucv_event *ev = arg;
811 struct iucv_path *path = ev->data;
812
813 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
814 iucv_path_sever(path, NULL);
815}
816
/*
 * The peer acknowledged our connection request (CONN_EVENT_CONN_ACK
 * in CONN_STATE_SETUPWAIT): stop the setup timeout, go idle and notify
 * the device state machine.
 */
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Limit the device's tx queue to the path's message limit. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
829
/*
 * Connection setup timed out (CONN_EVENT_TIMER in CONN_STATE_SETUPWAIT):
 * sever the half-open path and fall back to waiting for a new attempt.
 */
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
839
/*
 * The peer dropped the connection (CONN_EVENT_CONN_REJ): sever our end
 * of the path, go back to CONN_STATE_STARTWAIT and tell the device
 * state machine the connection is down.
 */
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
856
857static void conn_action_start(fsm_instance *fi, int event, void *arg)
858{
859 struct iucv_connection *conn = arg;
860 int rc;
861
862 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
863
864 fsm_newstate(fi, CONN_STATE_STARTWAIT);
865 PRINT_DEBUG("%s('%s'): connecting ...\n",
866 conn->netdev->name, conn->userid);
867
868 /*
869 * We must set the state before calling iucv_connect because the
870 * callback handler could be called at any point after the connection
871 * request is sent
872 */
873
874 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
875 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
876 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
877 NULL, iucvMagic, conn);
878 switch (rc) {
879 case 0:
880 conn->netdev->tx_queue_len = conn->path->msglim;
881 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
882 CONN_EVENT_TIMER, conn);
883 return;
884 case 11:
885 PRINT_INFO("%s: User %s is currently not available.\n",
886 conn->netdev->name,
887 netiucv_printname(conn->userid));
888 fsm_newstate(fi, CONN_STATE_STARTWAIT);
889 break;
890 case 12:
891 PRINT_INFO("%s: User %s is currently not ready.\n",
892 conn->netdev->name,
893 netiucv_printname(conn->userid));
894 fsm_newstate(fi, CONN_STATE_STARTWAIT);
895 break;
896 case 13:
897 PRINT_WARN("%s: Too many IUCV connections.\n",
898 conn->netdev->name);
899 fsm_newstate(fi, CONN_STATE_CONNERR);
900 break;
901 case 14:
902 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
903 conn->netdev->name,
904 netiucv_printname(conn->userid));
905 fsm_newstate(fi, CONN_STATE_CONNERR);
906 break;
907 case 15:
908 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
909 conn->netdev->name);
910 fsm_newstate(fi, CONN_STATE_CONNERR);
911 break;
912 default:
913 PRINT_WARN("%s: iucv_connect returned error %d\n",
914 conn->netdev->name, rc);
915 fsm_newstate(fi, CONN_STATE_CONNERR);
916 break;
917 }
918 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
919 kfree(conn->path);
920 conn->path = NULL;
921}
922
923static void netiucv_purge_skb_queue(struct sk_buff_head *q)
924{
925 struct sk_buff *skb;
926
927 while ((skb = skb_dequeue(q))) {
928 atomic_dec(&skb->users);
929 dev_kfree_skb_any(skb);
930 }
931}
932
/*
 * Handle CONN_EVENT_STOP: stop the setup timer, drop all queued skbs,
 * sever and free the path (if any) and tell the device state machine
 * the connection is down.
 */
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
954
955static void conn_action_inval(fsm_instance *fi, int event, void *arg)
956{
957 struct iucv_connection *conn = arg;
958 struct net_device *netdev = conn->netdev;
959
960 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
961 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
962}
963
/*
 * Transition table of the connection state machine:
 * { current state, incoming event, action }.
 */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	/* Incoming requests are only accepted while (re)connecting. */
	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

/* Number of entries in conn_fsm. */
static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
997
998
999/*
1000 * Actions for interface - statemachine.
1001 */
1002
1003/**
1004 * dev_action_start
1005 * @fi: An instance of an interface statemachine.
1006 * @event: The event, just happened.
1007 * @arg: Generic pointer, casted from struct net_device * upon call.
1008 *
1009 * Startup connection by sending CONN_EVENT_START to it.
1010 */
1011static void dev_action_start(fsm_instance *fi, int event, void *arg)
1012{
1013 struct net_device *dev = arg;
1014 struct netiucv_priv *privptr = netdev_priv(dev);
1015
1016 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1017
1018 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1019 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1020}
1021
1022/**
1023 * Shutdown connection by sending CONN_EVENT_STOP to it.
1024 *
1025 * @param fi An instance of an interface statemachine.
1026 * @param event The event, just happened.
1027 * @param arg Generic pointer, casted from struct net_device * upon call.
1028 */
1029static void
1030dev_action_stop(fsm_instance *fi, int event, void *arg)
1031{
1032 struct net_device *dev = arg;
1033 struct netiucv_priv *privptr = netdev_priv(dev);
1034 struct iucv_event ev;
1035
1036 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1037
1038 ev.conn = privptr->conn;
1039
1040 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1041 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1042}
1043
/**
 * Called from connection statemachine
 * when a connection is up and running.
 *
 * @param fi An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg Generic pointer, casted from struct net_device * upon call.
 *
 * Only DEV_STATE_STARTWAIT and DEV_STATE_STOPWAIT are handled; CONUP
 * in any other state is silently ignored.
 */
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT:
		fsm_newstate(fi, DEV_STATE_RUNNING);
		PRINT_INFO("%s: connected with remote side %s\n",
			   dev->name, privptr->conn->userid);
		IUCV_DBF_TEXT(setup, 3,
			      "connection is up and running\n");
		break;
	case DEV_STATE_STOPWAIT:
		/* Connection came up while the interface is shutting
		 * down: log it, but stay in STOPWAIT. */
		PRINT_INFO(
			"%s: got connection UP event during shutdown!\n",
			dev->name);
		IUCV_DBF_TEXT(data, 2,
			      "dev_action_connup: in DEV_STATE_STOPWAIT\n");
		break;
	}
}
1077
/**
 * Called from connection statemachine
 * when a connection has been shutdown.
 *
 * @param fi An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg Generic pointer, casted from struct net_device * upon call.
 *
 * RUNNING -> STARTWAIT (unexpected loss, wait for reconnect);
 * STOPWAIT -> STOPPED (orderly shutdown complete). Other states
 * silently ignore the event.
 */
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		fsm_newstate(fi, DEV_STATE_STARTWAIT);
		break;
	case DEV_STATE_STOPWAIT:
		fsm_newstate(fi, DEV_STATE_STOPPED);
		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
		break;
	}
}
1101
/*
 * Transition table of the interface (device) state machine:
 * { current state, incoming event, action }.
 */
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   fsm_action_nop      },
};

/* Number of entries in dev_fsm. */
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1117
1118/**
1119 * Transmit a packet.
1120 * This is a helper function for netiucv_tx().
1121 *
1122 * @param conn Connection to be used for sending.
1123 * @param skb Pointer to struct sk_buff of packet to send.
1124 * The linklevel header has already been set up
1125 * by netiucv_tx().
1126 *
1127 * @return 0 on success, -ERRNO on failure. (Never fails.)
1128 */
1129static int netiucv_transmit_skb(struct iucv_connection *conn,
1130 struct sk_buff *skb)
1131{
1132 struct iucv_message msg;
1133 unsigned long saveflags;
1134 struct ll_header header;
1135 int rc;
1136
1137 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1138 int l = skb->len + NETIUCV_HDRLEN;
1139
1140 spin_lock_irqsave(&conn->collect_lock, saveflags);
1141 if (conn->collect_len + l >
1142 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1143 rc = -EBUSY;
1144 IUCV_DBF_TEXT(data, 2,
1145 "EBUSY from netiucv_transmit_skb\n");
1146 } else {
1147 atomic_inc(&skb->users);
1148 skb_queue_tail(&conn->collect_queue, skb);
1149 conn->collect_len += l;
1150 rc = 0;
1151 }
1152 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1153 } else {
1154 struct sk_buff *nskb = skb;
1155 /**
1156 * Copy the skb to a new allocated skb in lowmem only if the
1157 * data is located above 2G in memory or tailroom is < 2.
1158 */
1159 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1160 NETIUCV_HDRLEN)) >> 31;
1161 int copied = 0;
1162 if (hi || (skb_tailroom(skb) < 2)) {
1163 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1164 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1165 if (!nskb) {
1166 PRINT_WARN("%s: Could not allocate tx_skb\n",
1167 conn->netdev->name);
1168 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1169 rc = -ENOMEM;
1170 return rc;
1171 } else {
1172 skb_reserve(nskb, NETIUCV_HDRLEN);
1173 memcpy(skb_put(nskb, skb->len),
1174 skb->data, skb->len);
1175 }
1176 copied = 1;
1177 }
1178 /**
1179 * skb now is below 2G and has enough room. Add headers.
1180 */
1181 header.next = nskb->len + NETIUCV_HDRLEN;
1182 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1183 header.next = 0;
1184 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1185
1186 fsm_newstate(conn->fsm, CONN_STATE_TX);
1187 conn->prof.send_stamp = current_kernel_time();
1188
1189 msg.tag = 1;
1190 msg.class = 0;
1191 rc = iucv_message_send(conn->path, &msg, 0, 0,
1192 nskb->data, nskb->len);
1193 conn->prof.doios_single++;
1194 conn->prof.txlen += skb->len;
1195 conn->prof.tx_pending++;
1196 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1197 conn->prof.tx_max_pending = conn->prof.tx_pending;
1198 if (rc) {
1199 struct netiucv_priv *privptr;
1200 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1201 conn->prof.tx_pending--;
1202 privptr = netdev_priv(conn->netdev);
1203 if (privptr)
1204 privptr->stats.tx_errors++;
1205 if (copied)
1206 dev_kfree_skb(nskb);
1207 else {
1208 /**
1209 * Remove our headers. They get added
1210 * again on retransmit.
1211 */
1212 skb_pull(skb, NETIUCV_HDRLEN);
1213 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1214 }
1215 PRINT_WARN("iucv_send returned %08x\n", rc);
1216 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1217 } else {
1218 if (copied)
1219 dev_kfree_skb(skb);
1220 atomic_inc(&nskb->users);
1221 skb_queue_tail(&conn->commit_queue, nskb);
1222 }
1223 }
1224
1225 return rc;
1226}
1227
1228/*
1229 * Interface API for upper network layers
1230 */
1231
1232/**
1233 * Open an interface.
1234 * Called from generic network layer when ifconfig up is run.
1235 *
1236 * @param dev Pointer to interface struct.
1237 *
1238 * @return 0 on success, -ERRNO on failure. (Never fails.)
1239 */
1240static int netiucv_open(struct net_device *dev)
1241{
1242 struct netiucv_priv *priv = netdev_priv(dev);
1243
1244 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1245 return 0;
1246}
1247
1248/**
1249 * Close an interface.
1250 * Called from generic network layer when ifconfig down is run.
1251 *
1252 * @param dev Pointer to interface struct.
1253 *
1254 * @return 0 on success, -ERRNO on failure. (Never fails.)
1255 */
1256static int netiucv_close(struct net_device *dev)
1257{
1258 struct netiucv_priv *priv = netdev_priv(dev);
1259
1260 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1261 return 0;
1262}
1263
1264/**
1265 * Start transmission of a packet.
1266 * Called from generic network device layer.
1267 *
1268 * @param skb Pointer to buffer containing the packet.
1269 * @param dev Pointer to interface struct.
1270 *
1271 * @return 0 if packet consumed, !0 if packet rejected.
1272 * Note: If we return !0, then the packet is free'd by
1273 * the generic network layer.
1274 */
1275static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1276{
1277 struct netiucv_priv *privptr = netdev_priv(dev);
1278 int rc;
1279
1280 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1281 /**
1282 * Some sanity checks ...
1283 */
1284 if (skb == NULL) {
1285 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1286 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1287 privptr->stats.tx_dropped++;
1288 return 0;
1289 }
1290 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1291 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1292 dev->name, NETIUCV_HDRLEN);
1293 IUCV_DBF_TEXT(data, 2,
1294 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1295 dev_kfree_skb(skb);
1296 privptr->stats.tx_dropped++;
1297 return 0;
1298 }
1299
1300 /**
1301 * If connection is not running, try to restart it
1302 * and throw away packet.
1303 */
1304 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1305 if (!in_atomic())
1306 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1307 dev_kfree_skb(skb);
1308 privptr->stats.tx_dropped++;
1309 privptr->stats.tx_errors++;
1310 privptr->stats.tx_carrier_errors++;
1311 return 0;
1312 }
1313
1314 if (netiucv_test_and_set_busy(dev)) {
1315 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1316 return -EBUSY;
1317 }
1318 dev->trans_start = jiffies;
1319 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1320 netiucv_clear_busy(dev);
1321 return rc;
1322}
1323
1324/**
1325 * netiucv_stats
1326 * @dev: Pointer to interface struct.
1327 *
1328 * Returns interface statistics of a device.
1329 *
1330 * Returns pointer to stats struct of this interface.
1331 */
1332static struct net_device_stats *netiucv_stats (struct net_device * dev)
1333{
1334 struct netiucv_priv *priv = netdev_priv(dev);
1335
1336 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1337 return &priv->stats;
1338}
1339
1340/**
1341 * netiucv_change_mtu
1342 * @dev: Pointer to interface struct.
1343 * @new_mtu: The new MTU to use for this interface.
1344 *
1345 * Sets MTU of an interface.
1346 *
1347 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1348 * (valid range is 576 .. NETIUCV_MTU_MAX).
1349 */
1350static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1351{
1352 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1353 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1354 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1355 return -EINVAL;
1356 }
1357 dev->mtu = new_mtu;
1358 return 0;
1359}
1360
1361/*
1362 * attributes in sysfs
1363 */
1364
1365static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1366 char *buf)
1367{
1368 struct netiucv_priv *priv = dev->driver_data;
1369
1370 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1371 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1372}
1373
/*
 * Set the peer user id for this interface via sysfs.
 * The id is upper-cased, blank-padded to 8 characters, and rejected
 * if the interface is active or the id is already in use elsewhere.
 */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char *p;
	char *tmp;
	char username[9];
	int i;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* A user id is at most 8 characters plus an optional newline. */
	if (count > 9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
		IUCV_DBF_TEXT_(setup, 2,
			       "%d is length of username\n", (int) count);
		return -EINVAL;
	}

	tmp = strsep((char **) &buf, "\n");
	/* Only alphanumerics and '$' are accepted; input is upper-cased. */
	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$')) {
			username[i]= toupper(*p);
			continue;
		}
		if (*p == '\n') {
			/* trailing lf, grr */
			break;
		}
		PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
		IUCV_DBF_TEXT_(setup, 2,
			       "username: invalid character %c\n", *p);
		return -EINVAL;
	}
	/* Blank-pad to the fixed 8-character id format. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		PRINT_WARN("netiucv: device %s active, connected to %s\n",
			   dev->bus_id, priv->conn->userid);
		PRINT_WARN("netiucv: user cannot be updated\n");
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EBUSY;
	}
	/* Reject the id if another interface already connects to it. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			PRINT_WARN("netiucv: Connection to %s already "
				   "exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
1436
1437static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1438 char *buf)
1439{ struct netiucv_priv *priv = dev->driver_data;
1440
1441 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1442 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1443}
1444
/*
 * Set the maximum IUCV buffer size via sysfs. When the interface is
 * down, the MTU is adjusted to match the new buffer size.
 */
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char *e;
	int bs1;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	if (count >= 39)
		return -EINVAL;

	bs1 = simple_strtoul(buf, &e, 0);

	/* e points at the first unparsed character; anything other than
	 * whitespace there means the input was not a plain number. */
	if (e && (!isspace(*e))) {
		PRINT_WARN("netiucv: Invalid character in buffer!\n");
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		PRINT_WARN("netiucv: Given buffer size %d too large.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	/* A running interface needs room for MTU plus header overhead. */
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	/* Absolute lower bound: minimum MTU (576) plus two headers. */
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
			bs1);
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;

}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1499
1500static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1501 char *buf)
1502{
1503 struct netiucv_priv *priv = dev->driver_data;
1504
1505 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1506 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1507}
1508
1509static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1510
1511static ssize_t conn_fsm_show (struct device *dev,
1512 struct device_attribute *attr, char *buf)
1513{
1514 struct netiucv_priv *priv = dev->driver_data;
1515
1516 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1517 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1518}
1519
1520static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1521
1522static ssize_t maxmulti_show (struct device *dev,
1523 struct device_attribute *attr, char *buf)
1524{
1525 struct netiucv_priv *priv = dev->driver_data;
1526
1527 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1528 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1529}
1530
1531static ssize_t maxmulti_write (struct device *dev,
1532 struct device_attribute *attr,
1533 const char *buf, size_t count)
1534{
1535 struct netiucv_priv *priv = dev->driver_data;
1536
1537 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1538 priv->conn->prof.maxmulti = 0;
1539 return count;
1540}
1541
1542static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1543
1544static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1545 char *buf)
1546{
1547 struct netiucv_priv *priv = dev->driver_data;
1548
1549 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1550 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1551}
1552
1553static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1554 const char *buf, size_t count)
1555{
1556 struct netiucv_priv *priv = dev->driver_data;
1557
1558 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1559 priv->conn->prof.maxcqueue = 0;
1560 return count;
1561}
1562
1563static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1564
1565static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1566 char *buf)
1567{
1568 struct netiucv_priv *priv = dev->driver_data;
1569
1570 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1571 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1572}
1573
1574static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1575 const char *buf, size_t count)
1576{
1577 struct netiucv_priv *priv = dev->driver_data;
1578
1579 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1580 priv->conn->prof.doios_single = 0;
1581 return count;
1582}
1583
1584static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1585
1586static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1587 char *buf)
1588{
1589 struct netiucv_priv *priv = dev->driver_data;
1590
1591 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1592 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1593}
1594
1595static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1596 const char *buf, size_t count)
1597{
1598 struct netiucv_priv *priv = dev->driver_data;
1599
1600 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1601 priv->conn->prof.doios_multi = 0;
1602 return count;
1603}
1604
1605static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1606
1607static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1608 char *buf)
1609{
1610 struct netiucv_priv *priv = dev->driver_data;
1611
1612 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1613 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1614}
1615
1616static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1617 const char *buf, size_t count)
1618{
1619 struct netiucv_priv *priv = dev->driver_data;
1620
1621 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1622 priv->conn->prof.txlen = 0;
1623 return count;
1624}
1625
1626static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1627
1628static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1629 char *buf)
1630{
1631 struct netiucv_priv *priv = dev->driver_data;
1632
1633 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1634 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1635}
1636
1637static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1638 const char *buf, size_t count)
1639{
1640 struct netiucv_priv *priv = dev->driver_data;
1641
1642 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1643 priv->conn->prof.tx_time = 0;
1644 return count;
1645}
1646
1647static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1648
1649static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1650 char *buf)
1651{
1652 struct netiucv_priv *priv = dev->driver_data;
1653
1654 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1655 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1656}
1657
1658static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1659 const char *buf, size_t count)
1660{
1661 struct netiucv_priv *priv = dev->driver_data;
1662
1663 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1664 priv->conn->prof.tx_pending = 0;
1665 return count;
1666}
1667
1668static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1669
1670static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1671 char *buf)
1672{
1673 struct netiucv_priv *priv = dev->driver_data;
1674
1675 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1676 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1677}
1678
1679static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1680 const char *buf, size_t count)
1681{
1682 struct netiucv_priv *priv = dev->driver_data;
1683
1684 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1685 priv->conn->prof.tx_max_pending = 0;
1686 return count;
1687}
1688
1689static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1690
/* Configuration attributes created directly under the device. */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};
1700
/* Profiling/statistics attributes, grouped under a "stats" subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};
1719
1720static int netiucv_add_files(struct device *dev)
1721{
1722 int ret;
1723
1724 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1725 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1726 if (ret)
1727 return ret;
1728 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1729 if (ret)
1730 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1731 return ret;
1732}
1733
/* Remove both sysfs attribute groups, in reverse order of creation. */
static void netiucv_remove_files(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
1740
1741static int netiucv_register_device(struct net_device *ndev)
1742{
1743 struct netiucv_priv *priv = netdev_priv(ndev);
1744 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1745 int ret;
1746
1747
1748 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1749
1750 if (dev) {
1751 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1752 dev->bus = &iucv_bus;
1753 dev->parent = iucv_root;
1754 /*
1755 * The release function could be called after the
1756 * module has been unloaded. It's _only_ task is to
1757 * free the struct. Therefore, we specify kfree()
1758 * directly here. (Probably a little bit obfuscating
1759 * but legitime ...).
1760 */
1761 dev->release = (void (*)(struct device *))kfree;
1762 dev->driver = &netiucv_driver;
1763 } else
1764 return -ENOMEM;
1765
1766 ret = device_register(dev);
1767
1768 if (ret)
1769 return ret;
1770 ret = netiucv_add_files(dev);
1771 if (ret)
1772 goto out_unreg;
1773 priv->dev = dev;
1774 dev->driver_data = priv;
1775 return 0;
1776
1777out_unreg:
1778 device_unregister(dev);
1779 return ret;
1780}
1781
/* Remove the sysfs files and unregister the pseudo device again. */
static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	netiucv_remove_files(dev);
	device_unregister(dev);
}
1788
1789/**
1790 * Allocate and initialize a new connection structure.
1791 * Add it to the list of netiucv connections;
1792 */
1793static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1794 char *username)
1795{
1796 struct iucv_connection *conn;
1797
1798 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1799 if (!conn)
1800 goto out;
1801 skb_queue_head_init(&conn->collect_queue);
1802 skb_queue_head_init(&conn->commit_queue);
1803 spin_lock_init(&conn->collect_lock);
1804 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1805 conn->netdev = dev;
1806
1807 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1808 if (!conn->rx_buff)
1809 goto out_conn;
1810 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1811 if (!conn->tx_buff)
1812 goto out_rx;
1813 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1814 conn_event_names, NR_CONN_STATES,
1815 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1816 GFP_KERNEL);
1817 if (!conn->fsm)
1818 goto out_tx;
1819
1820 fsm_settimer(conn->fsm, &conn->timer);
1821 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1822
1823 if (username) {
1824 memcpy(conn->userid, username, 9);
1825 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1826 }
1827
1828 write_lock_bh(&iucv_connection_rwlock);
1829 list_add_tail(&conn->list, &iucv_connection_list);
1830 write_unlock_bh(&iucv_connection_rwlock);
1831 return conn;
1832
1833out_tx:
1834 kfree_skb(conn->tx_buff);
1835out_rx:
1836 kfree_skb(conn->rx_buff);
1837out_conn:
1838 kfree(conn);
1839out:
1840 return NULL;
1841}
1842
1843/**
1844 * Release a connection structure and remove it from the
1845 * list of netiucv connections.
1846 */
1847static void netiucv_remove_connection(struct iucv_connection *conn)
1848{
1849 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1850 write_lock_bh(&iucv_connection_rwlock);
1851 list_del_init(&conn->list);
1852 write_unlock_bh(&iucv_connection_rwlock);
1853 fsm_deltimer(&conn->timer);
1854 netiucv_purge_skb_queue(&conn->collect_queue);
1855 if (conn->path) {
1856 iucv_path_sever(conn->path, iucvMagic);
1857 kfree(conn->path);
1858 conn->path = NULL;
1859 }
1860 netiucv_purge_skb_queue(&conn->commit_queue);
1861 kfree_fsm(conn->fsm);
1862 kfree_skb(conn->rx_buff);
1863 kfree_skb(conn->tx_buff);
1864}
1865
1866/**
1867 * Release everything of a net device.
1868 */
1869static void netiucv_free_netdevice(struct net_device *dev)
1870{
1871 struct netiucv_priv *privptr = netdev_priv(dev);
1872
1873 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1874
1875 if (!dev)
1876 return;
1877
1878 if (privptr) {
1879 if (privptr->conn)
1880 netiucv_remove_connection(privptr->conn);
1881 if (privptr->fsm)
1882 kfree_fsm(privptr->fsm);
1883 privptr->conn = NULL; privptr->fsm = NULL;
1884 /* privptr gets freed by free_netdev() */
1885 }
1886 free_netdev(dev);
1887}
1888
1889/**
1890 * Initialize a net device. (Called from kernel in alloc_netdev())
1891 */
1892static void netiucv_setup_netdevice(struct net_device *dev)
1893{
1894 dev->mtu = NETIUCV_MTU_DEFAULT;
1895 dev->hard_start_xmit = netiucv_tx;
1896 dev->open = netiucv_open;
1897 dev->stop = netiucv_close;
1898 dev->get_stats = netiucv_stats;
1899 dev->change_mtu = netiucv_change_mtu;
1900 dev->destructor = netiucv_free_netdevice;
1901 dev->hard_header_len = NETIUCV_HDRLEN;
1902 dev->addr_len = 0;
1903 dev->type = ARPHRD_SLIP;
1904 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1905 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1906}
1907
1908/**
1909 * Allocate and initialize everything of a net device.
1910 */
1911static struct net_device *netiucv_init_netdevice(char *username)
1912{
1913 struct netiucv_priv *privptr;
1914 struct net_device *dev;
1915
1916 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1917 netiucv_setup_netdevice);
1918 if (!dev)
1919 return NULL;
1920 if (dev_alloc_name(dev, dev->name) < 0)
1921 goto out_netdev;
1922
1923 privptr = netdev_priv(dev);
1924 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1925 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1926 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1927 if (!privptr->fsm)
1928 goto out_netdev;
1929
1930 privptr->conn = netiucv_new_connection(dev, username);
1931 if (!privptr->conn) {
1932 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1933 goto out_fsm;
1934 }
1935 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1936 return dev;
1937
1938out_fsm:
1939 kfree_fsm(privptr->fsm);
1940out_netdev:
1941 free_netdev(dev);
1942 return NULL;
1943}
1944
/*
 * Driver attribute: create a new netiucv interface connected to the
 * z/VM user id written into the "connection" attribute.
 */
static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	const char *p;
	char username[9];
	int i, rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* A user id is at most 8 characters plus an optional newline. */
	if (count>9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	/* Only alphanumerics and '$' are accepted; input is upper-cased. */
	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		PRINT_WARN("netiucv: Invalid character in username!\n");
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %c\n", *p);
		return -EINVAL;
	}
	/* Blank-pad to the fixed 8-character id format. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	/* Refuse a duplicate connection to the same peer. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9)) {
			read_unlock_bh(&iucv_connection_rwlock);
			PRINT_WARN("netiucv: Connection to %s already "
				   "exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username);
	if (!dev) {
		PRINT_WARN("netiucv: Could not allocate network device "
			   "structure for user '%s'\n",
			   netiucv_printname(username));
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdev(dev);
	if (rc)
		goto out_unreg;

	PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
	IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2028
2029static ssize_t remove_write (struct device_driver *drv,
2030 const char *buf, size_t count)
2031{
2032 struct iucv_connection *cp;
2033 struct net_device *ndev;
2034 struct netiucv_priv *priv;
2035 struct device *dev;
2036 char name[IFNAMSIZ];
2037 const char *p;
2038 int i;
2039
2040 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2041
2042 if (count >= IFNAMSIZ)
2043 count = IFNAMSIZ - 1;;
2044
2045 for (i = 0, p = buf; i < count && *p; i++, p++) {
2046 if (*p == '\n' || *p == ' ')
2047 /* trailing lf, grr */
2048 break;
2049 name[i] = *p;
2050 }
2051 name[i] = '\0';
2052
2053 read_lock_bh(&iucv_connection_rwlock);
2054 list_for_each_entry(cp, &iucv_connection_list, list) {
2055 ndev = cp->netdev;
2056 priv = netdev_priv(ndev);
2057 dev = priv->dev;
2058 if (strncmp(name, ndev->name, count))
2059 continue;
2060 read_unlock_bh(&iucv_connection_rwlock);
2061 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2062 PRINT_WARN("netiucv: net device %s active with peer "
2063 "%s\n", ndev->name, priv->conn->userid);
2064 PRINT_WARN("netiucv: %s cannot be removed\n",
2065 ndev->name);
2066 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2067 return -EBUSY;
2068 }
2069 unregister_netdev(ndev);
2070 netiucv_unregister_device(dev);
2071 return count;
2072 }
2073 read_unlock_bh(&iucv_connection_rwlock);
2074 PRINT_WARN("netiucv: net device %s unknown\n", name);
2075 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2076 return -EINVAL;
2077}
2078
2079static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2080
/* Driver-level sysfs attributes: "connection" (add) and "remove". */
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

/* NULL-terminated group list, hooked into the driver in netiucv_init(). */
static struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
2095
/* Print a one-line load banner to the kernel log. */
static void netiucv_banner(void)
{
	PRINT_INFO("NETIUCV driver initialized\n");
}
2100
2101static void __exit netiucv_exit(void)
2102{
2103 struct iucv_connection *cp;
2104 struct net_device *ndev;
2105 struct netiucv_priv *priv;
2106 struct device *dev;
2107
2108 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2109 while (!list_empty(&iucv_connection_list)) {
2110 cp = list_entry(iucv_connection_list.next,
2111 struct iucv_connection, list);
2112 ndev = cp->netdev;
2113 priv = netdev_priv(ndev);
2114 dev = priv->dev;
2115
2116 unregister_netdev(ndev);
2117 netiucv_unregister_device(dev);
2118 }
2119
2120 driver_unregister(&netiucv_driver);
2121 iucv_unregister(&netiucv_handler, 1);
2122 iucv_unregister_dbf_views();
2123
2124 PRINT_INFO("NETIUCV driver unloaded\n");
2125 return;
2126}
2127
/*
 * Module init: register debug views, the IUCV handler and the driver,
 * unwinding in reverse order on failure.
 */
static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* Attach the driver attribute groups before registration so the
	 * "connection"/"remove" files appear atomically with the driver. */
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		PRINT_ERR("NETIUCV: failed to register driver.\n");
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}

	netiucv_banner();
	return rc;

out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}
2157
/* Module entry/exit points and license declaration. */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");