drivers/s390/net/netiucv.c
1 /*
2 * IUCV network driver
3 *
4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 *
7 * Sysfs integration and all bugs therein by Cornelia Huck
8 * (cornelia.huck@de.ibm.com)
9 *
10 * Documentation used:
11 * the source of the original IUCV driver by:
12 * Stefan Hegewald <hegewald@de.ibm.com>
13 * Hartmut Penner <hpenner@de.ibm.com>
14 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15 * Martin Schwidefsky (schwidefsky@de.ibm.com)
16 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 */
33
34#define KMSG_COMPONENT "netiucv"
35#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
36
37#undef DEBUG
38
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/slab.h>
43#include <linux/errno.h>
44#include <linux/types.h>
45#include <linux/interrupt.h>
46#include <linux/timer.h>
47#include <linux/bitops.h>
48
49#include <linux/signal.h>
50#include <linux/string.h>
51#include <linux/device.h>
52
53#include <linux/ip.h>
54#include <linux/if_arp.h>
55#include <linux/tcp.h>
56#include <linux/skbuff.h>
57#include <linux/ctype.h>
58#include <net/dst.h>
59
60#include <asm/io.h>
61#include <asm/uaccess.h>
62
63 #include <net/iucv/iucv.h>
64#include "fsm.h"
65
66MODULE_AUTHOR
67 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
68MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
69
70/**
71 * Debug Facility stuff
72 */
73#define IUCV_DBF_SETUP_NAME "iucv_setup"
74#define IUCV_DBF_SETUP_LEN 32
75#define IUCV_DBF_SETUP_PAGES 2
76#define IUCV_DBF_SETUP_NR_AREAS 1
77#define IUCV_DBF_SETUP_LEVEL 3
78
79#define IUCV_DBF_DATA_NAME "iucv_data"
80#define IUCV_DBF_DATA_LEN 128
81#define IUCV_DBF_DATA_PAGES 2
82#define IUCV_DBF_DATA_NR_AREAS 1
83#define IUCV_DBF_DATA_LEVEL 2
84
85#define IUCV_DBF_TRACE_NAME "iucv_trace"
86#define IUCV_DBF_TRACE_LEN 16
87#define IUCV_DBF_TRACE_PAGES 4
88#define IUCV_DBF_TRACE_NR_AREAS 1
89#define IUCV_DBF_TRACE_LEVEL 3
90
91#define IUCV_DBF_TEXT(name,level,text) \
92 do { \
93 debug_text_event(iucv_dbf_##name,level,text); \
94 } while (0)
95
96#define IUCV_DBF_HEX(name,level,addr,len) \
97 do { \
98 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
99 } while (0)
100
101DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
102
103/* Allow to sort out low debug levels early to avoid wasted sprints */
104static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
105{
106 return (level <= dbf_grp->level);
107}
108
109#define IUCV_DBF_TEXT_(name, level, text...) \
110 do { \
111 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
112 char* iucv_dbf_txt_buf = \
113 get_cpu_var(iucv_dbf_txt_buf); \
114 sprintf(iucv_dbf_txt_buf, text); \
115 debug_text_event(iucv_dbf_##name, level, \
116 iucv_dbf_txt_buf); \
117 put_cpu_var(iucv_dbf_txt_buf); \
118 } \
119 } while (0)
120
121#define IUCV_DBF_SPRINTF(name,level,text...) \
122 do { \
123 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
124 debug_sprintf_event(iucv_dbf_trace, level, text ); \
125 } while (0)
126
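/*
 * Illustrative use of the debug macros above (calls shown here are
 * examples only, not additions to the driver logic):
 *
 *   IUCV_DBF_TEXT(trace, 3, __func__);
 *   IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_connect\n", rc);
 *   IUCV_DBF_HEX(data, 2, skb->data, skb->len);
 *
 * IUCV_DBF_TEXT_() only formats its arguments into the per-cpu buffer
 * when the message level passes the current level of the debug area
 * (see iucv_dbf_passes() above).
 */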
127/**
128 * some more debug stuff
129 */
130#define IUCV_HEXDUMP16(importance,header,ptr) \
131PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
132 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
133 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
134 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
135 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
136 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
137 *(((char*)ptr)+12),*(((char*)ptr)+13), \
138 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
139PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
140 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
141 *(((char*)ptr)+16),*(((char*)ptr)+17), \
142 *(((char*)ptr)+18),*(((char*)ptr)+19), \
143 *(((char*)ptr)+20),*(((char*)ptr)+21), \
144 *(((char*)ptr)+22),*(((char*)ptr)+23), \
145 *(((char*)ptr)+24),*(((char*)ptr)+25), \
146 *(((char*)ptr)+26),*(((char*)ptr)+27), \
147 *(((char*)ptr)+28),*(((char*)ptr)+29), \
148 *(((char*)ptr)+30),*(((char*)ptr)+31));
149
150#define PRINTK_HEADER " iucv: " /* for debugging */
151
152static struct device_driver netiucv_driver = {
153 .owner = THIS_MODULE,
154 .name = "netiucv",
155 .bus = &iucv_bus,
156};
157
158static int netiucv_callback_connreq(struct iucv_path *,
159 u8 ipvmid[8], u8 ipuser[16]);
160static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
161static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
162static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
163static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
164static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
165static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
166
167static struct iucv_handler netiucv_handler = {
168 .path_pending = netiucv_callback_connreq,
169 .path_complete = netiucv_callback_connack,
170 .path_severed = netiucv_callback_connrej,
171 .path_quiesced = netiucv_callback_connsusp,
172 .path_resumed = netiucv_callback_connres,
173 .message_pending = netiucv_callback_rx,
174 .message_complete = netiucv_callback_txdone
175};
176
177/**
178 * Per connection profiling data
179 */
180struct connection_profile {
181 unsigned long maxmulti;
182 unsigned long maxcqueue;
183 unsigned long doios_single;
184 unsigned long doios_multi;
185 unsigned long txlen;
186 unsigned long tx_time;
187 struct timespec send_stamp;
188 unsigned long tx_pending;
189 unsigned long tx_max_pending;
190};
191
192/**
193 * Representation of one iucv connection
194 */
195struct iucv_connection {
196 struct list_head list;
197 struct iucv_path *path;
198 struct sk_buff *rx_buff;
199 struct sk_buff *tx_buff;
200 struct sk_buff_head collect_queue;
201 struct sk_buff_head commit_queue;
202 spinlock_t collect_lock;
203 int collect_len;
204 int max_buffsize;
205 fsm_timer timer;
206 fsm_instance *fsm;
207 struct net_device *netdev;
208 struct connection_profile prof;
209 char userid[9];
210};
211
212/**
213 * Linked list of all connection structs.
214 */
215 static LIST_HEAD(iucv_connection_list);
216 static DEFINE_RWLOCK(iucv_connection_rwlock);
217
218/**
219 * Representation of event-data for the
220 * connection state machine.
221 */
222struct iucv_event {
223 struct iucv_connection *conn;
224 void *data;
225};
226
227/**
228 * Private part of the network device structure
229 */
230struct netiucv_priv {
231 struct net_device_stats stats;
232 unsigned long tbusy;
233 fsm_instance *fsm;
234 struct iucv_connection *conn;
235 struct device *dev;
236};
237
238/**
239 * Link level header for a packet.
240 */
241struct ll_header {
242 u16 next;
243};
244
245 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
246#define NETIUCV_BUFSIZE_MAX 32768
247#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
248#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
249#define NETIUCV_MTU_DEFAULT 9216
250#define NETIUCV_QUEUELEN_DEFAULT 50
251#define NETIUCV_TIMEOUT_5SEC 5000
252
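/*
 * Note on the wire format (derived from netiucv_unpack_skb() and the
 * transmit path below): an IUCV buffer carries one or more packets,
 * each prefixed by a 2-byte ll_header whose "next" field holds the
 * offset of the following header within the buffer; a header with
 * next == 0 terminates the chain.  NETIUCV_MTU_MAX therefore leaves
 * room for one such header within NETIUCV_BUFSIZE_MAX.
 */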
253/**
254 * Compatibility macros for busy handling
255 * of network devices.
256 */
257 static inline void netiucv_clear_busy(struct net_device *dev)
258 {
259 struct netiucv_priv *priv = netdev_priv(dev);
260 clear_bit(0, &priv->tbusy);
261 netif_wake_queue(dev);
262}
263
264 static inline int netiucv_test_and_set_busy(struct net_device *dev)
265 {
266 struct netiucv_priv *priv = netdev_priv(dev);
267 netif_stop_queue(dev);
268 return test_and_set_bit(0, &priv->tbusy);
269}
270
271 static u8 iucvMagic[16] = {
272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
273 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
274};
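/*
 * iucvMagic is a 16-byte EBCDIC constant (0xF0 = '0', 0x40 = blank).
 * It is passed as IUCV user data on connect/sever and checked in
 * netiucv_callback_connreq(), so that only another netiucv instance
 * (rather than an arbitrary IUCV application) is accepted as peer.
 */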
275
276/**
277 * Convert an iucv userId to its printable
278 * form (strip whitespace at end).
279 *
280 * @param name An iucv userId
281 *
282 * @returns The printable string (static data!!)
283 */
284 static char *netiucv_printname(char *name)
285{
286 static char tmp[9];
287 char *p = tmp;
288 memcpy(tmp, name, 8);
289 tmp[8] = '\0';
290 while (*p && (!isspace(*p)))
291 p++;
292 *p = '\0';
293 return tmp;
294}
295
296/**
297 * States of the interface statemachine.
298 */
299enum dev_states {
300 DEV_STATE_STOPPED,
301 DEV_STATE_STARTWAIT,
302 DEV_STATE_STOPWAIT,
303 DEV_STATE_RUNNING,
304 /**
305 * MUST always be the last element!!
306 */
307 NR_DEV_STATES
308};
309
310static const char *dev_state_names[] = {
311 "Stopped",
312 "StartWait",
313 "StopWait",
314 "Running",
315};
316
317/**
318 * Events of the interface statemachine.
319 */
320enum dev_events {
321 DEV_EVENT_START,
322 DEV_EVENT_STOP,
323 DEV_EVENT_CONUP,
324 DEV_EVENT_CONDOWN,
325 /**
326 * MUST always be the last element!!
327 */
328 NR_DEV_EVENTS
329};
330
331static const char *dev_event_names[] = {
332 "Start",
333 "Stop",
334 "Connection up",
335 "Connection down",
336};
337
338/**
339 * Events of the connection statemachine
340 */
341enum conn_events {
342 /**
343 * Events representing callbacks from the
344 * lowlevel iucv layer
345 */
346 CONN_EVENT_CONN_REQ,
347 CONN_EVENT_CONN_ACK,
348 CONN_EVENT_CONN_REJ,
349 CONN_EVENT_CONN_SUS,
350 CONN_EVENT_CONN_RES,
351 CONN_EVENT_RX,
352 CONN_EVENT_TXDONE,
353
354 /**
355 * Events representing error return codes from
356 * calls to the lowlevel iucv layer
357 */
358
359 /**
360 * Event, representing timer expiry.
361 */
362 CONN_EVENT_TIMER,
363
364 /**
365 * Events, representing commands from upper levels.
366 */
367 CONN_EVENT_START,
368 CONN_EVENT_STOP,
369
370 /**
371 * MUST always be the last element!!
372 */
373 NR_CONN_EVENTS,
374};
375
376static const char *conn_event_names[] = {
377 "Remote connection request",
378 "Remote connection acknowledge",
379 "Remote connection reject",
380 "Connection suspended",
381 "Connection resumed",
382 "Data received",
383 "Data sent",
384
385 "Timer",
386
387 "Start",
388 "Stop",
389};
390
391/**
392 * States of the connection statemachine.
393 */
394enum conn_states {
395 /**
396 * Connection not assigned to any device,
397 * initial state, invalid
398 */
399 CONN_STATE_INVALID,
400
401 /**
402 * Userid assigned but not operating
403 */
404 CONN_STATE_STOPPED,
405
406 /**
407 * Connection registered,
408 * no connection request sent yet,
409 * no connection request received
410 */
411 CONN_STATE_STARTWAIT,
412
413 /**
414 * Connection registered and connection request sent,
415 * no acknowledge and no connection request received yet.
416 */
417 CONN_STATE_SETUPWAIT,
418
419 /**
420 * Connection up and running idle
421 */
422 CONN_STATE_IDLE,
423
424 /**
425 * Data sent, awaiting CONN_EVENT_TXDONE
426 */
427 CONN_STATE_TX,
428
429 /**
430 * Error during registration.
431 */
432 CONN_STATE_REGERR,
433
434 /**
435 * Error during connection setup.
436 */
437 CONN_STATE_CONNERR,
438
439 /**
440 * MUST always be the last element!!
441 */
442 NR_CONN_STATES,
443};
444
445static const char *conn_state_names[] = {
446 "Invalid",
447 "Stopped",
448 "StartWait",
449 "SetupWait",
450 "Idle",
451 "TX",
452 "Terminating",
453 "Registration error",
454 "Connect error",
455};
456
457
458/**
459 * Debug Facility Stuff
460 */
461static debug_info_t *iucv_dbf_setup = NULL;
462static debug_info_t *iucv_dbf_data = NULL;
463static debug_info_t *iucv_dbf_trace = NULL;
464
465DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
466
467 static void iucv_unregister_dbf_views(void)
468{
469 if (iucv_dbf_setup)
470 debug_unregister(iucv_dbf_setup);
471 if (iucv_dbf_data)
472 debug_unregister(iucv_dbf_data);
473 if (iucv_dbf_trace)
474 debug_unregister(iucv_dbf_trace);
475}
476 static int iucv_register_dbf_views(void)
477{
478 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
479 IUCV_DBF_SETUP_PAGES,
480 IUCV_DBF_SETUP_NR_AREAS,
481 IUCV_DBF_SETUP_LEN);
482 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
483 IUCV_DBF_DATA_PAGES,
484 IUCV_DBF_DATA_NR_AREAS,
485 IUCV_DBF_DATA_LEN);
486 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
487 IUCV_DBF_TRACE_PAGES,
488 IUCV_DBF_TRACE_NR_AREAS,
489 IUCV_DBF_TRACE_LEN);
490
491 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
492 (iucv_dbf_trace == NULL)) {
493 iucv_unregister_dbf_views();
494 return -ENOMEM;
495 }
496 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
497 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
498
499 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
500 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
501
502 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
503 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
504
505 return 0;
506}
507
508 /*
509 * Callback-wrappers, called from lowlevel iucv layer.
510 */
511
512static void netiucv_callback_rx(struct iucv_path *path,
513 struct iucv_message *msg)
1da177e4 514{
515 struct iucv_connection *conn = path->private;
516 struct iucv_event ev;
517
518 ev.conn = conn;
519 ev.data = msg;
520 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
521}
522
523static void netiucv_callback_txdone(struct iucv_path *path,
524 struct iucv_message *msg)
1da177e4 525{
526 struct iucv_connection *conn = path->private;
527 struct iucv_event ev;
528
529 ev.conn = conn;
530 ev.data = msg;
531 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
532}
533
534 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
535 {
536 struct iucv_connection *conn = path->private;
537
538 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
539}
540
541static int netiucv_callback_connreq(struct iucv_path *path,
542 u8 ipvmid[8], u8 ipuser[16])
543 {
544 struct iucv_connection *conn = path->private;
545 struct iucv_event ev;
546 int rc;
547
548 if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
549 /* ipuser must match iucvMagic. */
550 return -EINVAL;
551 rc = -EINVAL;
552 read_lock_bh(&iucv_connection_rwlock);
553 list_for_each_entry(conn, &iucv_connection_list, list) {
554 if (strncmp(ipvmid, conn->userid, 8))
555 continue;
556 /* Found a matching connection for this path. */
557 conn->path = path;
558 ev.conn = conn;
559 ev.data = path;
560 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
561 rc = 0;
562 }
563 read_unlock_bh(&iucv_connection_rwlock);
564 return rc;
565}
566
567 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
568 {
569 struct iucv_connection *conn = path->private;
570
571 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
572}
573
574 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
575 {
576 struct iucv_connection *conn = path->private;
577
578 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
579}
580
581 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
582 {
583 struct iucv_connection *conn = path->private;
584
585 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
586}
587
588/**
589 * NOP action for statemachines
590 */
591 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
592{
593}
594
595 /*
596 * Actions of the connection statemachine
597 */
598
599/**
600 * netiucv_unpack_skb
601 * @conn: The connection where this skb has been received.
602 * @pskb: The received skb.
603 *
604 * Unpack a just received skb and hand it over to upper layers.
605 * Helper function for conn_action_rx.
606 */
607 static void netiucv_unpack_skb(struct iucv_connection *conn,
608 struct sk_buff *pskb)
609 {
610 struct net_device *dev = conn->netdev;
611 struct netiucv_priv *privptr = netdev_priv(dev);
612 u16 offset = 0;
613
614 skb_put(pskb, NETIUCV_HDRLEN);
615 pskb->dev = dev;
616 pskb->ip_summed = CHECKSUM_NONE;
617 pskb->protocol = ntohs(ETH_P_IP);
618
619 while (1) {
620 struct sk_buff *skb;
621 struct ll_header *header = (struct ll_header *) pskb->data;
622
623 if (!header->next)
624 break;
625
626 skb_pull(pskb, NETIUCV_HDRLEN);
627 header->next -= offset;
628 offset += header->next;
629 header->next -= NETIUCV_HDRLEN;
630 if (skb_tailroom(pskb) < header->next) {
631 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632 header->next, skb_tailroom(pskb));
633 return;
634 }
635 skb_put(pskb, header->next);
636 skb_reset_mac_header(pskb);
637 skb = dev_alloc_skb(pskb->len);
638 if (!skb) {
639 IUCV_DBF_TEXT(data, 2,
640 "Out of memory in netiucv_unpack_skb\n");
641 privptr->stats.rx_dropped++;
642 return;
643 }
644 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
645 pskb->len);
646 skb_reset_mac_header(skb);
647 skb->dev = pskb->dev;
648 skb->protocol = pskb->protocol;
649 pskb->ip_summed = CHECKSUM_UNNECESSARY;
650 privptr->stats.rx_packets++;
651 privptr->stats.rx_bytes += skb->len;
652 /*
653 * Since receiving is always initiated from a tasklet (in iucv.c),
654 * we must use netif_rx_ni() instead of netif_rx()
655 */
656 netif_rx_ni(skb);
657 dev->last_rx = jiffies;
658 skb_pull(pskb, header->next);
659 skb_put(pskb, NETIUCV_HDRLEN);
660 }
661}
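/*
 * Worked example for the unpack loop above (illustrative values only):
 * a received buffer holding two 100-byte packets looks like
 *
 *   offset 0:   ll_header.next = 102   (2 + 100)
 *   offset 2:   100 bytes of payload
 *   offset 102: ll_header.next = 204   (102 + 2 + 100)
 *   offset 104: 100 bytes of payload
 *   offset 204: ll_header.next = 0     (end of chain)
 *
 * Each iteration converts the absolute "next" offset into the length
 * of the current packet before copying it into a freshly allocated skb.
 */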
662
663 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
664 {
665 struct iucv_event *ev = arg;
666 struct iucv_connection *conn = ev->conn;
667 struct iucv_message *msg = ev->data;
668 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
669 int rc;
670
671 IUCV_DBF_TEXT(trace, 4, __func__);
672
673 if (!conn->netdev) {
674 iucv_message_reject(conn->path, msg);
675 IUCV_DBF_TEXT(data, 2,
676 "Received data for unlinked connection\n");
677 return;
678 }
679 if (msg->length > conn->max_buffsize) {
680 iucv_message_reject(conn->path, msg);
681 privptr->stats.rx_dropped++;
682 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
683 msg->length, conn->max_buffsize);
684 return;
685 }
686 conn->rx_buff->data = conn->rx_buff->head;
687 skb_reset_tail_pointer(conn->rx_buff);
688 conn->rx_buff->len = 0;
689 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
690 msg->length, NULL);
691 if (rc || msg->length < 5) {
692 privptr->stats.rx_errors++;
693 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
694 return;
695 }
696 netiucv_unpack_skb(conn, conn->rx_buff);
697}
698
699 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
700 {
701 struct iucv_event *ev = arg;
702 struct iucv_connection *conn = ev->conn;
703 struct iucv_message *msg = ev->data;
704 struct iucv_message txmsg;
705 struct netiucv_priv *privptr = NULL;
706 u32 single_flag = msg->tag;
707 u32 txbytes = 0;
708 u32 txpackets = 0;
709 u32 stat_maxcq = 0;
710 struct sk_buff *skb;
711 unsigned long saveflags;
712 struct ll_header header;
713 int rc;
714
715 IUCV_DBF_TEXT(trace, 4, __func__);
716
717 if (conn && conn->netdev)
718 privptr = netdev_priv(conn->netdev);
719 conn->prof.tx_pending--;
720 if (single_flag) {
721 if ((skb = skb_dequeue(&conn->commit_queue))) {
722 atomic_dec(&skb->users);
723 dev_kfree_skb_any(skb);
724 if (privptr) {
725 privptr->stats.tx_packets++;
726 privptr->stats.tx_bytes +=
727 (skb->len - NETIUCV_HDRLEN
728 - NETIUCV_HDRLEN);
729 }
730 }
731 }
732 conn->tx_buff->data = conn->tx_buff->head;
733 skb_reset_tail_pointer(conn->tx_buff);
734 conn->tx_buff->len = 0;
735 spin_lock_irqsave(&conn->collect_lock, saveflags);
736 while ((skb = skb_dequeue(&conn->collect_queue))) {
737 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
738 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
739 NETIUCV_HDRLEN);
740 skb_copy_from_linear_data(skb,
741 skb_put(conn->tx_buff, skb->len),
742 skb->len);
743 txbytes += skb->len;
744 txpackets++;
745 stat_maxcq++;
746 atomic_dec(&skb->users);
747 dev_kfree_skb_any(skb);
748 }
749 if (conn->collect_len > conn->prof.maxmulti)
750 conn->prof.maxmulti = conn->collect_len;
751 conn->collect_len = 0;
752 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
753 if (conn->tx_buff->len == 0) {
754 fsm_newstate(fi, CONN_STATE_IDLE);
755 return;
756 }
757
758 header.next = 0;
759 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
760 conn->prof.send_stamp = current_kernel_time();
761 txmsg.class = 0;
762 txmsg.tag = 0;
763 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
764 conn->tx_buff->data, conn->tx_buff->len);
765 conn->prof.doios_multi++;
766 conn->prof.txlen += conn->tx_buff->len;
767 conn->prof.tx_pending++;
768 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
769 conn->prof.tx_max_pending = conn->prof.tx_pending;
770 if (rc) {
771 conn->prof.tx_pending--;
772 fsm_newstate(fi, CONN_STATE_IDLE);
773 if (privptr)
774 privptr->stats.tx_errors += txpackets;
775 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
776 } else {
777 if (privptr) {
778 privptr->stats.tx_packets += txpackets;
779 privptr->stats.tx_bytes += txbytes;
780 }
781 if (stat_maxcq > conn->prof.maxcqueue)
782 conn->prof.maxcqueue = stat_maxcq;
783 }
784}
785
786 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
787 {
788 struct iucv_event *ev = arg;
789 struct iucv_connection *conn = ev->conn;
790 struct iucv_path *path = ev->data;
791 struct net_device *netdev = conn->netdev;
792 struct netiucv_priv *privptr = netdev_priv(netdev);
793 int rc;
794
795 IUCV_DBF_TEXT(trace, 3, __func__);
796
797 conn->path = path;
798 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
799 path->flags = 0;
800 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
801 if (rc) {
802 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
803 return;
804 }
805 fsm_newstate(fi, CONN_STATE_IDLE);
806 netdev->tx_queue_len = conn->path->msglim;
807 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
808}
809
810 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
811 {
812 struct iucv_event *ev = arg;
813 struct iucv_path *path = ev->data;
814
815 IUCV_DBF_TEXT(trace, 3, __func__);
816 iucv_path_sever(path, NULL);
817}
818
819 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
820 {
821 struct iucv_connection *conn = arg;
822 struct net_device *netdev = conn->netdev;
823 struct netiucv_priv *privptr = netdev_priv(netdev);
824
825 IUCV_DBF_TEXT(trace, 3, __func__);
826 fsm_deltimer(&conn->timer);
827 fsm_newstate(fi, CONN_STATE_IDLE);
828 netdev->tx_queue_len = conn->path->msglim;
829 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
830}
831
832 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
833 {
834 struct iucv_connection *conn = arg;
835
836 IUCV_DBF_TEXT(trace, 3, __func__);
837 fsm_deltimer(&conn->timer);
838 iucv_path_sever(conn->path, NULL);
839 fsm_newstate(fi, CONN_STATE_STARTWAIT);
840}
841
842 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
843 {
844 struct iucv_connection *conn = arg;
845 struct net_device *netdev = conn->netdev;
846 struct netiucv_priv *privptr = netdev_priv(netdev);
847
848 IUCV_DBF_TEXT(trace, 3, __func__);
849
850 fsm_deltimer(&conn->timer);
851 iucv_path_sever(conn->path, NULL);
852 dev_info(privptr->dev, "The peer interface of the IUCV device"
853 " has closed the connection\n");
854 IUCV_DBF_TEXT(data, 2,
855 "conn_action_connsever: Remote dropped connection\n");
856 fsm_newstate(fi, CONN_STATE_STARTWAIT);
857 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
858}
859
860 static void conn_action_start(fsm_instance *fi, int event, void *arg)
861 {
862 struct iucv_connection *conn = arg;
863 struct net_device *netdev = conn->netdev;
864 struct netiucv_priv *privptr = netdev_priv(netdev);
865 int rc;
866
867 IUCV_DBF_TEXT(trace, 3, __func__);
868
869 fsm_newstate(fi, CONN_STATE_STARTWAIT);
870 IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
871 netdev->name, conn->userid);
872
873 /*
874 * We must set the state before calling iucv_connect because the
875 * callback handler could be called at any point after the connection
876 * request is sent
877 */
878
879 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
880 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
881 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
882 NULL, iucvMagic, conn);
883 switch (rc) {
884 case 0:
885 netdev->tx_queue_len = conn->path->msglim;
886 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
887 CONN_EVENT_TIMER, conn);
888 return;
889 case 11:
890 dev_warn(privptr->dev,
891 "The IUCV device failed to connect to z/VM guest %s\n",
892 netiucv_printname(conn->userid));
893 fsm_newstate(fi, CONN_STATE_STARTWAIT);
894 break;
895 case 12:
896 dev_warn(privptr->dev,
897 "The IUCV device failed to connect to the peer on z/VM"
898 " guest %s\n", netiucv_printname(conn->userid));
899 fsm_newstate(fi, CONN_STATE_STARTWAIT);
900 break;
901 case 13:
902 dev_err(privptr->dev,
903 "Connecting the IUCV device would exceed the maximum"
904 " number of IUCV connections\n");
905 fsm_newstate(fi, CONN_STATE_CONNERR);
906 break;
907 case 14:
908 dev_err(privptr->dev,
909 "z/VM guest %s has too many IUCV connections"
910 " to connect with the IUCV device\n",
911 netiucv_printname(conn->userid));
912 fsm_newstate(fi, CONN_STATE_CONNERR);
913 break;
914 case 15:
915 dev_err(privptr->dev,
916 "The IUCV device cannot connect to a z/VM guest with no"
917 " IUCV authorization\n");
918 fsm_newstate(fi, CONN_STATE_CONNERR);
919 break;
920 default:
921 dev_err(privptr->dev,
922 "Connecting the IUCV device failed with error %d\n",
923 rc);
924 fsm_newstate(fi, CONN_STATE_CONNERR);
925 break;
926 }
927 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
928 kfree(conn->path);
929 conn->path = NULL;
930}
931
932 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
933{
934 struct sk_buff *skb;
935
936 while ((skb = skb_dequeue(q))) {
937 atomic_dec(&skb->users);
938 dev_kfree_skb_any(skb);
939 }
940}
941
942 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
943 {
944 struct iucv_event *ev = arg;
945 struct iucv_connection *conn = ev->conn;
946 struct net_device *netdev = conn->netdev;
947 struct netiucv_priv *privptr = netdev_priv(netdev);
948
949 IUCV_DBF_TEXT(trace, 3, __func__);
950
951 fsm_deltimer(&conn->timer);
952 fsm_newstate(fi, CONN_STATE_STOPPED);
953 netiucv_purge_skb_queue(&conn->collect_queue);
954 if (conn->path) {
955 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
956 iucv_path_sever(conn->path, iucvMagic);
957 kfree(conn->path);
958 conn->path = NULL;
959 }
960 netiucv_purge_skb_queue(&conn->commit_queue);
961 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
962}
963
964 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
965 {
966 struct iucv_connection *conn = arg;
967 struct net_device *netdev = conn->netdev;
968
969 IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
970 netdev->name, conn->userid);
971}
972
973static const fsm_node conn_fsm[] = {
974 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
975 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
976
977 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
978 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
979 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
980 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
981 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
982 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
983 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
984
985 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
986 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
987 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
988 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
989 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
990
991 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
992 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
993
994 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
995 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
996 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
997
998 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
999 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1000
1001 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1002 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1003};
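/*
 * Note on the jump table above: fsm_event() picks the action routine
 * from the (current state, event) pair registered here (see fsm.h and
 * fsm.c in this directory); for example, CONN_EVENT_RX in state IDLE
 * or TX ends up in conn_action_rx() above.
 */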
1004
1005static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1006
1007
1008 /*
1009 * Actions for the interface statemachine.
1010 */
1011
1012/**
1013 * dev_action_start
1014 * @fi: An instance of an interface statemachine.
1015 * @event: The event that just happened.
1016 * @arg: Generic pointer, cast from struct net_device * upon call.
1017 *
1018 * Start up the connection by sending CONN_EVENT_START to it.
1019 */
1020 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1021 {
1022 struct net_device *dev = arg;
1023 struct netiucv_priv *privptr = netdev_priv(dev);
1024
1025 IUCV_DBF_TEXT(trace, 3, __func__);
1026
1027 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1028 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1029}
1030
1031/**
1032 * Shutdown connection by sending CONN_EVENT_STOP to it.
1033 *
1034 * @param fi An instance of an interface statemachine.
1035 * @param event The event that just happened.
1036 * @param arg Generic pointer, cast from struct net_device * upon call.
1037 */
1038static void
1039dev_action_stop(fsm_instance *fi, int event, void *arg)
1040{
1041 struct net_device *dev = arg;
1042 struct netiucv_priv *privptr = netdev_priv(dev);
1043 struct iucv_event ev;
1044
1045 IUCV_DBF_TEXT(trace, 3, __func__);
1046
1047 ev.conn = privptr->conn;
1048
1049 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1050 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1051}
1052
1053/**
1054 * Called from connection statemachine
1055 * when a connection is up and running.
1056 *
1057 * @param fi An instance of an interface statemachine.
1058 * @param event The event that just happened.
1059 * @param arg Generic pointer, cast from struct net_device * upon call.
1060 */
1061static void
1062dev_action_connup(fsm_instance *fi, int event, void *arg)
1063{
1064 struct net_device *dev = arg;
1065 struct netiucv_priv *privptr = netdev_priv(dev);
1066
1067 IUCV_DBF_TEXT(trace, 3, __func__);
1068
1069 switch (fsm_getstate(fi)) {
1070 case DEV_STATE_STARTWAIT:
1071 fsm_newstate(fi, DEV_STATE_RUNNING);
1072 dev_info(privptr->dev,
1073 "The IUCV device has been connected"
1074 " successfully to %s\n", privptr->conn->userid);
1075 IUCV_DBF_TEXT(setup, 3,
1076 "connection is up and running\n");
1077 break;
1078 case DEV_STATE_STOPWAIT:
1079 IUCV_DBF_TEXT(data, 2,
1080 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1081 break;
1082 }
1083}
1084
1085/**
1086 * Called from connection statemachine
1087 * when a connection has been shutdown.
1088 *
1089 * @param fi An instance of an interface statemachine.
1090 * @param event The event that just happened.
1091 * @param arg Generic pointer, cast from struct net_device * upon call.
1092 */
1093static void
1094dev_action_conndown(fsm_instance *fi, int event, void *arg)
1095{
1096 IUCV_DBF_TEXT(trace, 3, __func__);
1097
1098 switch (fsm_getstate(fi)) {
1099 case DEV_STATE_RUNNING:
1100 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1101 break;
1102 case DEV_STATE_STOPWAIT:
1103 fsm_newstate(fi, DEV_STATE_STOPPED);
1104 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1105 break;
1106 }
1107}
1108
1109static const fsm_node dev_fsm[] = {
1110 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1111
1112 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1113 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1114
1115 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1116 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1117
1118 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1119 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1120 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1121};
1122
1123static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1124
1125/**
1126 * Transmit a packet.
1127 * This is a helper function for netiucv_tx().
1128 *
1129 * @param conn Connection to be used for sending.
1130 * @param skb Pointer to struct sk_buff of packet to send.
1131 * The linklevel header has already been set up
1132 * by netiucv_tx().
1133 *
1134 * @return 0 on success, -ERRNO on failure.
1135 */
1136static int netiucv_transmit_skb(struct iucv_connection *conn,
1137 struct sk_buff *skb)
1138{
1139 struct iucv_message msg;
1140 unsigned long saveflags;
1141 struct ll_header header;
1142 int rc;
1143
1144 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1145 int l = skb->len + NETIUCV_HDRLEN;
1146
1147 spin_lock_irqsave(&conn->collect_lock, saveflags);
1148 if (conn->collect_len + l >
1149 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1150 rc = -EBUSY;
1151 IUCV_DBF_TEXT(data, 2,
1152 "EBUSY from netiucv_transmit_skb\n");
1153 } else {
1154 atomic_inc(&skb->users);
1155 skb_queue_tail(&conn->collect_queue, skb);
1156 conn->collect_len += l;
eebce385 1157 rc = 0;
1158 }
1159 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1160 } else {
1161 struct sk_buff *nskb = skb;
1162 /**
1163 * Copy the skb to a new allocated skb in lowmem only if the
1164 * data is located above 2G in memory or tailroom is < 2.
1165 */
1166 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1167 NETIUCV_HDRLEN)) >> 31;
1168 int copied = 0;
1169 if (hi || (skb_tailroom(skb) < 2)) {
1170 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1171 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1172 if (!nskb) {
1173 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1174 rc = -ENOMEM;
1175 return rc;
1176 } else {
1177 skb_reserve(nskb, NETIUCV_HDRLEN);
1178 memcpy(skb_put(nskb, skb->len),
1179 skb->data, skb->len);
1180 }
1181 copied = 1;
1182 }
1183 /**
1184 * skb now is below 2G and has enough room. Add headers.
1185 */
1186 header.next = nskb->len + NETIUCV_HDRLEN;
1187 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1188 header.next = 0;
1189 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1190
1191 fsm_newstate(conn->fsm, CONN_STATE_TX);
1192 conn->prof.send_stamp = current_kernel_time();
1193
1194 msg.tag = 1;
1195 msg.class = 0;
1196 rc = iucv_message_send(conn->path, &msg, 0, 0,
1197 nskb->data, nskb->len);
1198 conn->prof.doios_single++;
1199 conn->prof.txlen += skb->len;
1200 conn->prof.tx_pending++;
1201 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1202 conn->prof.tx_max_pending = conn->prof.tx_pending;
1203 if (rc) {
1204 struct netiucv_priv *privptr;
1205 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1206 conn->prof.tx_pending--;
1207 privptr = netdev_priv(conn->netdev);
1208 if (privptr)
1209 privptr->stats.tx_errors++;
1210 if (copied)
1211 dev_kfree_skb(nskb);
1212 else {
1213 /**
1214 * Remove our headers. They get added
1215 * again on retransmit.
1216 */
1217 skb_pull(skb, NETIUCV_HDRLEN);
1218 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1219 }
1220 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1221 } else {
1222 if (copied)
1223 dev_kfree_skb(skb);
1224 atomic_inc(&nskb->users);
1225 skb_queue_tail(&conn->commit_queue, nskb);
1226 }
1227 }
1228
1229 return rc;
1230}
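/*
 * Summary of the transmit path above: if the connection is not idle,
 * the skb is only queued on collect_queue (bounded by max_buffsize)
 * and will later be sent as part of one multi-packet buffer from
 * conn_action_txdone(); otherwise the packet is framed with ll_headers
 * and sent immediately via iucv_message_send(), with msg.tag = 1
 * marking it as a single send for the TXDONE handler.
 */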
1231
1232 /*
1233 * Interface API for upper network layers
1234 */
1235
1236/**
1237 * Open an interface.
1238 * Called from generic network layer when ifconfig up is run.
1239 *
1240 * @param dev Pointer to interface struct.
1241 *
1242 * @return 0 on success, -ERRNO on failure. (Never fails.)
1243 */
1244static int netiucv_open(struct net_device *dev)
1245{
1246 struct netiucv_priv *priv = netdev_priv(dev);
1247
1248 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1249 return 0;
1250}
1251
1252/**
1253 * Close an interface.
1254 * Called from generic network layer when ifconfig down is run.
1255 *
1256 * @param dev Pointer to interface struct.
1257 *
1258 * @return 0 on success, -ERRNO on failure. (Never fails.)
1259 */
1260static int netiucv_close(struct net_device *dev)
1261{
1262 struct netiucv_priv *priv = netdev_priv(dev);
1263
1264 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1265 return 0;
1266}
1267
1268/**
1269 * Start transmission of a packet.
1270 * Called from generic network device layer.
1271 *
1272 * @param skb Pointer to buffer containing the packet.
1273 * @param dev Pointer to interface struct.
1274 *
1275 * @return NETDEV_TX_OK if the packet was consumed,
1276 * NETDEV_TX_BUSY if it was rejected; in that case the
1277 * generic network layer will retry the packet.
1278 */
1279static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1280{
1281 struct netiucv_priv *privptr = netdev_priv(dev);
1282 int rc;
1283
1284 IUCV_DBF_TEXT(trace, 4, __func__);
1285 /**
1286 * Some sanity checks ...
1287 */
1288 if (skb == NULL) {
1289 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1290 privptr->stats.tx_dropped++;
1291 return 0;
1292 }
1293 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1294 IUCV_DBF_TEXT(data, 2,
1295 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1296 dev_kfree_skb(skb);
1297 privptr->stats.tx_dropped++;
1298 return 0;
1299 }
1300
1301 /**
1302 * If connection is not running, try to restart it
1303 * and throw away packet.
1304 */
1305 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1306 dev_kfree_skb(skb);
1307 privptr->stats.tx_dropped++;
1308 privptr->stats.tx_errors++;
1309 privptr->stats.tx_carrier_errors++;
1310 return 0;
1311 }
1312
1313 if (netiucv_test_and_set_busy(dev)) {
1314 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1315 return NETDEV_TX_BUSY;
1316 }
1317 dev->trans_start = jiffies;
1318 rc = netiucv_transmit_skb(privptr->conn, skb);
1319 netiucv_clear_busy(dev);
1320 return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1321}
1322
1323/**
1324 * netiucv_stats
1325 * @dev: Pointer to interface struct.
1326 *
1327 * Returns interface statistics of a device.
1328 *
1329 * Returns pointer to stats struct of this interface.
1330 */
1331 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1332 {
1333 struct netiucv_priv *priv = netdev_priv(dev);
1334
1335 IUCV_DBF_TEXT(trace, 5, __func__);
1336 return &priv->stats;
1337}
1338
1339/**
1340 * netiucv_change_mtu
1341 * @dev: Pointer to interface struct.
1342 * @new_mtu: The new MTU to use for this interface.
1343 *
1344 * Sets MTU of an interface.
1345 *
1346 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1347 * (valid range is 576 .. NETIUCV_MTU_MAX).
1348 */
1349 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1350 {
1351 IUCV_DBF_TEXT(trace, 3, __func__);
1352 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1353 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1354 return -EINVAL;
1355 }
1356 dev->mtu = new_mtu;
1357 return 0;
1358}
1359
1360 /*
1361 * attributes in sysfs
1362 */
1363
1364static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1365 char *buf)
1366{
1367 struct netiucv_priv *priv = dev->driver_data;
1368
1369 IUCV_DBF_TEXT(trace, 5, __func__);
1370 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1371}
1372
1373static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1374 const char *buf, size_t count)
1375{
1376 struct netiucv_priv *priv = dev->driver_data;
1377 struct net_device *ndev = priv->conn->netdev;
1378 char *p;
1379 char *tmp;
1380 char username[9];
1381 int i;
1382 struct iucv_connection *cp;
1383
1384 IUCV_DBF_TEXT(trace, 3, __func__);
1385 if (count > 9) {
1386 IUCV_DBF_TEXT_(setup, 2,
1387 "%d is length of username\n", (int) count);
1388 return -EINVAL;
1389 }
1390
1391 tmp = strsep((char **) &buf, "\n");
eebce385
MS
1392 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1393 if (isalnum(*p) || (*p == '$')) {
16a83b30 1394 username[i]= toupper(*p);
eebce385
MS
1395 continue;
1396 }
1397 if (*p == '\n') {
1da177e4
LT
1398 /* trailing lf, grr */
1399 break;
1da177e4 1400 }
eebce385
MS
1401 IUCV_DBF_TEXT_(setup, 2,
1402 "username: invalid character %c\n", *p);
1403 return -EINVAL;
1da177e4 1404 }
eebce385 1405 while (i < 8)
1da177e4 1406 username[i++] = ' ';
16a83b30 1407 username[8] = '\0';
1da177e4 1408
eebce385
MS
1409 if (memcmp(username, priv->conn->userid, 9) &&
1410 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1411 /* username changed while the interface is active. */
eebce385 1412 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
f082bcae 1413 return -EPERM;
eebce385
MS
1414 }
1415 read_lock_bh(&iucv_connection_rwlock);
1416 list_for_each_entry(cp, &iucv_connection_list, list) {
1417 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1418 read_unlock_bh(&iucv_connection_rwlock);
f082bcae
UB
1419 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1420 "to %s already exists\n", username);
eebce385 1421 return -EEXIST;
1da177e4
LT
1422 }
1423 }
eebce385 1424 read_unlock_bh(&iucv_connection_rwlock);
1da177e4 1425 memcpy(priv->conn->userid, username, 9);
1da177e4 1426 return count;
1da177e4
LT
1427}
1428
1429static DEVICE_ATTR(user, 0644, user_show, user_write);
1430
eebce385
MS
1431static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1432 char *buf)
1433{ struct netiucv_priv *priv = dev->driver_data;
1da177e4 1434
2a2cf6b1 1435 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1436 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1437}
1438
1439static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1440 const char *buf, size_t count)
1441{
1442 struct netiucv_priv *priv = dev->driver_data;
1443 struct net_device *ndev = priv->conn->netdev;
1444 char *e;
1445 int bs1;
1446
1447 IUCV_DBF_TEXT(trace, 3, __func__);
1448 if (count >= 39)
1449 return -EINVAL;
1450
1451 bs1 = simple_strtoul(buf, &e, 0);
1452
1453 if (e && (!isspace(*e))) {
1454 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1455 return -EINVAL;
1456 }
1457 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1458 IUCV_DBF_TEXT_(setup, 2,
1459 "buffer_write: buffer size %d too large\n",
1460 bs1);
1461 return -EINVAL;
1462 }
1463 if ((ndev->flags & IFF_RUNNING) &&
1464 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1465 IUCV_DBF_TEXT_(setup, 2,
1466 "buffer_write: buffer size %d too small\n",
1467 bs1);
1468 return -EINVAL;
1469 }
1470 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1471 IUCV_DBF_TEXT_(setup, 2,
1472 "buffer_write: buffer size %d too small\n",
1473 bs1);
1474 return -EINVAL;
1475 }
1476
1477 priv->conn->max_buffsize = bs1;
1478 if (!(ndev->flags & IFF_RUNNING))
1479 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1480
1481 return count;
1482
1483}
1484
1485static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1486
1487static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1488 char *buf)
1489{
1490 struct netiucv_priv *priv = dev->driver_data;
1491
1492 IUCV_DBF_TEXT(trace, 5, __func__);
1493 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1494}
1495
1496static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1497
1498static ssize_t conn_fsm_show (struct device *dev,
1499 struct device_attribute *attr, char *buf)
1500{
1501 struct netiucv_priv *priv = dev->driver_data;
1502
1503 IUCV_DBF_TEXT(trace, 5, __func__);
1504 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1505}
1506
1507static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1508
1509static ssize_t maxmulti_show (struct device *dev,
1510 struct device_attribute *attr, char *buf)
1511{
1512 struct netiucv_priv *priv = dev->driver_data;
1513
1514 IUCV_DBF_TEXT(trace, 5, __func__);
1515 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1516}
1517
1518static ssize_t maxmulti_write (struct device *dev,
1519 struct device_attribute *attr,
1520 const char *buf, size_t count)
1521{
1522 struct netiucv_priv *priv = dev->driver_data;
1523
1524 IUCV_DBF_TEXT(trace, 4, __func__);
1525 priv->conn->prof.maxmulti = 0;
1526 return count;
1527}
1528
1529static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1530
1531static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1532 char *buf)
1533{
1534 struct netiucv_priv *priv = dev->driver_data;
1535
1536 IUCV_DBF_TEXT(trace, 5, __func__);
1537 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1538}
1539
1540static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1541 const char *buf, size_t count)
1542{
1543 struct netiucv_priv *priv = dev->driver_data;
1544
1545 IUCV_DBF_TEXT(trace, 4, __func__);
1546 priv->conn->prof.maxcqueue = 0;
1547 return count;
1548}
1549
1550static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1551
1552static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1553 char *buf)
1554{
1555 struct netiucv_priv *priv = dev->driver_data;
1556
1557 IUCV_DBF_TEXT(trace, 5, __func__);
1558 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1559}
1560
1561static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1562 const char *buf, size_t count)
1563{
1564 struct netiucv_priv *priv = dev->driver_data;
1565
1566 IUCV_DBF_TEXT(trace, 4, __func__);
1567 priv->conn->prof.doios_single = 0;
1568 return count;
1569}
1570
1571static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1572
1573static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1574 char *buf)
1575{
1576 struct netiucv_priv *priv = dev->driver_data;
1577
1578 IUCV_DBF_TEXT(trace, 5, __func__);
1579 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1580}
1581
1582static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1583 const char *buf, size_t count)
1584{
1585 struct netiucv_priv *priv = dev->driver_data;
1586
1587 IUCV_DBF_TEXT(trace, 5, __func__);
1588 priv->conn->prof.doios_multi = 0;
1589 return count;
1590}
1591
1592static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1593
1594static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1595 char *buf)
1596{
1597 struct netiucv_priv *priv = dev->driver_data;
1598
1599 IUCV_DBF_TEXT(trace, 5, __func__);
1600 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1601}
1602
1603static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1604 const char *buf, size_t count)
1605{
1606 struct netiucv_priv *priv = dev->driver_data;
1607
1608 IUCV_DBF_TEXT(trace, 4, __func__);
1609 priv->conn->prof.txlen = 0;
1610 return count;
1611}
1612
1613static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1614
1615static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1616 char *buf)
1617{
1618 struct netiucv_priv *priv = dev->driver_data;
1619
1620 IUCV_DBF_TEXT(trace, 5, __func__);
1621 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1622}
1623
1624static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1625 const char *buf, size_t count)
1626{
1627 struct netiucv_priv *priv = dev->driver_data;
1628
1629 IUCV_DBF_TEXT(trace, 4, __func__);
1630 priv->conn->prof.tx_time = 0;
1631 return count;
1632}
1633
1634static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1635
1636static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1637 char *buf)
1638{
1639 struct netiucv_priv *priv = dev->driver_data;
1640
1641 IUCV_DBF_TEXT(trace, 5, __func__);
1642 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1643}
1644
1645static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1646 const char *buf, size_t count)
1647{
1648 struct netiucv_priv *priv = dev->driver_data;
1649
1650 IUCV_DBF_TEXT(trace, 4, __func__);
1651 priv->conn->prof.tx_pending = 0;
1652 return count;
1653}
1654
1655static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1656
1657static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1658 char *buf)
1659{
1660 struct netiucv_priv *priv = dev->driver_data;
1661
1662 IUCV_DBF_TEXT(trace, 5, __func__);
1663 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1664}
1665
1666static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1667 const char *buf, size_t count)
1668{
1669 struct netiucv_priv *priv = dev->driver_data;
1670
1671 IUCV_DBF_TEXT(trace, 4, __func__);
1672 priv->conn->prof.tx_max_pending = 0;
1673 return count;
1674}
1675
1676static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1677
1678static struct attribute *netiucv_attrs[] = {
1679 &dev_attr_buffer.attr,
1680 &dev_attr_user.attr,
1681 NULL,
1682};
1683
1684static struct attribute_group netiucv_attr_group = {
1685 .attrs = netiucv_attrs,
1686};
1687
1688static struct attribute *netiucv_stat_attrs[] = {
1689 &dev_attr_device_fsm_state.attr,
1690 &dev_attr_connection_fsm_state.attr,
1691 &dev_attr_max_tx_buffer_used.attr,
1692 &dev_attr_max_chained_skbs.attr,
1693 &dev_attr_tx_single_write_ops.attr,
1694 &dev_attr_tx_multi_write_ops.attr,
1695 &dev_attr_netto_bytes.attr,
1696 &dev_attr_max_tx_io_time.attr,
1697 &dev_attr_tx_pending.attr,
1698 &dev_attr_tx_max_pending.attr,
1699 NULL,
1700};
1701
1702static struct attribute_group netiucv_stat_attr_group = {
1703 .name = "stats",
1704 .attrs = netiucv_stat_attrs,
1705};
1706
1707 static int netiucv_add_files(struct device *dev)
1708{
1709 int ret;
1710
1711 IUCV_DBF_TEXT(trace, 3, __func__);
1712 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1713 if (ret)
1714 return ret;
1715 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1716 if (ret)
1717 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1718 return ret;
1719}
1720
1721 static void netiucv_remove_files(struct device *dev)
1722 {
1723 IUCV_DBF_TEXT(trace, 3, __func__);
1724 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1725 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1726}
1727
1728 static int netiucv_register_device(struct net_device *ndev)
1729 {
1730 struct netiucv_priv *priv = netdev_priv(ndev);
1731 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1732 int ret;
1733
1734
1735 IUCV_DBF_TEXT(trace, 3, __func__);
1736
1737 if (dev) {
1738 dev_set_name(dev, "net%s", ndev->name);
1739 dev->bus = &iucv_bus;
1740 dev->parent = iucv_root;
1741 /*
1742 * The release function could be called after the
1743 * module has been unloaded. Its _only_ task is to
1744 * free the struct. Therefore, we specify kfree()
1745 * directly here. (Probably a little bit obfuscating
1746 * but legitimate ...).
1747 */
1748 dev->release = (void (*)(struct device *))kfree;
1749 dev->driver = &netiucv_driver;
1750 } else
1751 return -ENOMEM;
1752
1753 ret = device_register(dev);
1754
1755 if (ret)
1756 return ret;
1757 ret = netiucv_add_files(dev);
1758 if (ret)
1759 goto out_unreg;
1760 priv->dev = dev;
1761 dev->driver_data = priv;
1762 return 0;
1763
1764out_unreg:
1765 device_unregister(dev);
1766 return ret;
1767}

static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_remove_files(dev);
	device_unregister(dev);
}

/**
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections.
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}
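
/*
 * Editorial note (not in the original source): conn->userid holds the
 * peer user ID as prepared by conn_write() below -- up to eight
 * characters, upper-cased, padded with blanks to eight characters and
 * terminated with '\0', nine bytes in total -- which is why the
 * memcpy() above copies exactly 9 bytes.
 */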

/**
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}

/**
 * Release all resources of a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (!dev)
		return;

	if (privptr) {
		if (privptr->conn)
			netiucv_remove_connection(privptr->conn);
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		privptr->conn = NULL;
		privptr->fsm = NULL;
		/* privptr gets freed by free_netdev() */
	}
	free_netdev(dev);
}

/**
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open = netiucv_open,
	.ndo_stop = netiucv_close,
	.ndo_get_stats = netiucv_stats,
	.ndo_start_xmit = netiucv_tx,
	.ndo_change_mtu = netiucv_change_mtu,
};

static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu = NETIUCV_MTU_DEFAULT;
	dev->destructor = netiucv_free_netdevice;
	dev->hard_header_len = NETIUCV_HDRLEN;
	dev->addr_len = 0;
	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops = &netiucv_netdev_ops;
}
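
/*
 * Editorial note (not in the original source): the settings above make
 * each netiucv interface a point-to-point IP link without link-layer
 * addressing -- addr_len is 0, ARP is disabled and the device type is
 * reported as ARPHRD_SLIP -- while hard_header_len merely reserves
 * NETIUCV_HDRLEN bytes of headroom for the driver's own framing.
 */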

/**
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	free_netdev(dev);
	return NULL;
}

static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	const char *p;
	char username[9];
	int i, rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %c\n", *p);
		return -EINVAL;
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
				       "to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2,
			       "ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdev(dev);
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been"
		 " established successfully\n", netiucv_printname(username));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);
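
/*
 * Editorial note (not in the original source): writing a peer user ID to
 * the driver's "connection" attribute creates a new iucv<n> interface for
 * that peer, for example (path assumed, depending on how the IUCV bus is
 * exposed in sysfs):
 *
 *   echo PEERID > /sys/bus/iucv/drivers/netiucv/connection
 */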

static ssize_t remove_write (struct device_driver *drv,
			     const char *buf, size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				 " to %s and cannot be removed\n",
				 priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}

static DRIVER_ATTR(remove, 0200, NULL, remove_write);
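
/*
 * Editorial note (not in the original source): writing an interface name
 * to the "remove" attribute tears the interface down again, provided it
 * is not up; an active interface is refused with -EPERM.  For example
 * (path assumed, as above):
 *
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 */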

static struct attribute *netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
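
/*
 * Editorial note (not in the original source): netiucv_init() below hooks
 * this array into netiucv_driver.groups before driver_register(), so the
 * driver core creates the "connection" and "remove" attribute files
 * automatically when the driver is registered.
 */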

static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}

static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}

	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
	return;
}

static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}

	netiucv_banner();
	return rc;

out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}

module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");