]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/s390/net/claw.c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux...
[mirror_ubuntu-jammy-kernel.git] / drivers / s390 / net / claw.c
1 /*
2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver
4 *
5 * Linux for zSeries version
6 * Copyright IBM Corp. 2002, 2009
7 * Author(s) Original code written by:
8 * Kazuo Iimura <iimura@jp.ibm.com>
9 * Rewritten by
10 * Andy Richter <richtera@us.ibm.com>
11 * Marc Price <mwprice@us.ibm.com>
12 *
13 * sysfs parms:
14 * group x.x.rrrr,x.x.wwww
15 * read_buffer nnnnnnn
16 * write_buffer nnnnnn
17 * host_name aaaaaaaa
18 * adapter_name aaaaaaaa
19 * api_type aaaaaaaa
20 *
21 * eg.
22 * group 0.0.0200 0.0.0201
23 * read_buffer 25
24 * write_buffer 20
25 * host_name LINUX390
26 * adapter_name RS6K
27 * api_type TCPIP
28 *
29 * where
30 *
31 * The device id is decided by the order entries
32 * are added to the group the first is claw0 the second claw1
33 * up to CLAW_MAX_DEV
34 *
35 * rrrr - the first of 2 consecutive device addresses used for the
36 * CLAW protocol.
37 * The specified address is always used as the input (Read)
38 * channel and the next address is used as the output channel.
39 *
40 * wwww - the second of 2 consecutive device addresses used for
41 * the CLAW protocol.
42 * The specified address is always used as the output
43 * channel and the previous address is used as the input channel.
44 *
45 * read_buffer - specifies number of input buffers to allocate.
46 * write_buffer - specifies number of output buffers to allocate.
47 * host_name - host name
 *   adapter_name - adapter name
49 * api_type - API type TCPIP or API will be sent and expected
50 * as ws_name
51 *
52 * Note the following requirements:
53 * 1) host_name must match the configured adapter_name on the remote side
 *   2) adapter_name must match the configured host name on the remote side
55 *
56 * Change History
57 * 1.00 Initial release shipped
58 * 1.10 Changes for Buffer allocation
59 * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
60 * 1.25 Added Packing support
61 * 1.5
62 */
63
64 #define KMSG_COMPONENT "claw"
65
66 #include <linux/kernel_stat.h>
67 #include <asm/ccwdev.h>
68 #include <asm/ccwgroup.h>
69 #include <asm/debug.h>
70 #include <asm/idals.h>
71 #include <asm/io.h>
72 #include <linux/bitops.h>
73 #include <linux/ctype.h>
74 #include <linux/delay.h>
75 #include <linux/errno.h>
76 #include <linux/if_arp.h>
77 #include <linux/init.h>
78 #include <linux/interrupt.h>
79 #include <linux/ip.h>
80 #include <linux/kernel.h>
81 #include <linux/module.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/proc_fs.h>
85 #include <linux/sched.h>
86 #include <linux/signal.h>
87 #include <linux/skbuff.h>
88 #include <linux/slab.h>
89 #include <linux/string.h>
90 #include <linux/tcp.h>
91 #include <linux/timer.h>
92 #include <linux/types.h>
93
94 #include "claw.h"
95
96 /*
97 CLAW uses the s390dbf file system see claw_trace and claw_setup
98 */
99
100 static char version[] __initdata = "CLAW driver";
101 static char debug_buffer[255];
102 /**
103 * Debug Facility Stuff
104 */
105 static debug_info_t *claw_dbf_setup;
106 static debug_info_t *claw_dbf_trace;
107
108 /**
109 * CLAW Debug Facility functions
110 */
111 static void
112 claw_unregister_debug_facility(void)
113 {
114 if (claw_dbf_setup)
115 debug_unregister(claw_dbf_setup);
116 if (claw_dbf_trace)
117 debug_unregister(claw_dbf_trace);
118 }
119
120 static int
121 claw_register_debug_facility(void)
122 {
123 claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
124 claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
125 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
126 claw_unregister_debug_facility();
127 return -ENOMEM;
128 }
129 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
130 debug_set_level(claw_dbf_setup, 2);
131 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
132 debug_set_level(claw_dbf_trace, 2);
133 return 0;
134 }
135
/*
 * Mark the interface transmit-busy.
 * NOTE(review): this stores 1 into tbusy with a plain assignment while
 * the companion helpers below manipulate the same word with bit
 * operations; the assignment therefore clears any other TB_* bits that
 * happen to be set - confirm this wholesale reset is intended.
 */
static inline void
claw_set_busy(struct net_device *dev)
{
	((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
	eieio();	/* barrier-style call used throughout this driver */
}
142
/*
 * Clear busy bit 0 of tbusy and restart the stack's transmit queue.
 */
static inline void
claw_clear_busy(struct net_device *dev)
{
	clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
	eieio();	/* barrier-style call used throughout this driver */
}
150
/*
 * Return the raw tbusy word: non-zero while any busy bit is set.
 */
static inline int
claw_check_busy(struct net_device *dev)
{
	eieio();	/* barrier-style call used throughout this driver */
	return ((struct claw_privbk *) dev->ml_priv)->tbusy;
}
157
/*
 * Stop the transmit queue and set busy bit 'nr' (one of the TB_*
 * bits, e.g. TB_TX / TB_RETRY / TB_STOP used elsewhere in this file).
 */
static inline void
claw_setbit_busy(int nr,struct net_device *dev)
{
	netif_stop_queue(dev);
	set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}
164
/*
 * Clear busy bit 'nr' and restart the transmit queue.
 */
static inline void
claw_clearbit_busy(int nr,struct net_device *dev)
{
	clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
}
171
/*
 * Stop the transmit queue and atomically test-and-set busy bit 'nr'.
 * Returns the previous value of the bit (non-zero means someone else
 * already holds it).
 */
static inline int
claw_test_and_setbit_busy(int nr,struct net_device *dev)
{
	netif_stop_queue(dev);
	return test_and_set_bit(nr,
		(void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
}
179
180
181 /* Functions for the DEV methods */
182
183 static int claw_probe(struct ccwgroup_device *cgdev);
184 static void claw_remove_device(struct ccwgroup_device *cgdev);
185 static void claw_purge_skb_queue(struct sk_buff_head *q);
186 static int claw_new_device(struct ccwgroup_device *cgdev);
187 static int claw_shutdown_device(struct ccwgroup_device *cgdev);
188 static int claw_tx(struct sk_buff *skb, struct net_device *dev);
189 static int claw_change_mtu( struct net_device *dev, int new_mtu);
190 static int claw_open(struct net_device *dev);
191 static void claw_irq_handler(struct ccw_device *cdev,
192 unsigned long intparm, struct irb *irb);
193 static void claw_irq_tasklet ( unsigned long data );
194 static int claw_release(struct net_device *dev);
195 static void claw_write_retry ( struct chbk * p_ch );
196 static void claw_write_next ( struct chbk * p_ch );
197 static void claw_timer ( struct chbk * p_ch );
198
199 /* Functions */
200 static int add_claw_reads(struct net_device *dev,
201 struct ccwbk* p_first, struct ccwbk* p_last);
202 static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
203 static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
204 static int find_link(struct net_device *dev, char *host_name, char *ws_name );
205 static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
206 static int init_ccw_bk(struct net_device *dev);
207 static void probe_error( struct ccwgroup_device *cgdev);
208 static struct net_device_stats *claw_stats(struct net_device *dev);
209 static int pages_to_order_of_mag(int num_of_pages);
210 static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
211 /* sysfs Functions */
212 static ssize_t claw_hname_show(struct device *dev,
213 struct device_attribute *attr, char *buf);
214 static ssize_t claw_hname_write(struct device *dev,
215 struct device_attribute *attr,
216 const char *buf, size_t count);
217 static ssize_t claw_adname_show(struct device *dev,
218 struct device_attribute *attr, char *buf);
219 static ssize_t claw_adname_write(struct device *dev,
220 struct device_attribute *attr,
221 const char *buf, size_t count);
222 static ssize_t claw_apname_show(struct device *dev,
223 struct device_attribute *attr, char *buf);
224 static ssize_t claw_apname_write(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t count);
227 static ssize_t claw_wbuff_show(struct device *dev,
228 struct device_attribute *attr, char *buf);
229 static ssize_t claw_wbuff_write(struct device *dev,
230 struct device_attribute *attr,
231 const char *buf, size_t count);
232 static ssize_t claw_rbuff_show(struct device *dev,
233 struct device_attribute *attr, char *buf);
234 static ssize_t claw_rbuff_write(struct device *dev,
235 struct device_attribute *attr,
236 const char *buf, size_t count);
237 static int claw_add_files(struct device *dev);
238 static void claw_remove_files(struct device *dev);
239
240 /* Functions for System Validate */
241 static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
242 static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
243 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
244 static int claw_snd_conn_req(struct net_device *dev, __u8 link);
245 static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
246 static int claw_snd_sys_validate_rsp(struct net_device *dev,
247 struct clawctl * p_ctl, __u32 return_code);
248 static int claw_strt_conn_req(struct net_device *dev );
249 static void claw_strt_read(struct net_device *dev, int lock);
250 static void claw_strt_out_IO(struct net_device *dev);
251 static void claw_free_wrt_buf(struct net_device *dev);
252
253 /* Functions for unpack reads */
254 static void unpack_read(struct net_device *dev);
255
/*
 * Power-management "prepare" callback for the ccwgroup device:
 * always fails with -EPERM, i.e. a suspend transition is refused
 * while a CLAW group device exists.
 */
static int claw_pm_prepare(struct ccwgroup_device *gdev)
{
	return -EPERM;
}
260
261 /* the root device for claw group devices */
262 static struct device *claw_root_dev;
263
264 /* ccwgroup table */
265
/*
 * ccwgroup driver: binds a pair of CCW devices (read + write channel)
 * into one CLAW group device.
 */
static struct ccwgroup_driver claw_group_driver = {
	.owner		= THIS_MODULE,
	.name		= "claw",
	.max_slaves	= 2,		/* read channel + write channel */
	.driver_id	= 0xC3D3C1E6,	/* "CLAW" in EBCDIC */
	.probe		= claw_probe,
	.remove		= claw_remove_device,
	.set_online	= claw_new_device,
	.set_offline	= claw_shutdown_device,
	.prepare	= claw_pm_prepare,
};
277
/* CCW devices served by this driver: CU type 0x3088, model 0x61 */
static struct ccw_device_id claw_ids[] = {
	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
	{},
};
MODULE_DEVICE_TABLE(ccw, claw_ids);
283
/*
 * Plain ccw driver for the individual subchannels; probe/remove are
 * delegated to the generic ccwgroup helpers so the group driver above
 * stays in charge of the paired devices.
 */
static struct ccw_driver claw_ccw_driver = {
	.owner	= THIS_MODULE,
	.name	= "claw",
	.ids	= claw_ids,
	.probe	= ccwgroup_probe_ccwdev,
	.remove	= ccwgroup_remove_ccwdev,
};
291
292 static ssize_t
293 claw_driver_group_store(struct device_driver *ddrv, const char *buf,
294 size_t count)
295 {
296 int err;
297 err = ccwgroup_create_from_string(claw_root_dev,
298 claw_group_driver.driver_id,
299 &claw_ccw_driver, 2, buf);
300 return err ? err : count;
301 }
302
/* write-only driver attribute backing /sys/bus/.../drivers/claw/group */
static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);

/* driver-level sysfs attributes: only the "group" file above */
static struct attribute *claw_group_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};

static struct attribute_group claw_group_attr_group = {
	.attrs = claw_group_attrs,
};

static const struct attribute_group *claw_group_attr_groups[] = {
	&claw_group_attr_group,
	NULL,
};
318
319 /*
320 * Key functions
321 */
322
323 /*----------------------------------------------------------------*
324 * claw_probe *
325 * this function is called for each CLAW device. *
326 *----------------------------------------------------------------*/
/*
 * Probe callback for a new CLAW group device: allocate the private
 * control block plus its MTC envelope and environment, seed the
 * environment with defaults, create the sysfs files and hook the IRQ
 * handler onto both subchannels.
 *
 * Returns 0 on success, -ENODEV if the device cannot be referenced,
 * -ENOMEM on allocation failure or the error from claw_add_files().
 */
static int
claw_probe(struct ccwgroup_device *cgdev)
{
	int rc;
	struct claw_privbk *privptr=NULL;

	CLAW_DBF_TEXT(2, setup, "probe");
	if (!get_device(&cgdev->dev))
		return -ENODEV;
	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
	/* drvdata is set even when the allocation failed, so later
	 * lookups (e.g. the !privptr check in claw_irq_handler) see
	 * NULL rather than stale data */
	dev_set_drvdata(&cgdev->dev, privptr);
	if (privptr == NULL) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
	if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
		/* probe_error() presumably frees what was allocated via
		 * the drvdata set above - verify against its definition */
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	/* names default to "not defined" until set through sysfs */
	memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
	memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
	memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
	privptr->p_env->packing = 0;
	privptr->p_env->write_buffers = 5;
	privptr->p_env->read_buffers = 5;
	privptr->p_env->read_size = CLAW_FRAME_SIZE;
	privptr->p_env->write_size = CLAW_FRAME_SIZE;
	rc = claw_add_files(&cgdev->dev);
	if (rc) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		dev_err(&cgdev->dev, "Creating the /proc files for a new"
		" CLAW device failed\n");
		CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
		return rc;
	}
	privptr->p_env->p_priv = privptr;
	/* both subchannels share the single interrupt handler */
	cgdev->cdev[0]->handler = claw_irq_handler;
	cgdev->cdev[1]->handler = claw_irq_handler;
	CLAW_DBF_TEXT(2, setup, "prbext 0");

	return 0;
}  /* end of claw_probe */
376
377 /*-------------------------------------------------------------------*
378 * claw_tx *
379 *-------------------------------------------------------------------*/
380
381 static int
382 claw_tx(struct sk_buff *skb, struct net_device *dev)
383 {
384 int rc;
385 struct claw_privbk *privptr = dev->ml_priv;
386 unsigned long saveflags;
387 struct chbk *p_ch;
388
389 CLAW_DBF_TEXT(4, trace, "claw_tx");
390 p_ch = &privptr->channel[WRITE_CHANNEL];
391 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
392 rc=claw_hw_tx( skb, dev, 1 );
393 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
394 CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
395 if (rc)
396 rc = NETDEV_TX_BUSY;
397 else
398 rc = NETDEV_TX_OK;
399 return rc;
400 } /* end of claw_tx */
401
402 /*------------------------------------------------------------------*
403 * pack the collect queue into an skb and return it *
404 * If not packing just return the top skb from the queue *
405 *------------------------------------------------------------------*/
406
407 static struct sk_buff *
408 claw_pack_skb(struct claw_privbk *privptr)
409 {
410 struct sk_buff *new_skb,*held_skb;
411 struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
412 struct claw_env *p_env = privptr->p_env;
413 int pkt_cnt,pk_ind,so_far;
414
415 new_skb = NULL; /* assume no dice */
416 pkt_cnt = 0;
417 CLAW_DBF_TEXT(4, trace, "PackSKBe");
418 if (!skb_queue_empty(&p_ch->collect_queue)) {
419 /* some data */
420 held_skb = skb_dequeue(&p_ch->collect_queue);
421 if (held_skb)
422 dev_kfree_skb_any(held_skb);
423 else
424 return NULL;
425 if (p_env->packing != DO_PACKED)
426 return held_skb;
427 /* get a new SKB we will pack at least one */
428 new_skb = dev_alloc_skb(p_env->write_size);
429 if (new_skb == NULL) {
430 atomic_inc(&held_skb->users);
431 skb_queue_head(&p_ch->collect_queue,held_skb);
432 return NULL;
433 }
434 /* we have packed packet and a place to put it */
435 pk_ind = 1;
436 so_far = 0;
437 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
438 while ((pk_ind) && (held_skb != NULL)) {
439 if (held_skb->len+so_far <= p_env->write_size-8) {
440 memcpy(skb_put(new_skb,held_skb->len),
441 held_skb->data,held_skb->len);
442 privptr->stats.tx_packets++;
443 so_far += held_skb->len;
444 pkt_cnt++;
445 dev_kfree_skb_any(held_skb);
446 held_skb = skb_dequeue(&p_ch->collect_queue);
447 if (held_skb)
448 atomic_dec(&held_skb->users);
449 } else {
450 pk_ind = 0;
451 atomic_inc(&held_skb->users);
452 skb_queue_head(&p_ch->collect_queue,held_skb);
453 }
454 }
455 }
456 CLAW_DBF_TEXT(4, trace, "PackSKBx");
457 return new_skb;
458 }
459
460 /*-------------------------------------------------------------------*
461 * claw_change_mtu *
462 * *
463 *-------------------------------------------------------------------*/
464
465 static int
466 claw_change_mtu(struct net_device *dev, int new_mtu)
467 {
468 struct claw_privbk *privptr = dev->ml_priv;
469 int buff_size;
470 CLAW_DBF_TEXT(4, trace, "setmtu");
471 buff_size = privptr->p_env->write_size;
472 if ((new_mtu < 60) || (new_mtu > buff_size)) {
473 return -EINVAL;
474 }
475 dev->mtu = new_mtu;
476 return 0;
477 } /* end of claw_change_mtu */
478
479
480 /*-------------------------------------------------------------------*
481 * claw_open *
482 * *
483 *-------------------------------------------------------------------*/
484 static int
485 claw_open(struct net_device *dev)
486 {
487
488 int rc;
489 int i;
490 unsigned long saveflags=0;
491 unsigned long parm;
492 struct claw_privbk *privptr;
493 DECLARE_WAITQUEUE(wait, current);
494 struct timer_list timer;
495 struct ccwbk *p_buf;
496
497 CLAW_DBF_TEXT(4, trace, "open");
498 privptr = (struct claw_privbk *)dev->ml_priv;
499 /* allocate and initialize CCW blocks */
500 if (privptr->buffs_alloc == 0) {
501 rc=init_ccw_bk(dev);
502 if (rc) {
503 CLAW_DBF_TEXT(2, trace, "openmem");
504 return -ENOMEM;
505 }
506 }
507 privptr->system_validate_comp=0;
508 privptr->release_pend=0;
509 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
510 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
511 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
512 privptr->p_env->packing=PACKING_ASK;
513 } else {
514 privptr->p_env->packing=0;
515 privptr->p_env->read_size=CLAW_FRAME_SIZE;
516 privptr->p_env->write_size=CLAW_FRAME_SIZE;
517 }
518 claw_set_busy(dev);
519 tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
520 (unsigned long) &privptr->channel[READ_CHANNEL]);
521 for ( i = 0; i < 2; i++) {
522 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
523 init_waitqueue_head(&privptr->channel[i].wait);
524 /* skb_queue_head_init(&p_ch->io_queue); */
525 if (i == WRITE_CHANNEL)
526 skb_queue_head_init(
527 &privptr->channel[WRITE_CHANNEL].collect_queue);
528 privptr->channel[i].flag_a = 0;
529 privptr->channel[i].IO_active = 0;
530 privptr->channel[i].flag &= ~CLAW_TIMER;
531 init_timer(&timer);
532 timer.function = (void *)claw_timer;
533 timer.data = (unsigned long)(&privptr->channel[i]);
534 timer.expires = jiffies + 15*HZ;
535 add_timer(&timer);
536 spin_lock_irqsave(get_ccwdev_lock(
537 privptr->channel[i].cdev), saveflags);
538 parm = (unsigned long) &privptr->channel[i];
539 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
540 rc = 0;
541 add_wait_queue(&privptr->channel[i].wait, &wait);
542 rc = ccw_device_halt(
543 (struct ccw_device *)privptr->channel[i].cdev,parm);
544 set_current_state(TASK_INTERRUPTIBLE);
545 spin_unlock_irqrestore(
546 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
547 schedule();
548 set_current_state(TASK_RUNNING);
549 remove_wait_queue(&privptr->channel[i].wait, &wait);
550 if(rc != 0)
551 ccw_check_return_code(privptr->channel[i].cdev, rc);
552 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
553 del_timer(&timer);
554 }
555 if ((((privptr->channel[READ_CHANNEL].last_dstat |
556 privptr->channel[WRITE_CHANNEL].last_dstat) &
557 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
558 (((privptr->channel[READ_CHANNEL].flag |
559 privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
560 dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
561 "%s: remote side is not ready\n", dev->name);
562 CLAW_DBF_TEXT(2, trace, "notrdy");
563
564 for ( i = 0; i < 2; i++) {
565 spin_lock_irqsave(
566 get_ccwdev_lock(privptr->channel[i].cdev),
567 saveflags);
568 parm = (unsigned long) &privptr->channel[i];
569 privptr->channel[i].claw_state = CLAW_STOP;
570 rc = ccw_device_halt(
571 (struct ccw_device *)&privptr->channel[i].cdev,
572 parm);
573 spin_unlock_irqrestore(
574 get_ccwdev_lock(privptr->channel[i].cdev),
575 saveflags);
576 if (rc != 0) {
577 ccw_check_return_code(
578 privptr->channel[i].cdev, rc);
579 }
580 }
581 free_pages((unsigned long)privptr->p_buff_ccw,
582 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
583 if (privptr->p_env->read_size < PAGE_SIZE) {
584 free_pages((unsigned long)privptr->p_buff_read,
585 (int)pages_to_order_of_mag(
586 privptr->p_buff_read_num));
587 }
588 else {
589 p_buf=privptr->p_read_active_first;
590 while (p_buf!=NULL) {
591 free_pages((unsigned long)p_buf->p_buffer,
592 (int)pages_to_order_of_mag(
593 privptr->p_buff_pages_perread ));
594 p_buf=p_buf->next;
595 }
596 }
597 if (privptr->p_env->write_size < PAGE_SIZE ) {
598 free_pages((unsigned long)privptr->p_buff_write,
599 (int)pages_to_order_of_mag(
600 privptr->p_buff_write_num));
601 }
602 else {
603 p_buf=privptr->p_write_active_first;
604 while (p_buf!=NULL) {
605 free_pages((unsigned long)p_buf->p_buffer,
606 (int)pages_to_order_of_mag(
607 privptr->p_buff_pages_perwrite ));
608 p_buf=p_buf->next;
609 }
610 }
611 privptr->buffs_alloc = 0;
612 privptr->channel[READ_CHANNEL].flag = 0x00;
613 privptr->channel[WRITE_CHANNEL].flag = 0x00;
614 privptr->p_buff_ccw=NULL;
615 privptr->p_buff_read=NULL;
616 privptr->p_buff_write=NULL;
617 claw_clear_busy(dev);
618 CLAW_DBF_TEXT(2, trace, "open EIO");
619 return -EIO;
620 }
621
622 /* Send SystemValidate command */
623
624 claw_clear_busy(dev);
625 CLAW_DBF_TEXT(4, trace, "openok");
626 return 0;
627 } /* end of claw_open */
628
629 /*-------------------------------------------------------------------*
630 * *
631 * claw_irq_handler *
632 * *
633 *--------------------------------------------------------------------*/
/*
 * Interrupt handler shared by both CLAW subchannels.  Identifies the
 * channel from the device's drvdata, records the IRB, then runs the
 * per-channel state machine (claw_state) that drives connection
 * start-up, normal read/write completion and shutdown.  Runs in
 * interrupt context; deferred work is pushed to the channel tasklet.
 */
static void
claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb)
{
	struct chbk *p_ch = NULL;
	struct claw_privbk *privptr = NULL;
	struct net_device *dev = NULL;
	struct claw_env *p_env;
	struct chbk *p_ch_r=NULL;

	kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
	CLAW_DBF_TEXT(4, trace, "clawirq");
	/* Bypass all 'unsolicited interrupts' */
	privptr = dev_get_drvdata(&cdev->dev);
	if (!privptr) {
		dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
			" IRQ, c-%02x d-%02x\n",
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
		CLAW_DBF_TEXT(2, trace, "badirq");
		return;
	}

	/* Try to extract channel from driver data. */
	if (privptr->channel[READ_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[READ_CHANNEL];
	else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[WRITE_CHANNEL];
	else {
		dev_warn(&cdev->dev, "The device is not a CLAW device\n");
		CLAW_DBF_TEXT(2, trace, "badchan");
		return;
	}
	CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);

	dev = (struct net_device *) (p_ch->ndev);
	p_env=privptr->p_env;

	/* Copy interruption response block. */
	memcpy(p_ch->irb, irb, sizeof(struct irb));

	/* Check for good subchannel return code, otherwise info message */
	if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
		dev_info(&cdev->dev,
			"%s: subchannel check for device: %04x -"
			" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
			dev->name, p_ch->devno,
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
			irb->scsw.cmd.cpa);
		CLAW_DBF_TEXT(2, trace, "chanchk");
		/* return; */
	}

	/* Check the reason-code of a unit check */
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
		ccw_check_unit_check(p_ch, irb->ecw[0]);

	/* State machine to bring the connection up, down and to restart */
	p_ch->last_dstat = irb->scsw.cmd.dstat;

	switch (p_ch->claw_state) {
	case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
		/* ignore intermediate interrupts until final status */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
			return;
		wake_up(&p_ch->wait);   /* wake up claw_release */
		CLAW_DBF_TEXT(4, trace, "stop");
		return;
	case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "haltio");
			return;
		}
		if (p_ch->flag == CLAW_READ) {
			p_ch->claw_state = CLAW_START_READ;
			wake_up(&p_ch->wait); /* wake claw_open (READ)*/
		} else if (p_ch->flag == CLAW_WRITE) {
			p_ch->claw_state = CLAW_START_WRITE;
			/* send SYSTEM_VALIDATE */
			claw_strt_read(dev, LOCK_NO);
			claw_send_control(dev,
				SYSTEM_VALIDATE_REQUEST,
				0, 0, 0,
				p_env->host_name,
				p_env->adapter_name);
		} else {
			dev_warn(&cdev->dev, "The CLAW device received"
				" an unexpected IRQ, "
				"c-%02x d-%02x\n",
				irb->scsw.cmd.cstat,
				irb->scsw.cmd.dstat);
			return;
		}
		CLAW_DBF_TEXT(4, trace, "haltio");
		return;
	case CLAW_START_READ:
		CLAW_DBF_TEXT(4, trace, "ReadIRQ");
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			clear_bit(0, (void *)&p_ch->IO_active);
			if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
			    (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
			    (p_ch->irb->ecw[0]) == 0) {
				privptr->stats.rx_errors++;
				dev_info(&cdev->dev,
					"%s: Restart is required after remote "
					"side recovers \n",
					dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "notrdy");
			return;
		}
		/* PCI with no device status: data arrived mid-channel
		 * program - hand off to the tasklet if not already active */
		if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
			(p_ch->irb->scsw.cmd.dstat == 0)) {
			if (test_and_set_bit(CLAW_BH_ACTIVE,
				(void *)&p_ch->flag_a) == 0)
				tasklet_schedule(&p_ch->tasklet);
			else
				CLAW_DBF_TEXT(4, trace, "PCINoBH");
			CLAW_DBF_TEXT(4, trace, "PCI_read");
			return;
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		 (p_ch->irb->scsw.cmd.stctl ==
		 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "SPend_rd");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		claw_clearbit_busy(TB_RETRY, dev);
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch->flag_a) == 0)
			tasklet_schedule(&p_ch->tasklet);
		else
			CLAW_DBF_TEXT(4, trace, "RdBHAct");
		CLAW_DBF_TEXT(4, trace, "RdIRQXit");
		return;
	case CLAW_START_WRITE:
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			dev_info(&cdev->dev,
				"%s: Unit Check Occured in "
				"write channel\n", dev->name);
			clear_bit(0, (void *)&p_ch->IO_active);
			/* 0x80: resetting event - retry the write in 10s */
			if (p_ch->irb->ecw[0] & 0x80) {
				dev_info(&cdev->dev,
					"%s: Resetting Event "
					"occurred:\n", dev->name);
				init_timer(&p_ch->timer);
				p_ch->timer.function =
					(void *)claw_write_retry;
				p_ch->timer.data = (unsigned long)p_ch;
				p_ch->timer.expires = jiffies + 10*HZ;
				add_timer(&p_ch->timer);
				dev_info(&cdev->dev,
					"%s: write connection "
					"restarting\n", dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "rstrtwrt");
			return;
		}
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
			clear_bit(0, (void *)&p_ch->IO_active);
			dev_info(&cdev->dev,
				"%s: Unit Exception "
				"occurred in write channel\n",
				dev->name);
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "writeUE");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
			claw_write_next(p_ch);
			claw_clearbit_busy(TB_TX, dev);
			claw_clear_busy(dev);
		}
		/* kick the READ channel tasklet as well */
		p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch_r->flag_a) == 0)
			tasklet_schedule(&p_ch_r->tasklet);
		CLAW_DBF_TEXT(4, trace, "StWtExit");
		return;
	default:
		dev_warn(&cdev->dev,
			"The CLAW device for %s received an unexpected IRQ\n",
			dev->name);
		CLAW_DBF_TEXT(2, trace, "badIRQ");
		return;
	}

}	/* end of claw_irq_handler */
833
834
835 /*-------------------------------------------------------------------*
836 * claw_irq_tasklet *
837 * *
838 *--------------------------------------------------------------------*/
839 static void
840 claw_irq_tasklet ( unsigned long data )
841 {
842 struct chbk * p_ch;
843 struct net_device *dev;
844 struct claw_privbk * privptr;
845
846 p_ch = (struct chbk *) data;
847 dev = (struct net_device *)p_ch->ndev;
848 CLAW_DBF_TEXT(4, trace, "IRQtask");
849 privptr = (struct claw_privbk *)dev->ml_priv;
850 unpack_read(dev);
851 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
852 CLAW_DBF_TEXT(4, trace, "TskletXt");
853 return;
854 } /* end of claw_irq_bh */
855
856 /*-------------------------------------------------------------------*
857 * claw_release *
858 * *
859 *--------------------------------------------------------------------*/
/*
 * ndo_stop: halt both channels (write first), wait for the IRQ
 * handler to signal completion, free any packing skb and all CCW/data
 * buffers, then reset the read/write chains and bookkeeping so a
 * subsequent claw_open() starts clean.  Always returns 0.
 */
static int
claw_release(struct net_device *dev)
{
	int rc;
	int i;
	unsigned long saveflags;
	unsigned long parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct ccwbk* p_this_ccw;
	struct ccwbk* p_buf;

	if (!dev)
		return 0;
	privptr = (struct claw_privbk *)dev->ml_priv;
	if (!privptr)
		return 0;
	CLAW_DBF_TEXT(4, trace, "release");
	privptr->release_pend=1;
	claw_setbit_busy(TB_STOP,dev);
	/* write channel (index 1) is stopped before the read channel */
	for ( i = 1; i >=0 ; i--) {
		spin_lock_irqsave(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
	     /*   del_timer(&privptr->channel[READ_CHANNEL].timer);  */
		privptr->channel[i].claw_state = CLAW_STOP;
		privptr->channel[i].IO_active = 0;
		parm = (unsigned long) &privptr->channel[i];
		if (i == WRITE_CHANNEL)
			claw_purge_skb_queue(
				&privptr->channel[WRITE_CHANNEL].collect_queue);
		rc = ccw_device_halt (privptr->channel[i].cdev, parm);
		if (privptr->system_validate_comp==0x00)  /* never opened? */
			init_waitqueue_head(&privptr->channel[i].wait);
		add_wait_queue(&privptr->channel[i].wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();	/* woken by the CLAW_STOP case of the IRQ handler */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0) {
			ccw_check_return_code(privptr->channel[i].cdev, rc);
		}
	}
	/* drop a half-built packing skb, if any */
	if (privptr->pk_skb != NULL) {
		dev_kfree_skb_any(privptr->pk_skb);
		privptr->pk_skb = NULL;
	}
	if(privptr->buffs_alloc != 1) {
		CLAW_DBF_TEXT(4, trace, "none2fre");
		return 0;
	}
	CLAW_DBF_TEXT(4, trace, "freebufs");
	if (privptr->p_buff_ccw != NULL) {
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
	}
	CLAW_DBF_TEXT(4, trace, "freeread");
	/* small buffers were carved from one allocation, large ones
	   were allocated per CCW block - free accordingly */
	if (privptr->p_env->read_size < PAGE_SIZE) {
		if (privptr->p_buff_read != NULL) {
			free_pages((unsigned long)privptr->p_buff_read,
			      (int)pages_to_order_of_mag(privptr->p_buff_read_num));
		}
	}
	else {
		p_buf=privptr->p_read_active_first;
		while (p_buf!=NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perread ));
			p_buf=p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "freewrit");
	if (privptr->p_env->write_size < PAGE_SIZE ) {
		free_pages((unsigned long)privptr->p_buff_write,
			(int)pages_to_order_of_mag(privptr->p_buff_write_num));
	}
	else {
		p_buf=privptr->p_write_active_first;
		while (p_buf!=NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perwrite ));
			p_buf=p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "clearptr");
	privptr->buffs_alloc = 0;
	privptr->p_buff_ccw=NULL;
	privptr->p_buff_read=NULL;
	privptr->p_buff_write=NULL;
	privptr->system_validate_comp=0;
	privptr->release_pend=0;
	/* Remove any writes that were pending and reset all reads */
	p_this_ccw=privptr->p_read_active_first;
	while (p_this_ccw!=NULL) {
		p_this_ccw->header.length=0xffff;
		p_this_ccw->header.opcode=0xff;
		p_this_ccw->header.flag=0x00;
		p_this_ccw=p_this_ccw->next;
	}

	/* move all active writes back onto the free chain */
	while (privptr->p_write_active_first!=NULL) {
		p_this_ccw=privptr->p_write_active_first;
		p_this_ccw->header.flag=CLAW_PENDING;
		privptr->p_write_active_first=p_this_ccw->next;
		p_this_ccw->next=privptr->p_write_free_chain;
		privptr->p_write_free_chain=p_this_ccw;
		++privptr->write_free_count;
	}
	privptr->p_write_active_last=NULL;
	privptr->mtc_logical_link = -1;
	privptr->mtc_skipping = 1;
	privptr->mtc_offset=0;

	/* warn if either channel ended with unexpected device status */
	if (((privptr->channel[READ_CHANNEL].last_dstat |
		privptr->channel[WRITE_CHANNEL].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
		dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
			"Deactivating %s completed with incorrect"
			" subchannel status "
			"(read %02x, write %02x)\n",
			dev->name,
			privptr->channel[READ_CHANNEL].last_dstat,
			privptr->channel[WRITE_CHANNEL].last_dstat);
		CLAW_DBF_TEXT(2, trace, "badclose");
	}
	CLAW_DBF_TEXT(4, trace, "rlsexit");
	return 0;
}      /* end of claw_release     */
991
992 /*-------------------------------------------------------------------*
993 * claw_write_retry *
994 * *
995 *--------------------------------------------------------------------*/
996
997 static void
998 claw_write_retry ( struct chbk *p_ch )
999 {
1000
1001 struct net_device *dev=p_ch->ndev;
1002
1003 CLAW_DBF_TEXT(4, trace, "w_retry");
1004 if (p_ch->claw_state == CLAW_STOP) {
1005 return;
1006 }
1007 claw_strt_out_IO( dev );
1008 CLAW_DBF_TEXT(4, trace, "rtry_xit");
1009 return;
1010 } /* end of claw_write_retry */
1011
1012
1013 /*-------------------------------------------------------------------*
1014 * claw_write_next *
1015 * *
1016 *--------------------------------------------------------------------*/
1017
1018 static void
1019 claw_write_next ( struct chbk * p_ch )
1020 {
1021
1022 struct net_device *dev;
1023 struct claw_privbk *privptr=NULL;
1024 struct sk_buff *pk_skb;
1025 int rc;
1026
1027 CLAW_DBF_TEXT(4, trace, "claw_wrt");
1028 if (p_ch->claw_state == CLAW_STOP)
1029 return;
1030 dev = (struct net_device *) p_ch->ndev;
1031 privptr = (struct claw_privbk *) dev->ml_priv;
1032 claw_free_wrt_buf( dev );
1033 if ((privptr->write_free_count > 0) &&
1034 !skb_queue_empty(&p_ch->collect_queue)) {
1035 pk_skb = claw_pack_skb(privptr);
1036 while (pk_skb != NULL) {
1037 rc = claw_hw_tx( pk_skb, dev,1);
1038 if (privptr->write_free_count > 0) {
1039 pk_skb = claw_pack_skb(privptr);
1040 } else
1041 pk_skb = NULL;
1042 }
1043 }
1044 if (privptr->p_write_active_first!=NULL) {
1045 claw_strt_out_IO(dev);
1046 }
1047 return;
1048 } /* end of claw_write_next */
1049
1050 /*-------------------------------------------------------------------*
1051 * *
1052 * claw_timer *
1053 *--------------------------------------------------------------------*/
1054
1055 static void
1056 claw_timer ( struct chbk * p_ch )
1057 {
1058 CLAW_DBF_TEXT(4, trace, "timer");
1059 p_ch->flag |= CLAW_TIMER;
1060 wake_up(&p_ch->wait);
1061 return;
1062 } /* end of claw_timer */
1063
1064 /*
1065 *
1066 * functions
1067 */
1068
1069
1070 /*-------------------------------------------------------------------*
1071 * *
1072 * pages_to_order_of_mag *
1073 * *
1074 * takes a number of pages from 1 to 512 and returns the *
1075 * log(num_pages)/log(2) get_free_pages() needs a base 2 order *
1076 * of magnitude get_free_pages() has an upper order of 9 *
1077 *--------------------------------------------------------------------*/
1078
1079 static int
1080 pages_to_order_of_mag(int num_of_pages)
1081 {
1082 int order_of_mag=1; /* assume 2 pages */
1083 int nump;
1084
1085 CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
1086 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1087 /* 512 pages = 2Meg on 4k page systems */
1088 if (num_of_pages >= 512) {return 9; }
1089 /* we have two or more pages order is at least 1 */
1090 for (nump=2 ;nump <= 512;nump*=2) {
1091 if (num_of_pages <= nump)
1092 break;
1093 order_of_mag +=1;
1094 }
1095 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1096 CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
1097 return order_of_mag;
1098 }
1099
1100 /*-------------------------------------------------------------------*
1101 * *
1102 * add_claw_reads *
1103 * *
1104 *--------------------------------------------------------------------*/
/*
 * add_claw_reads() - splice a new chain of read CCW blocks into the
 * (possibly running) read channel program.
 * @dev:     net device owning the CLAW private data.
 * @p_first: first CCW block of the new read chain; NULL means nothing
 *           to add and the function returns immediately.
 * @p_last:  last CCW block of the new read chain.
 *
 * The shared ending CCW block carries two alternating NOP pairs
 * (read1_nop1/read1_nop2 and read2_nop1/read2_nop2).  p_end->read1
 * records which pair the running program currently ends with, so the
 * OTHER pair can be rewritten and then linked in via TIC CCWs without
 * the channel ever fetching a half-updated CCW.
 *
 * Always returns 0.
 */
static int
add_claw_reads(struct net_device *dev, struct ccwbk *p_first,
	struct ccwbk *p_last)
{
	struct claw_privbk *privptr;
	struct ccw1 temp_ccw;
	struct endccw *p_end;
	CLAW_DBF_TEXT(4, trace, "addreads");
	privptr = dev->ml_priv;
	p_end = privptr->p_end_ccw;

	/* first CCW and last CCW contain a new set of read channel
	 * programs to append to the running channel program
	 */
	if (p_first == NULL) {
		CLAW_DBF_TEXT(4, trace, "addexit");
		return 0;
	}

	/* set up ending CCW sequence for this segment: flip to the
	 * inactive nop pair and point the new chain's TICs at it */
	if (p_end->read1) {
		p_end->read1 = 0x00;	/* second ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read2_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read2_nop1);
		p_end->read2_nop2.cda = 0;
		p_end->read2_nop2.count = 1;
	} else {
		p_end->read1 = 0x01;	/* first ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
		p_end->read1_nop2.cda = 0;
		p_end->read1_nop2.count = 1;
	}

	if (privptr->p_read_active_first == NULL) {
		/* no active read program yet - the new chain is the program */
		privptr->p_read_active_first = p_first;	/* set new first */
		privptr->p_read_active_last = p_last;	/* set new last */
	} else {
		/* build a TIC ccw that jumps into the new chain */
		temp_ccw.cda = (__u32)__pa(&p_first->read);
		temp_ccw.count = 0;
		temp_ccw.flags = 0;
		temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;

		if (p_end->read1) {
			/* first set of CCW's is chained to the new read
			 * chain, so the second set is chained to the active
			 * chain.  Therefore modify the second set to point to
			 * the new read chain.  Copy the whole CCW at once so
			 * the channel doesn't fetch it when it's only half
			 * done. */
			memcpy(&p_end->read2_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		} else {
			/* same, for the other nop pair: update the CCW
			 * atomically so the channel doesn't fetch it when it
			 * is only half done */
			memcpy(&p_end->read1_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		}
		/* chain in new set of blocks */
		privptr->p_read_active_last->next = p_first;
		privptr->p_read_active_last = p_last;
	} /* end of if ( privptr-> p_read_active_first ==NULL) */
	CLAW_DBF_TEXT(4, trace, "addexit");
	return 0;
} /* end of add_claw_reads */
1191
1192 /*-------------------------------------------------------------------*
1193 * ccw_check_return_code *
1194 * *
1195 *-------------------------------------------------------------------*/
1196
1197 static void
1198 ccw_check_return_code(struct ccw_device *cdev, int return_code)
1199 {
1200 CLAW_DBF_TEXT(4, trace, "ccwret");
1201 if (return_code != 0) {
1202 switch (return_code) {
1203 case -EBUSY: /* BUSY is a transient state no action needed */
1204 break;
1205 case -ENODEV:
1206 dev_err(&cdev->dev, "The remote channel adapter is not"
1207 " available\n");
1208 break;
1209 case -EINVAL:
1210 dev_err(&cdev->dev,
1211 "The status of the remote channel adapter"
1212 " is not valid\n");
1213 break;
1214 default:
1215 dev_err(&cdev->dev, "The common device layer"
1216 " returned error code %d\n",
1217 return_code);
1218 }
1219 }
1220 CLAW_DBF_TEXT(4, trace, "ccwret");
1221 } /* end of ccw_check_return_code */
1222
1223 /*-------------------------------------------------------------------*
1224 * ccw_check_unit_check *
1225 *--------------------------------------------------------------------*/
1226
1227 static void
1228 ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1229 {
1230 struct net_device *ndev = p_ch->ndev;
1231 struct device *dev = &p_ch->cdev->dev;
1232
1233 CLAW_DBF_TEXT(4, trace, "unitchek");
1234 dev_warn(dev, "The communication peer of %s disconnected\n",
1235 ndev->name);
1236
1237 if (sense & 0x40) {
1238 if (sense & 0x01) {
1239 dev_warn(dev, "The remote channel adapter for"
1240 " %s has been reset\n",
1241 ndev->name);
1242 }
1243 } else if (sense & 0x20) {
1244 if (sense & 0x04) {
1245 dev_warn(dev, "A data streaming timeout occurred"
1246 " for %s\n",
1247 ndev->name);
1248 } else if (sense & 0x10) {
1249 dev_warn(dev, "The remote channel adapter for %s"
1250 " is faulty\n",
1251 ndev->name);
1252 } else {
1253 dev_warn(dev, "A data transfer parity error occurred"
1254 " for %s\n",
1255 ndev->name);
1256 }
1257 } else if (sense & 0x10) {
1258 dev_warn(dev, "A read data parity error occurred"
1259 " for %s\n",
1260 ndev->name);
1261 }
1262
1263 } /* end of ccw_check_unit_check */
1264
1265 /*-------------------------------------------------------------------*
1266 * find_link *
1267 *--------------------------------------------------------------------*/
1268 static int
1269 find_link(struct net_device *dev, char *host_name, char *ws_name )
1270 {
1271 struct claw_privbk *privptr;
1272 struct claw_env *p_env;
1273 int rc=0;
1274
1275 CLAW_DBF_TEXT(2, setup, "findlink");
1276 privptr = dev->ml_priv;
1277 p_env=privptr->p_env;
1278 switch (p_env->packing)
1279 {
1280 case PACKING_ASK:
1281 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1282 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1283 rc = EINVAL;
1284 break;
1285 case DO_PACKED:
1286 case PACK_SEND:
1287 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1288 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1289 rc = EINVAL;
1290 break;
1291 default:
1292 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1293 (memcmp(p_env->api_type , ws_name, 8)!=0))
1294 rc = EINVAL;
1295 break;
1296 }
1297
1298 return rc;
1299 } /* end of find_link */
1300
1301 /*-------------------------------------------------------------------*
1302 * claw_hw_tx *
1303 * *
1304 * *
1305 *-------------------------------------------------------------------*/
1306
1307 static int
1308 claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1309 {
1310 int rc=0;
1311 struct claw_privbk *privptr;
1312 struct ccwbk *p_this_ccw;
1313 struct ccwbk *p_first_ccw;
1314 struct ccwbk *p_last_ccw;
1315 __u32 numBuffers;
1316 signed long len_of_data;
1317 unsigned long bytesInThisBuffer;
1318 unsigned char *pDataAddress;
1319 struct endccw *pEnd;
1320 struct ccw1 tempCCW;
1321 struct chbk *p_ch;
1322 struct claw_env *p_env;
1323 int lock;
1324 struct clawph *pk_head;
1325 struct chbk *ch;
1326
1327 CLAW_DBF_TEXT(4, trace, "hw_tx");
1328 privptr = (struct claw_privbk *)(dev->ml_priv);
1329 p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
1330 p_env =privptr->p_env;
1331 claw_free_wrt_buf(dev); /* Clean up free chain if posible */
1332 /* scan the write queue to free any completed write packets */
1333 p_first_ccw=NULL;
1334 p_last_ccw=NULL;
1335 if ((p_env->packing >= PACK_SEND) &&
1336 (skb->cb[1] != 'P')) {
1337 skb_push(skb,sizeof(struct clawph));
1338 pk_head=(struct clawph *)skb->data;
1339 pk_head->len=skb->len-sizeof(struct clawph);
1340 if (pk_head->len%4) {
1341 pk_head->len+= 4-(pk_head->len%4);
1342 skb_pad(skb,4-(pk_head->len%4));
1343 skb_put(skb,4-(pk_head->len%4));
1344 }
1345 if (p_env->packing == DO_PACKED)
1346 pk_head->link_num = linkid;
1347 else
1348 pk_head->link_num = 0;
1349 pk_head->flag = 0x00;
1350 skb_pad(skb,4);
1351 skb->cb[1] = 'P';
1352 }
1353 if (linkid == 0) {
1354 if (claw_check_busy(dev)) {
1355 if (privptr->write_free_count!=0) {
1356 claw_clear_busy(dev);
1357 }
1358 else {
1359 claw_strt_out_IO(dev );
1360 claw_free_wrt_buf( dev );
1361 if (privptr->write_free_count==0) {
1362 ch = &privptr->channel[WRITE_CHANNEL];
1363 atomic_inc(&skb->users);
1364 skb_queue_tail(&ch->collect_queue, skb);
1365 goto Done;
1366 }
1367 else {
1368 claw_clear_busy(dev);
1369 }
1370 }
1371 }
1372 /* tx lock */
1373 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1374 ch = &privptr->channel[WRITE_CHANNEL];
1375 atomic_inc(&skb->users);
1376 skb_queue_tail(&ch->collect_queue, skb);
1377 claw_strt_out_IO(dev );
1378 rc=-EBUSY;
1379 goto Done2;
1380 }
1381 }
1382 /* See how many write buffers are required to hold this data */
1383 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1384
1385 /* If that number of buffers isn't available, give up for now */
1386 if (privptr->write_free_count < numBuffers ||
1387 privptr->p_write_free_chain == NULL ) {
1388
1389 claw_setbit_busy(TB_NOBUFFER,dev);
1390 ch = &privptr->channel[WRITE_CHANNEL];
1391 atomic_inc(&skb->users);
1392 skb_queue_tail(&ch->collect_queue, skb);
1393 CLAW_DBF_TEXT(2, trace, "clawbusy");
1394 goto Done2;
1395 }
1396 pDataAddress=skb->data;
1397 len_of_data=skb->len;
1398
1399 while (len_of_data > 0) {
1400 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1401 if (p_this_ccw == NULL) { /* lost the race */
1402 ch = &privptr->channel[WRITE_CHANNEL];
1403 atomic_inc(&skb->users);
1404 skb_queue_tail(&ch->collect_queue, skb);
1405 goto Done2;
1406 }
1407 privptr->p_write_free_chain=p_this_ccw->next;
1408 p_this_ccw->next=NULL;
1409 --privptr->write_free_count; /* -1 */
1410 if (len_of_data >= privptr->p_env->write_size)
1411 bytesInThisBuffer = privptr->p_env->write_size;
1412 else
1413 bytesInThisBuffer = len_of_data;
1414 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1415 len_of_data-=bytesInThisBuffer;
1416 pDataAddress+=(unsigned long)bytesInThisBuffer;
1417 /* setup write CCW */
1418 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1419 if (len_of_data>0) {
1420 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1421 }
1422 p_this_ccw->write.count=bytesInThisBuffer;
1423 /* now add to end of this chain */
1424 if (p_first_ccw==NULL) {
1425 p_first_ccw=p_this_ccw;
1426 }
1427 if (p_last_ccw!=NULL) {
1428 p_last_ccw->next=p_this_ccw;
1429 /* set up TIC ccws */
1430 p_last_ccw->w_TIC_1.cda=
1431 (__u32)__pa(&p_this_ccw->write);
1432 }
1433 p_last_ccw=p_this_ccw; /* save new last block */
1434 }
1435
1436 /* FirstCCW and LastCCW now contain a new set of write channel
1437 * programs to append to the running channel program
1438 */
1439
1440 if (p_first_ccw!=NULL) {
1441 /* setup ending ccw sequence for this segment */
1442 pEnd=privptr->p_end_ccw;
1443 if (pEnd->write1) {
1444 pEnd->write1=0x00; /* second end ccw is now active */
1445 /* set up Tic CCWs */
1446 p_last_ccw->w_TIC_1.cda=
1447 (__u32)__pa(&pEnd->write2_nop1);
1448 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1449 pEnd->write2_nop2.flags =
1450 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1451 pEnd->write2_nop2.cda=0;
1452 pEnd->write2_nop2.count=1;
1453 }
1454 else { /* end of if (pEnd->write1)*/
1455 pEnd->write1=0x01; /* first end ccw is now active */
1456 /* set up Tic CCWs */
1457 p_last_ccw->w_TIC_1.cda=
1458 (__u32)__pa(&pEnd->write1_nop1);
1459 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1460 pEnd->write1_nop2.flags =
1461 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1462 pEnd->write1_nop2.cda=0;
1463 pEnd->write1_nop2.count=1;
1464 } /* end if if (pEnd->write1) */
1465
1466 if (privptr->p_write_active_first==NULL ) {
1467 privptr->p_write_active_first=p_first_ccw;
1468 privptr->p_write_active_last=p_last_ccw;
1469 }
1470 else {
1471 /* set up Tic CCWs */
1472
1473 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1474 tempCCW.count=0;
1475 tempCCW.flags=0;
1476 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1477
1478 if (pEnd->write1) {
1479
1480 /*
1481 * first set of ending CCW's is chained to the new write
1482 * chain, so the second set is chained to the active chain
1483 * Therefore modify the second set to point the new write chain.
1484 * make sure we update the CCW atomically
1485 * so channel does not fetch it when it's only half done
1486 */
1487 memcpy( &pEnd->write2_nop2, &tempCCW ,
1488 sizeof(struct ccw1));
1489 privptr->p_write_active_last->w_TIC_1.cda=
1490 (__u32)__pa(&p_first_ccw->write);
1491 }
1492 else {
1493
1494 /*make sure we update the CCW atomically
1495 *so channel does not fetch it when it's only half done
1496 */
1497 memcpy(&pEnd->write1_nop2, &tempCCW ,
1498 sizeof(struct ccw1));
1499 privptr->p_write_active_last->w_TIC_1.cda=
1500 (__u32)__pa(&p_first_ccw->write);
1501
1502 } /* end if if (pEnd->write1) */
1503
1504 privptr->p_write_active_last->next=p_first_ccw;
1505 privptr->p_write_active_last=p_last_ccw;
1506 }
1507
1508 } /* endif (p_first_ccw!=NULL) */
1509 dev_kfree_skb_any(skb);
1510 if (linkid==0) {
1511 lock=LOCK_NO;
1512 }
1513 else {
1514 lock=LOCK_YES;
1515 }
1516 claw_strt_out_IO(dev );
1517 /* if write free count is zero , set NOBUFFER */
1518 if (privptr->write_free_count==0) {
1519 claw_setbit_busy(TB_NOBUFFER,dev);
1520 }
1521 Done2:
1522 claw_clearbit_busy(TB_TX,dev);
1523 Done:
1524 return(rc);
1525 } /* end of claw_hw_tx */
1526
1527 /*-------------------------------------------------------------------*
1528 * *
1529 * init_ccw_bk *
1530 * *
1531 *--------------------------------------------------------------------*/
1532
1533 static int
1534 init_ccw_bk(struct net_device *dev)
1535 {
1536
1537 __u32 ccw_blocks_required;
1538 __u32 ccw_blocks_perpage;
1539 __u32 ccw_pages_required;
1540 __u32 claw_reads_perpage=1;
1541 __u32 claw_read_pages;
1542 __u32 claw_writes_perpage=1;
1543 __u32 claw_write_pages;
1544 void *p_buff=NULL;
1545 struct ccwbk*p_free_chain;
1546 struct ccwbk*p_buf;
1547 struct ccwbk*p_last_CCWB;
1548 struct ccwbk*p_first_CCWB;
1549 struct endccw *p_endccw=NULL;
1550 addr_t real_address;
1551 struct claw_privbk *privptr = dev->ml_priv;
1552 struct clawh *pClawH=NULL;
1553 addr_t real_TIC_address;
1554 int i,j;
1555 CLAW_DBF_TEXT(4, trace, "init_ccw");
1556
1557 /* initialize statistics field */
1558 privptr->active_link_ID=0;
1559 /* initialize ccwbk pointers */
1560 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1561 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1562 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1563 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1564 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1565 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1566 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1567 privptr->buffs_alloc = 0;
1568 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1569 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1570 /* initialize free write ccwbk counter */
1571 privptr->write_free_count=0; /* number of free bufs on write chain */
1572 p_last_CCWB = NULL;
1573 p_first_CCWB= NULL;
1574 /*
1575 * We need 1 CCW block for each read buffer, 1 for each
1576 * write buffer, plus 1 for ClawSignalBlock
1577 */
1578 ccw_blocks_required =
1579 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1580 /*
1581 * compute number of CCW blocks that will fit in a page
1582 */
1583 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1584 ccw_pages_required=
1585 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
1586
1587 /*
1588 * read and write sizes are set by 2 constants in claw.h
1589 * 4k and 32k. Unpacked values other than 4k are not going to
1590 * provide good performance. With packing buffers support 32k
1591 * buffers are used.
1592 */
1593 if (privptr->p_env->read_size < PAGE_SIZE) {
1594 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1595 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1596 claw_reads_perpage);
1597 }
1598 else { /* > or equal */
1599 privptr->p_buff_pages_perread =
1600 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1601 claw_read_pages = privptr->p_env->read_buffers *
1602 privptr->p_buff_pages_perread;
1603 }
1604 if (privptr->p_env->write_size < PAGE_SIZE) {
1605 claw_writes_perpage =
1606 PAGE_SIZE / privptr->p_env->write_size;
1607 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1608 claw_writes_perpage);
1609
1610 }
1611 else { /* > or equal */
1612 privptr->p_buff_pages_perwrite =
1613 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1614 claw_write_pages = privptr->p_env->write_buffers *
1615 privptr->p_buff_pages_perwrite;
1616 }
1617 /*
1618 * allocate ccw_pages_required
1619 */
1620 if (privptr->p_buff_ccw==NULL) {
1621 privptr->p_buff_ccw=
1622 (void *)__get_free_pages(__GFP_DMA,
1623 (int)pages_to_order_of_mag(ccw_pages_required ));
1624 if (privptr->p_buff_ccw==NULL) {
1625 return -ENOMEM;
1626 }
1627 privptr->p_buff_ccw_num=ccw_pages_required;
1628 }
1629 memset(privptr->p_buff_ccw, 0x00,
1630 privptr->p_buff_ccw_num * PAGE_SIZE);
1631
1632 /*
1633 * obtain ending ccw block address
1634 *
1635 */
1636 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1637 real_address = (__u32)__pa(privptr->p_end_ccw);
1638 /* Initialize ending CCW block */
1639 p_endccw=privptr->p_end_ccw;
1640 p_endccw->real=real_address;
1641 p_endccw->write1=0x00;
1642 p_endccw->read1=0x00;
1643
1644 /* write1_nop1 */
1645 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1646 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1647 p_endccw->write1_nop1.count = 1;
1648 p_endccw->write1_nop1.cda = 0;
1649
1650 /* write1_nop2 */
1651 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1652 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1653 p_endccw->write1_nop2.count = 1;
1654 p_endccw->write1_nop2.cda = 0;
1655
1656 /* write2_nop1 */
1657 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1658 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1659 p_endccw->write2_nop1.count = 1;
1660 p_endccw->write2_nop1.cda = 0;
1661
1662 /* write2_nop2 */
1663 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1664 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1665 p_endccw->write2_nop2.count = 1;
1666 p_endccw->write2_nop2.cda = 0;
1667
1668 /* read1_nop1 */
1669 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1670 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1671 p_endccw->read1_nop1.count = 1;
1672 p_endccw->read1_nop1.cda = 0;
1673
1674 /* read1_nop2 */
1675 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1676 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1677 p_endccw->read1_nop2.count = 1;
1678 p_endccw->read1_nop2.cda = 0;
1679
1680 /* read2_nop1 */
1681 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1682 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1683 p_endccw->read2_nop1.count = 1;
1684 p_endccw->read2_nop1.cda = 0;
1685
1686 /* read2_nop2 */
1687 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1688 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1689 p_endccw->read2_nop2.count = 1;
1690 p_endccw->read2_nop2.cda = 0;
1691
1692 /*
1693 * Build a chain of CCWs
1694 *
1695 */
1696 p_buff=privptr->p_buff_ccw;
1697
1698 p_free_chain=NULL;
1699 for (i=0 ; i < ccw_pages_required; i++ ) {
1700 real_address = (__u32)__pa(p_buff);
1701 p_buf=p_buff;
1702 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1703 p_buf->next = p_free_chain;
1704 p_free_chain = p_buf;
1705 p_buf->real=(__u32)__pa(p_buf);
1706 ++p_buf;
1707 }
1708 p_buff+=PAGE_SIZE;
1709 }
1710 /*
1711 * Initialize ClawSignalBlock
1712 *
1713 */
1714 if (privptr->p_claw_signal_blk==NULL) {
1715 privptr->p_claw_signal_blk=p_free_chain;
1716 p_free_chain=p_free_chain->next;
1717 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1718 pClawH->length=0xffff;
1719 pClawH->opcode=0xff;
1720 pClawH->flag=CLAW_BUSY;
1721 }
1722
1723 /*
1724 * allocate write_pages_required and add to free chain
1725 */
1726 if (privptr->p_buff_write==NULL) {
1727 if (privptr->p_env->write_size < PAGE_SIZE) {
1728 privptr->p_buff_write=
1729 (void *)__get_free_pages(__GFP_DMA,
1730 (int)pages_to_order_of_mag(claw_write_pages ));
1731 if (privptr->p_buff_write==NULL) {
1732 privptr->p_buff_ccw=NULL;
1733 return -ENOMEM;
1734 }
1735 /*
1736 * Build CLAW write free chain
1737 *
1738 */
1739
1740 memset(privptr->p_buff_write, 0x00,
1741 ccw_pages_required * PAGE_SIZE);
1742 privptr->p_write_free_chain=NULL;
1743
1744 p_buff=privptr->p_buff_write;
1745
1746 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1747 p_buf = p_free_chain; /* get a CCW */
1748 p_free_chain = p_buf->next;
1749 p_buf->next =privptr->p_write_free_chain;
1750 privptr->p_write_free_chain = p_buf;
1751 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1752 p_buf-> write.cda = (__u32)__pa(p_buff);
1753 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1754 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1755 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1756 p_buf-> w_read_FF.count = 1;
1757 p_buf-> w_read_FF.cda =
1758 (__u32)__pa(&p_buf-> header.flag);
1759 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1760 p_buf-> w_TIC_1.flags = 0;
1761 p_buf-> w_TIC_1.count = 0;
1762
1763 if (((unsigned long)p_buff +
1764 privptr->p_env->write_size) >=
1765 ((unsigned long)(p_buff+2*
1766 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1767 p_buff = p_buff+privptr->p_env->write_size;
1768 }
1769 }
1770 }
1771 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
1772 {
1773 privptr->p_write_free_chain=NULL;
1774 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1775 p_buff=(void *)__get_free_pages(__GFP_DMA,
1776 (int)pages_to_order_of_mag(
1777 privptr->p_buff_pages_perwrite) );
1778 if (p_buff==NULL) {
1779 free_pages((unsigned long)privptr->p_buff_ccw,
1780 (int)pages_to_order_of_mag(
1781 privptr->p_buff_ccw_num));
1782 privptr->p_buff_ccw=NULL;
1783 p_buf=privptr->p_buff_write;
1784 while (p_buf!=NULL) {
1785 free_pages((unsigned long)
1786 p_buf->p_buffer,
1787 (int)pages_to_order_of_mag(
1788 privptr->p_buff_pages_perwrite));
1789 p_buf=p_buf->next;
1790 }
1791 return -ENOMEM;
1792 } /* Error on get_pages */
1793 memset(p_buff, 0x00, privptr->p_env->write_size );
1794 p_buf = p_free_chain;
1795 p_free_chain = p_buf->next;
1796 p_buf->next = privptr->p_write_free_chain;
1797 privptr->p_write_free_chain = p_buf;
1798 privptr->p_buff_write = p_buf;
1799 p_buf->p_buffer=(struct clawbuf *)p_buff;
1800 p_buf-> write.cda = (__u32)__pa(p_buff);
1801 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1802 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1803 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1804 p_buf-> w_read_FF.count = 1;
1805 p_buf-> w_read_FF.cda =
1806 (__u32)__pa(&p_buf-> header.flag);
1807 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1808 p_buf-> w_TIC_1.flags = 0;
1809 p_buf-> w_TIC_1.count = 0;
1810 } /* for all write_buffers */
1811
1812 } /* else buffers are PAGE_SIZE or bigger */
1813
1814 }
1815 privptr->p_buff_write_num=claw_write_pages;
1816 privptr->write_free_count=privptr->p_env->write_buffers;
1817
1818
1819 /*
1820 * allocate read_pages_required and chain to free chain
1821 */
1822 if (privptr->p_buff_read==NULL) {
1823 if (privptr->p_env->read_size < PAGE_SIZE) {
1824 privptr->p_buff_read=
1825 (void *)__get_free_pages(__GFP_DMA,
1826 (int)pages_to_order_of_mag(claw_read_pages) );
1827 if (privptr->p_buff_read==NULL) {
1828 free_pages((unsigned long)privptr->p_buff_ccw,
1829 (int)pages_to_order_of_mag(
1830 privptr->p_buff_ccw_num));
1831 /* free the write pages size is < page size */
1832 free_pages((unsigned long)privptr->p_buff_write,
1833 (int)pages_to_order_of_mag(
1834 privptr->p_buff_write_num));
1835 privptr->p_buff_ccw=NULL;
1836 privptr->p_buff_write=NULL;
1837 return -ENOMEM;
1838 }
1839 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1840 privptr->p_buff_read_num=claw_read_pages;
1841 /*
1842 * Build CLAW read free chain
1843 *
1844 */
1845 p_buff=privptr->p_buff_read;
1846 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1847 p_buf = p_free_chain;
1848 p_free_chain = p_buf->next;
1849
1850 if (p_last_CCWB==NULL) {
1851 p_buf->next=NULL;
1852 real_TIC_address=0;
1853 p_last_CCWB=p_buf;
1854 }
1855 else {
1856 p_buf->next=p_first_CCWB;
1857 real_TIC_address=
1858 (__u32)__pa(&p_first_CCWB -> read );
1859 }
1860
1861 p_first_CCWB=p_buf;
1862
1863 p_buf->p_buffer=(struct clawbuf *)p_buff;
1864 /* initialize read command */
1865 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1866 p_buf-> read.cda = (__u32)__pa(p_buff);
1867 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1868 p_buf-> read.count = privptr->p_env->read_size;
1869
1870 /* initialize read_h command */
1871 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1872 p_buf-> read_h.cda =
1873 (__u32)__pa(&(p_buf->header));
1874 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1875 p_buf-> read_h.count = sizeof(struct clawh);
1876
1877 /* initialize Signal command */
1878 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1879 p_buf-> signal.cda =
1880 (__u32)__pa(&(pClawH->flag));
1881 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1882 p_buf-> signal.count = 1;
1883
1884 /* initialize r_TIC_1 command */
1885 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1886 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1887 p_buf-> r_TIC_1.flags = 0;
1888 p_buf-> r_TIC_1.count = 0;
1889
1890 /* initialize r_read_FF command */
1891 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1892 p_buf-> r_read_FF.cda =
1893 (__u32)__pa(&(pClawH->flag));
1894 p_buf-> r_read_FF.flags =
1895 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1896 p_buf-> r_read_FF.count = 1;
1897
1898 /* initialize r_TIC_2 */
1899 memcpy(&p_buf->r_TIC_2,
1900 &p_buf->r_TIC_1, sizeof(struct ccw1));
1901
1902 /* initialize Header */
1903 p_buf->header.length=0xffff;
1904 p_buf->header.opcode=0xff;
1905 p_buf->header.flag=CLAW_PENDING;
1906
1907 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1908 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1909 -1)
1910 & PAGE_MASK)) {
1911 p_buff= p_buff+privptr->p_env->read_size;
1912 }
1913 else {
1914 p_buff=
1915 (void *)((unsigned long)
1916 (p_buff+2*(privptr->p_env->read_size)-1)
1917 & PAGE_MASK) ;
1918 }
1919 } /* for read_buffers */
1920 } /* read_size < PAGE_SIZE */
1921 else { /* read Size >= PAGE_SIZE */
1922 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1923 p_buff = (void *)__get_free_pages(__GFP_DMA,
1924 (int)pages_to_order_of_mag(
1925 privptr->p_buff_pages_perread));
1926 if (p_buff==NULL) {
1927 free_pages((unsigned long)privptr->p_buff_ccw,
1928 (int)pages_to_order_of_mag(privptr->
1929 p_buff_ccw_num));
1930 /* free the write pages */
1931 p_buf=privptr->p_buff_write;
1932 while (p_buf!=NULL) {
1933 free_pages(
1934 (unsigned long)p_buf->p_buffer,
1935 (int)pages_to_order_of_mag(
1936 privptr->p_buff_pages_perwrite));
1937 p_buf=p_buf->next;
1938 }
1939 /* free any read pages already alloc */
1940 p_buf=privptr->p_buff_read;
1941 while (p_buf!=NULL) {
1942 free_pages(
1943 (unsigned long)p_buf->p_buffer,
1944 (int)pages_to_order_of_mag(
1945 privptr->p_buff_pages_perread));
1946 p_buf=p_buf->next;
1947 }
1948 privptr->p_buff_ccw=NULL;
1949 privptr->p_buff_write=NULL;
1950 return -ENOMEM;
1951 }
1952 memset(p_buff, 0x00, privptr->p_env->read_size);
1953 p_buf = p_free_chain;
1954 privptr->p_buff_read = p_buf;
1955 p_free_chain = p_buf->next;
1956
1957 if (p_last_CCWB==NULL) {
1958 p_buf->next=NULL;
1959 real_TIC_address=0;
1960 p_last_CCWB=p_buf;
1961 }
1962 else {
1963 p_buf->next=p_first_CCWB;
1964 real_TIC_address=
1965 (addr_t)__pa(
1966 &p_first_CCWB -> read );
1967 }
1968
1969 p_first_CCWB=p_buf;
1970 /* save buff address */
1971 p_buf->p_buffer=(struct clawbuf *)p_buff;
1972 /* initialize read command */
1973 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1974 p_buf-> read.cda = (__u32)__pa(p_buff);
1975 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1976 p_buf-> read.count = privptr->p_env->read_size;
1977
1978 /* initialize read_h command */
1979 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1980 p_buf-> read_h.cda =
1981 (__u32)__pa(&(p_buf->header));
1982 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1983 p_buf-> read_h.count = sizeof(struct clawh);
1984
1985 /* initialize Signal command */
1986 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1987 p_buf-> signal.cda =
1988 (__u32)__pa(&(pClawH->flag));
1989 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1990 p_buf-> signal.count = 1;
1991
1992 /* initialize r_TIC_1 command */
1993 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1994 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1995 p_buf-> r_TIC_1.flags = 0;
1996 p_buf-> r_TIC_1.count = 0;
1997
1998 /* initialize r_read_FF command */
1999 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2000 p_buf-> r_read_FF.cda =
2001 (__u32)__pa(&(pClawH->flag));
2002 p_buf-> r_read_FF.flags =
2003 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
2004 p_buf-> r_read_FF.count = 1;
2005
2006 /* initialize r_TIC_2 */
2007 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
2008 sizeof(struct ccw1));
2009
2010 /* initialize Header */
2011 p_buf->header.length=0xffff;
2012 p_buf->header.opcode=0xff;
2013 p_buf->header.flag=CLAW_PENDING;
2014
2015 } /* For read_buffers */
2016 } /* read_size >= PAGE_SIZE */
2017 } /* pBuffread = NULL */
2018 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
2019 privptr->buffs_alloc = 1;
2020
2021 return 0;
2022 } /* end of init_ccw_bk */
2023
2024 /*-------------------------------------------------------------------*
2025 * *
2026 * probe_error *
2027 * *
2028 *--------------------------------------------------------------------*/
2029
2030 static void
2031 probe_error( struct ccwgroup_device *cgdev)
2032 {
2033 struct claw_privbk *privptr;
2034
2035 CLAW_DBF_TEXT(4, trace, "proberr");
2036 privptr = dev_get_drvdata(&cgdev->dev);
2037 if (privptr != NULL) {
2038 dev_set_drvdata(&cgdev->dev, NULL);
2039 kfree(privptr->p_env);
2040 kfree(privptr->p_mtc_envelope);
2041 kfree(privptr);
2042 }
2043 } /* probe_error */
2044
2045 /*-------------------------------------------------------------------*
2046 * claw_process_control *
2047 * *
2048 * *
2049 *--------------------------------------------------------------------*/
2050
/*
 * Handle one inbound CLAW control packet (logical link 0 traffic).
 *
 * The control record is copied out of the read buffer - when packed
 * mode is active it sits behind a 4-byte packing header - and is
 * dispatched on its command code.  This implements the CLAW handshake:
 * system validate request/response, connection request/response/
 * confirm, disconnect and peer error indications.
 *
 * Returns 0 on normal completion; returns 1 only when the peer
 * answered a connection request with a non-zero return code.
 */
static int
claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
{

	struct clawbuf *p_buf;
	struct clawctl ctlbk;		/* local copy of the control record */
	struct clawctl *p_ctlbk;
	char temp_host_name[8];		/* our configured host name */
	char temp_ws_name[8];		/* our configured adapter name */
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	struct sysval *p_sysval;
	struct conncmd *p_connect = NULL;
	int rc;
	struct chbk *p_ch = NULL;
	struct device *tdev;
	CLAW_DBF_TEXT(2, setup, "clw_cntl");
	udelay(1000);  /* Wait a ms for the control packets to
	*catch up to each other */
	privptr = dev->ml_priv;
	p_env = privptr->p_env;
	tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
	memcpy(&temp_host_name, p_env->host_name, 8);
	memcpy(&temp_ws_name, p_env->adapter_name, 8);
	dev_info(tdev, "%s: CLAW device %.8s: "
		"Received Control Packet\n",
		dev->name, temp_ws_name);
	/* device release in progress - drop control traffic */
	if (privptr->release_pend == 1) {
		return 0;
	}
	p_buf = p_ccw->p_buffer;
	p_ctlbk = &ctlbk;
	if (p_env->packing == DO_PACKED) { /* packing in progress?*/
		/* skip the 4-byte packing header in front of the record */
		memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
	} else {
		memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
	}
	switch (p_ctlbk->command)
	{
	case SYSTEM_VALIDATE_REQUEST:
		if (p_ctlbk->version != CLAW_VERSION_ID) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_WRONG_VERSION);
			dev_warn(tdev, "The communication peer of %s"
				" uses an incorrect API version %d\n",
				dev->name, p_ctlbk->version);
		}
		p_sysval = (struct sysval *)&(p_ctlbk->data);
		dev_info(tdev, "%s: Recv Sys Validate Request: "
			"Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
			"Host name=%.8s\n",
			dev->name, p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_sysval->WS_name,
			p_sysval->host_name);
		/* peer's host name must match our configured host name,
		 * and peer's adapter name our configured adapter name
		 * (cross-check described in the file header comment) */
		if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_NAME_MISMATCH);
			CLAW_DBF_TEXT(2, setup, "HSTBAD");
			CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
			CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
			dev_warn(tdev,
				"Host name %s for %s does not match the"
				" remote adapter name %s\n",
				p_sysval->host_name,
				dev->name,
				temp_host_name);
		}
		if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_NAME_MISMATCH);
			CLAW_DBF_TEXT(2, setup, "WSNBAD");
			CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
			CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
			dev_warn(tdev, "Adapter name %s for %s does not match"
				" the remote host name %s\n",
				p_sysval->WS_name,
				dev->name,
				temp_ws_name);
		}
		/* frame size checks apply only when packing is off */
		if ((p_sysval->write_frame_size < p_env->write_size) &&
		    (p_env->packing == 0)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_HOST_RCV_TOO_SMALL);
			dev_warn(tdev,
				"The local write buffer is smaller than the"
				" remote read buffer\n");
			CLAW_DBF_TEXT(2, setup, "wrtszbad");
		}
		if ((p_sysval->read_frame_size < p_env->read_size) &&
		    (p_env->packing == 0)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_HOST_RCV_TOO_SMALL);
			dev_warn(tdev,
				"The local read buffer is smaller than the"
				" remote write buffer\n");
			CLAW_DBF_TEXT(2, setup, "rdsizbad");
		}
		/* NOTE(review): a success response is sent here even after
		 * one of the mismatch responses above - confirm this double
		 * response matches the intended CLAW handshake behavior */
		claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
		dev_info(tdev,
			"CLAW device %.8s: System validate"
			" completed.\n", temp_ws_name);
		dev_info(tdev,
			"%s: sys Validate Rsize:%d Wsize:%d\n",
			dev->name, p_sysval->read_frame_size,
			p_sysval->write_frame_size);
		privptr->system_validate_comp = 1;
		if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
			p_env->packing = PACKING_ASK;
		claw_strt_conn_req(dev);
		break;
	case SYSTEM_VALIDATE_RESPONSE:
		p_sysval = (struct sysval *)&(p_ctlbk->data);
		dev_info(tdev,
			"Settings for %s validated (version=%d, "
			"remote device=%d, rc=%d, adapter name=%.8s, "
			"host name=%.8s)\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->correlator,
			p_ctlbk->rc,
			p_sysval->WS_name,
			p_sysval->host_name);
		switch (p_ctlbk->rc) {
		case 0:
			dev_info(tdev, "%s: CLAW device "
				"%.8s: System validate completed.\n",
				dev->name, temp_ws_name);
			if (privptr->system_validate_comp == 0)
				claw_strt_conn_req(dev);
			privptr->system_validate_comp = 1;
			break;
		case CLAW_RC_NAME_MISMATCH:
			dev_warn(tdev, "Validating %s failed because of"
				" a host or adapter name mismatch\n",
				dev->name);
			break;
		case CLAW_RC_WRONG_VERSION:
			dev_warn(tdev, "Validating %s failed because of a"
				" version conflict\n",
				dev->name);
			break;
		case CLAW_RC_HOST_RCV_TOO_SMALL:
			dev_warn(tdev, "Validating %s failed because of a"
				" frame size conflict\n",
				dev->name);
			break;
		default:
			dev_warn(tdev, "The communication peer of %s rejected"
				" the connection\n",
				dev->name);
			break;
		}
		break;

	case CONNECTION_REQUEST:
		p_connect = (struct conncmd *)&(p_ctlbk->data);
		dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
			"Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_connect->host_name,
			p_connect->WS_name);
		/* NOTE(review): each rejection below sends a disconnect but
		 * execution still falls through to the CONNECTION_CONFIRM
		 * send further down - confirm this is intentional */
		if (privptr->active_link_ID != 0) {
			claw_snd_disc(dev, p_ctlbk);
			dev_info(tdev, "%s rejected a connection request"
				" because it is already active\n",
				dev->name);
		}
		if (p_ctlbk->linkid != 1) {
			claw_snd_disc(dev, p_ctlbk);
			dev_info(tdev, "%s rejected a request to open multiple"
				" connections\n",
				dev->name);
		}
		rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
		if (rc != 0) {
			claw_snd_disc(dev, p_ctlbk);
			dev_info(tdev, "%s rejected a connection request"
				" because of a type mismatch\n",
				dev->name);
		}
		claw_send_control(dev,
			CONNECTION_CONFIRM, p_ctlbk->linkid,
			p_ctlbk->correlator,
			0, p_connect->host_name,
			p_connect->WS_name);
		if (p_env->packing == PACKING_ASK) {
			/* presumably the peer accepts packing here; a second
			 * connection request is issued for the packed link -
			 * see the DISCONNECT case for the fallback path */
			p_env->packing = PACK_SEND;
			claw_snd_conn_req(dev, 0);
		}
		dev_info(tdev, "%s: CLAW device %.8s: Connection "
			"completed link_id=%d.\n",
			dev->name, temp_ws_name,
			p_ctlbk->linkid);
		privptr->active_link_ID = p_ctlbk->linkid;
		p_ch = &privptr->channel[WRITE_CHANNEL];
		wake_up(&p_ch->wait);	/* wake up claw_open ( WRITE) */
		break;
	case CONNECTION_RESPONSE:
		p_connect = (struct conncmd *)&(p_ctlbk->data);
		dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
			"Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_ctlbk->rc,
			p_connect->host_name,
			p_connect->WS_name);

		if (p_ctlbk->rc != 0) {
			dev_warn(tdev, "The communication peer of %s rejected"
				" a connection request\n",
				dev->name);
			return 1;
		}
		rc = find_link(dev,
			p_connect->host_name, p_connect->WS_name);
		if (rc != 0) {
			claw_snd_disc(dev, p_ctlbk);
			dev_warn(tdev, "The communication peer of %s"
				" rejected a connection "
				"request because of a type mismatch\n",
				dev->name);
		}
		/* should be until CONNECTION_CONFIRM */
		/* negative id marks the link as awaiting confirmation */
		privptr->active_link_ID = -(p_ctlbk->linkid);
		break;
	case CONNECTION_CONFIRM:
		p_connect = (struct conncmd *)&(p_ctlbk->data);
		dev_info(tdev,
			"%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
			"Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_connect->host_name,
			p_connect->WS_name);
		/* active_link_ID was negated while the confirm was
		 * outstanding (see CONNECTION_RESPONSE above); a match
		 * completes the handshake */
		if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
			privptr->active_link_ID = p_ctlbk->linkid;
			if (p_env->packing > PACKING_ASK) {
				dev_info(tdev,
					"%s: Confirmed Now packing\n", dev->name);
				p_env->packing = DO_PACKED;
			}
			p_ch = &privptr->channel[WRITE_CHANNEL];
			wake_up(&p_ch->wait);
		} else {
			dev_warn(tdev, "Activating %s failed because of"
				" an incorrect link ID=%d\n",
				dev->name, p_ctlbk->linkid);
			claw_snd_disc(dev, p_ctlbk);
		}
		break;
	case DISCONNECT:
		dev_info(tdev, "%s: Disconnect: "
			"Vers=%d,link_id=%d,Corr=%d\n",
			dev->name, p_ctlbk->version,
			p_ctlbk->linkid, p_ctlbk->correlator);
		/* NOTE(review): a disconnect of link 2 during PACK_SEND
		 * appears to mean the peer declined the packing link and
		 * traffic falls back to link 1 - confirm */
		if ((p_ctlbk->linkid == 2) &&
		    (p_env->packing == PACK_SEND)) {
			privptr->active_link_ID = 1;
			p_env->packing = DO_PACKED;
		} else
			privptr->active_link_ID = 0;
		break;
	case CLAW_ERROR:
		dev_warn(tdev, "The communication peer of %s failed\n",
			dev->name);
		break;
	default:
		dev_warn(tdev, "The communication peer of %s sent"
			" an unknown command code\n",
			dev->name);
		break;
	}

	return 0;
}	/* end of claw_process_control */
2335
2336
2337 /*-------------------------------------------------------------------*
2338 * claw_send_control *
2339 * *
2340 *--------------------------------------------------------------------*/
2341
/*
 * Build a CLAW control record and transmit it to the peer.
 *
 * @type:	control command code (SYSTEM_VALIDATE_*, CONNECTION_*, ...)
 * @link:	logical link id to place in the record
 * @correlator:	correlator echoed from the peer's record, if any
 * @rc:		return code to report to the peer
 * @local_name / @remote_name: 8-byte name fields for the record body
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated.
 */
static int
claw_send_control(struct net_device *dev, __u8 type, __u8 link,
	__u8 correlator, __u8 rc, char *local_name, char *remote_name)
{
	struct claw_privbk *privptr;
	struct clawctl *p_ctl;
	struct sysval *p_sysval;
	struct conncmd *p_connect;
	struct sk_buff *skb;

	CLAW_DBF_TEXT(2, setup, "sndcntl");
	privptr = dev->ml_priv;
	p_ctl = (struct clawctl *)&privptr->ctl_bk;

	p_ctl->command = type;
	p_ctl->version = CLAW_VERSION_ID;
	p_ctl->linkid = link;
	p_ctl->correlator = correlator;
	p_ctl->rc = rc;

	/* both views overlay the same data area of the control record;
	 * which layout applies depends on the command below */
	p_sysval = (struct sysval *)&p_ctl->data;
	p_connect = (struct conncmd *)&p_ctl->data;

	switch (p_ctl->command) {
	case SYSTEM_VALIDATE_REQUEST:
	case SYSTEM_VALIDATE_RESPONSE:
		memcpy(&p_sysval->host_name, local_name, 8);
		memcpy(&p_sysval->WS_name, remote_name, 8);
		if (privptr->p_env->packing > 0) {
			p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
			p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
		} else {
			/* how big is the biggest group of packets */
			p_sysval->read_frame_size =
				privptr->p_env->read_size;
			p_sysval->write_frame_size =
				privptr->p_env->write_size;
		}
		memset(&p_sysval->reserved, 0x00, 4);
		break;
	case CONNECTION_REQUEST:
	case CONNECTION_RESPONSE:
	case CONNECTION_CONFIRM:
	case DISCONNECT:
		/* name fields are at the same offsets in both layouts,
		 * so the sysval view is used for the copies here */
		memcpy(&p_sysval->host_name, local_name, 8);
		memcpy(&p_sysval->WS_name, remote_name, 8);
		if (privptr->p_env->packing > 0) {
			/* How big is the biggest packet */
			p_connect->reserved1[0] = CLAW_FRAME_SIZE;
			p_connect->reserved1[1] = CLAW_FRAME_SIZE;
		} else {
			memset(&p_connect->reserved1, 0x00, 4);
			memset(&p_connect->reserved2, 0x00, 4);
		}
		break;
	default:
		break;
	}

	/* write Control Record to the device */

	skb = dev_alloc_skb(sizeof(struct clawctl));
	if (!skb) {
		return -ENOMEM;
	}
	memcpy(skb_put(skb, sizeof(struct clawctl)),
		p_ctl, sizeof(struct clawctl));
	/* once packing is negotiated, control records go out packed too */
	if (privptr->p_env->packing >= PACK_SEND)
		claw_hw_tx(skb, dev, 1);
	else
		claw_hw_tx(skb, dev, 0);
	return 0;
}	/* end of claw_send_control */
2416
2417 /*-------------------------------------------------------------------*
2418 * claw_snd_conn_req *
2419 * *
2420 *--------------------------------------------------------------------*/
2421 static int
2422 claw_snd_conn_req(struct net_device *dev, __u8 link)
2423 {
2424 int rc;
2425 struct claw_privbk *privptr = dev->ml_priv;
2426 struct clawctl *p_ctl;
2427
2428 CLAW_DBF_TEXT(2, setup, "snd_conn");
2429 rc = 1;
2430 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2431 p_ctl->linkid = link;
2432 if ( privptr->system_validate_comp==0x00 ) {
2433 return rc;
2434 }
2435 if (privptr->p_env->packing == PACKING_ASK )
2436 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2437 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2438 if (privptr->p_env->packing == PACK_SEND) {
2439 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2440 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2441 }
2442 if (privptr->p_env->packing == 0)
2443 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2444 HOST_APPL_NAME, privptr->p_env->api_type);
2445 return rc;
2446
2447 } /* end of claw_snd_conn_req */
2448
2449
2450 /*-------------------------------------------------------------------*
2451 * claw_snd_disc *
2452 * *
2453 *--------------------------------------------------------------------*/
2454
2455 static int
2456 claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2457 {
2458 int rc;
2459 struct conncmd * p_connect;
2460
2461 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2462 p_connect=(struct conncmd *)&p_ctl->data;
2463
2464 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2465 p_ctl->correlator, 0,
2466 p_connect->host_name, p_connect->WS_name);
2467 return rc;
2468 } /* end of claw_snd_disc */
2469
2470
2471 /*-------------------------------------------------------------------*
2472 * claw_snd_sys_validate_rsp *
2473 * *
2474 *--------------------------------------------------------------------*/
2475
2476 static int
2477 claw_snd_sys_validate_rsp(struct net_device *dev,
2478 struct clawctl *p_ctl, __u32 return_code)
2479 {
2480 struct claw_env * p_env;
2481 struct claw_privbk *privptr;
2482 int rc;
2483
2484 CLAW_DBF_TEXT(2, setup, "chkresp");
2485 privptr = dev->ml_priv;
2486 p_env=privptr->p_env;
2487 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2488 p_ctl->linkid,
2489 p_ctl->correlator,
2490 return_code,
2491 p_env->host_name,
2492 p_env->adapter_name );
2493 return rc;
2494 } /* end of claw_snd_sys_validate_rsp */
2495
2496 /*-------------------------------------------------------------------*
2497 * claw_strt_conn_req *
2498 * *
2499 *--------------------------------------------------------------------*/
2500
2501 static int
2502 claw_strt_conn_req(struct net_device *dev )
2503 {
2504 int rc;
2505
2506 CLAW_DBF_TEXT(2, setup, "conn_req");
2507 rc=claw_snd_conn_req(dev, 1);
2508 return rc;
2509 } /* end of claw_strt_conn_req */
2510
2511
2512
2513 /*-------------------------------------------------------------------*
2514 * claw_stats *
2515 *-------------------------------------------------------------------*/
2516
2517 static struct
2518 net_device_stats *claw_stats(struct net_device *dev)
2519 {
2520 struct claw_privbk *privptr;
2521
2522 CLAW_DBF_TEXT(4, trace, "stats");
2523 privptr = dev->ml_priv;
2524 return &privptr->stats;
2525 } /* end of claw_stats */
2526
2527
2528 /*-------------------------------------------------------------------*
2529 * unpack_read *
2530 * *
2531 *--------------------------------------------------------------------*/
/*
 * Drain completed read CCW buffers: reassemble (possibly packed
 * and/or multi-frame) inbound data into the MTC envelope, hand
 * finished datagrams to the network stack via netif_rx(), and recycle
 * the processed CCW blocks onto the free read queue before restarting
 * the read channel program.
 */
static void
unpack_read(struct net_device *dev )
{
	struct sk_buff *skb;
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	struct ccwbk *p_this_ccw;
	struct ccwbk *p_first_ccw;	/* head of recycled-block chain */
	struct ccwbk *p_last_ccw;	/* tail of recycled-block chain */
	struct clawph *p_packh;		/* packing header in the buffer */
	void *p_packd;			/* cursor into a packed frame */
	struct clawctl *p_ctlrec = NULL;
	struct device *p_dev;

	__u32 len_of_data;
	__u32 pack_off;			/* offset of next packed record */
	__u8 link_num;
	__u8 mtc_this_frm = 0;		/* More-To-Come set on this frame */
	__u32 bytes_to_mov;
	int i = 0;			/* frames processed (debug count) */
	int p = 0;			/* packets unpacked (debug count) */

	CLAW_DBF_TEXT(4, trace, "unpkread");
	p_first_ccw = NULL;
	p_last_ccw = NULL;
	p_packh = NULL;
	p_packd = NULL;
	privptr = dev->ml_priv;

	p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
	p_env = privptr->p_env;
	p_this_ccw = privptr->p_read_active_first;
	/* walk the active read queue up to the first still-pending block */
	while (p_this_ccw != NULL && p_this_ccw->header.flag != CLAW_PENDING) {
		pack_off = 0;
		p = 0;
		p_this_ccw->header.flag = CLAW_PENDING;
		privptr->p_read_active_first = p_this_ccw->next;
		p_this_ccw->next = NULL;
		p_packh = (struct clawph *)p_this_ccw->p_buffer;
		if ((p_env->packing == PACK_SEND) &&
		    (p_packh->len == 32) &&
		    (p_packh->link_num == 0)) {   /* is it a packed ctl rec? */
			p_packh++;  /* peek past pack header */
			p_ctlrec = (struct clawctl *)p_packh;
			p_packh--;  /* un peek */
			/* a connection response/confirm on the packing
			 * link switches us into fully packed mode */
			if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
			    (p_ctlrec->command == CONNECTION_CONFIRM))
				p_env->packing = DO_PACKED;
		}
		if (p_env->packing == DO_PACKED)
			link_num = p_packh->link_num;
		else
			link_num = p_this_ccw->header.opcode / 8;
		if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG) != 0) {
			mtc_this_frm = 1;
			/* a continued frame is expected to fill the
			 * whole read buffer */
			if (p_this_ccw->header.length !=
				privptr->p_env->read_size ) {
				dev_warn(p_dev,
					"The communication peer of %s"
					" sent a faulty"
					" frame of length %02x\n",
					dev->name, p_this_ccw->header.length);
			}
		}

		if (privptr->mtc_skipping) {
			/*
			 *	We're in the mode of skipping past a
			 *	multi-frame message
			 *	that we can't process for some reason or other.
			 *	The first frame without the More-To-Come flag is
			 *	the last frame of the skipped message.
			 */
			/*  in case of More-To-Come not set in this frame */
			if (mtc_this_frm == 0) {
				privptr->mtc_skipping = 0; /* Ok, the end */
				privptr->mtc_logical_link = -1;
			}
			goto NextFrame;
		}

		if (link_num == 0) {
			/* logical link 0 carries CLAW control records */
			claw_process_control(dev, p_this_ccw);
			CLAW_DBF_TEXT(4, trace, "UnpkCntl");
			goto NextFrame;
		}
unpack_next:
		if (p_env->packing == DO_PACKED) {
			if (pack_off > p_env->read_size)
				goto NextFrame;
			p_packd = p_this_ccw->p_buffer + pack_off;
			p_packh = (struct clawph *) p_packd;
			if ((p_packh->len == 0) || /* done with this frame? */
			    (p_packh->flag != 0))
				goto NextFrame;
			bytes_to_mov = p_packh->len;
			pack_off += bytes_to_mov + sizeof(struct clawph);
			p++;
		} else {
			bytes_to_mov = p_this_ccw->header.length;
		}
		if (privptr->mtc_logical_link < 0) {

		/*
		 *  if More-To-Come is set in this frame then we don't know
		 *  length of entire message, and hence have to allocate
		 *  large buffer   */

			/*      We are starting a new envelope  */
			privptr->mtc_offset = 0;
			privptr->mtc_logical_link = link_num;
		}

		if (bytes_to_mov > (MAX_ENVELOPE_SIZE - privptr->mtc_offset)) {
			/* envelope overflow - count it and drop the frame */
			privptr->stats.rx_frame_errors++;
			goto NextFrame;
		}
		if (p_env->packing == DO_PACKED) {
			memcpy( privptr->p_mtc_envelope + privptr->mtc_offset,
				p_packd + sizeof(struct clawph), bytes_to_mov);

		} else	{
			memcpy( privptr->p_mtc_envelope + privptr->mtc_offset,
				p_this_ccw->p_buffer, bytes_to_mov);
		}
		if (mtc_this_frm == 0) {
			/* message complete - pass it up the stack */
			len_of_data = privptr->mtc_offset + bytes_to_mov;
			skb = dev_alloc_skb(len_of_data);
			if (skb) {
				memcpy(skb_put(skb, len_of_data),
					privptr->p_mtc_envelope,
					len_of_data);
				skb->dev = dev;
				skb_reset_mac_header(skb);
				skb->protocol = htons(ETH_P_IP);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				privptr->stats.rx_packets++;
				privptr->stats.rx_bytes += len_of_data;
				netif_rx(skb);
			}
			else {
				dev_info(p_dev, "Allocating a buffer for"
					" incoming data failed\n");
				privptr->stats.rx_dropped++;
			}
			privptr->mtc_offset = 0;
			privptr->mtc_logical_link = -1;
		}
		else {
			/* more frames follow - keep accumulating */
			privptr->mtc_offset += bytes_to_mov;
		}
		if (p_env->packing == DO_PACKED)
			goto unpack_next;
NextFrame:
		/*
		 * Remove ThisCCWblock from active read queue, and add it
		 * to queue of free blocks to be reused.
		 */
		i++;
		p_this_ccw->header.length = 0xffff;
		p_this_ccw->header.opcode = 0xff;
		/*
		 *	add this one to the free queue for later reuse
		 */
		if (p_first_ccw == NULL) {
			p_first_ccw = p_this_ccw;
		}
		else {
			p_last_ccw->next = p_this_ccw;
		}
		p_last_ccw = p_this_ccw;
		/*
		 *	chain to next block on active read queue
		 */
		p_this_ccw = privptr->p_read_active_first;
		CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
	}	/* end of while */

	/*      check validity                  */

	CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
	add_claw_reads(dev, p_first_ccw, p_last_ccw);
	claw_strt_read(dev, LOCK_YES);
	return;
}	/* end of unpack_read */
2718
2719 /*-------------------------------------------------------------------*
2720 * claw_strt_read *
2721 * *
2722 *--------------------------------------------------------------------*/
/*
 * (Re)start the read channel program if no read I/O is currently
 * active.  The signal block flag tells the adapter whether our
 * buffers are idle or busy before the I/O is started.
 *
 * @lock: LOCK_YES to take the ccw device lock here; callers that
 *	  already hold it pass the other value.
 */
static void
claw_strt_read (struct net_device *dev, int lock )
{
	int rc = 0;
	__u32 parm;
	unsigned long saveflags = 0;
	struct claw_privbk *privptr = dev->ml_priv;
	struct ccwbk*p_ccwbk;
	struct chbk *p_ch;
	struct clawh *p_clawh;
	p_ch = &privptr->channel[READ_CHANNEL];

	CLAW_DBF_TEXT(4, trace, "StRdNter");
	p_clawh = (struct clawh *)privptr->p_claw_signal_blk;
	p_clawh->flag = CLAW_IDLE;    /* 0x00 */

	/* any non-pending block on either active queue means busy */
	if ((privptr->p_write_active_first != NULL &&
	     privptr->p_write_active_first->header.flag != CLAW_PENDING) ||
	    (privptr->p_read_active_first != NULL &&
	     privptr->p_read_active_first->header.flag != CLAW_PENDING )) {
		p_clawh->flag = CLAW_BUSY;    /* 0xff */
	}
	if (lock == LOCK_YES) {
		spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
	}
	/* only start a channel program if none is already running */
	if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
		CLAW_DBF_TEXT(4, trace, "HotRead");
		p_ccwbk = privptr->p_read_active_first;
		parm = (unsigned long) p_ch;
		rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
				       0xff, 0);
		if (rc != 0) {
			ccw_check_return_code(p_ch->cdev, rc);
		}
	}
	else {
		/* a read channel program is already active */
		CLAW_DBF_TEXT(2, trace, "ReadAct");
	}

	if (lock == LOCK_YES) {
		spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
	}
	CLAW_DBF_TEXT(4, trace, "StRdExit");
	return;
}	/* end of claw_strt_read */
2768
2769 /*-------------------------------------------------------------------*
2770 * claw_strt_out_IO *
2771 * *
2772 *--------------------------------------------------------------------*/
2773
2774 static void
2775 claw_strt_out_IO( struct net_device *dev )
2776 {
2777 int rc = 0;
2778 unsigned long parm;
2779 struct claw_privbk *privptr;
2780 struct chbk *p_ch;
2781 struct ccwbk *p_first_ccw;
2782
2783 if (!dev) {
2784 return;
2785 }
2786 privptr = (struct claw_privbk *)dev->ml_priv;
2787 p_ch = &privptr->channel[WRITE_CHANNEL];
2788
2789 CLAW_DBF_TEXT(4, trace, "strt_io");
2790 p_first_ccw=privptr->p_write_active_first;
2791
2792 if (p_ch->claw_state == CLAW_STOP)
2793 return;
2794 if (p_first_ccw == NULL) {
2795 return;
2796 }
2797 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2798 parm = (unsigned long) p_ch;
2799 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2800 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2801 0xff, 0);
2802 if (rc != 0) {
2803 ccw_check_return_code(p_ch->cdev, rc);
2804 }
2805 }
2806 dev->trans_start = jiffies;
2807 return;
2808 } /* end of claw_strt_out_IO */
2809
2810 /*-------------------------------------------------------------------*
2811 * Free write buffers *
2812 * *
2813 *--------------------------------------------------------------------*/
2814
2815 static void
2816 claw_free_wrt_buf( struct net_device *dev )
2817 {
2818
2819 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2820 struct ccwbk*p_first_ccw;
2821 struct ccwbk*p_last_ccw;
2822 struct ccwbk*p_this_ccw;
2823 struct ccwbk*p_next_ccw;
2824
2825 CLAW_DBF_TEXT(4, trace, "freewrtb");
2826 /* scan the write queue to free any completed write packets */
2827 p_first_ccw=NULL;
2828 p_last_ccw=NULL;
2829 p_this_ccw=privptr->p_write_active_first;
2830 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2831 {
2832 p_next_ccw = p_this_ccw->next;
2833 if (((p_next_ccw!=NULL) &&
2834 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2835 ((p_this_ccw == privptr->p_write_active_last) &&
2836 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2837 /* The next CCW is OK or this is */
2838 /* the last CCW...free it @A1A */
2839 privptr->p_write_active_first=p_this_ccw->next;
2840 p_this_ccw->header.flag=CLAW_PENDING;
2841 p_this_ccw->next=privptr->p_write_free_chain;
2842 privptr->p_write_free_chain=p_this_ccw;
2843 ++privptr->write_free_count;
2844 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2845 p_this_ccw=privptr->p_write_active_first;
2846 privptr->stats.tx_packets++;
2847 }
2848 else {
2849 break;
2850 }
2851 }
2852 if (privptr->write_free_count!=0) {
2853 claw_clearbit_busy(TB_NOBUFFER,dev);
2854 }
2855 /* whole chain removed? */
2856 if (privptr->p_write_active_first==NULL) {
2857 privptr->p_write_active_last=NULL;
2858 }
2859 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2860 return;
2861 }
2862
2863 /*-------------------------------------------------------------------*
2864 * claw free netdevice *
2865 * *
2866 *--------------------------------------------------------------------*/
2867 static void
2868 claw_free_netdevice(struct net_device * dev, int free_dev)
2869 {
2870 struct claw_privbk *privptr;
2871
2872 CLAW_DBF_TEXT(2, setup, "free_dev");
2873 if (!dev)
2874 return;
2875 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2876 privptr = dev->ml_priv;
2877 if (dev->flags & IFF_RUNNING)
2878 claw_release(dev);
2879 if (privptr) {
2880 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2881 }
2882 dev->ml_priv = NULL;
2883 #ifdef MODULE
2884 if (free_dev) {
2885 free_netdev(dev);
2886 }
2887 #endif
2888 CLAW_DBF_TEXT(2, setup, "free_ok");
2889 }
2890
2891 /**
2892 * Claw init netdevice
2893 * Initialize everything of the net device except the name and the
2894 * channel structs.
2895 */
/* net_device callbacks wired into every CLAW interface */
static const struct net_device_ops claw_netdev_ops = {
	.ndo_open		= claw_open,
	.ndo_stop		= claw_release,
	.ndo_get_stats		= claw_stats,
	.ndo_start_xmit		= claw_tx,
	.ndo_change_mtu		= claw_change_mtu,
};
2903
2904 static void
2905 claw_init_netdevice(struct net_device * dev)
2906 {
2907 CLAW_DBF_TEXT(2, setup, "init_dev");
2908 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2909 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2910 dev->hard_header_len = 0;
2911 dev->addr_len = 0;
2912 dev->type = ARPHRD_SLIP;
2913 dev->tx_queue_len = 1300;
2914 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2915 dev->netdev_ops = &claw_netdev_ops;
2916 CLAW_DBF_TEXT(2, setup, "initok");
2917 return;
2918 }
2919
2920 /**
2921 * Init a new channel in the privptr->channel[i].
2922 *
2923 * @param cdev The ccw_device to be added.
2924 *
2925 * @return 0 on success, !0 on error.
2926 */
2927 static int
2928 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2929 {
2930 struct chbk *p_ch;
2931 struct ccw_dev_id dev_id;
2932
2933 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2934 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2935 p_ch = &privptr->channel[i];
2936 p_ch->cdev = cdev;
2937 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2938 ccw_device_get_id(cdev, &dev_id);
2939 p_ch->devno = dev_id.devno;
2940 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2941 return -ENOMEM;
2942 }
2943 return 0;
2944 }
2945
2946
2947 /**
2948 *
2949 * Setup an interface.
2950 *
2951 * @param cgdev Device to be setup.
2952 *
2953 * @returns 0 on success, !0 on failure.
2954 */
/**
 *
 * Setup an interface: attach the private data to both subchannels,
 * record their device numbers, allocate the channel irb buffers,
 * bring both subchannels online, then allocate and register the
 * net_device and its CCW buffer chains.
 *
 * @param cgdev Device to be setup.
 *
 * @returns 0 on success, -ENODEV on any failure.
 */
static int
claw_new_device(struct ccwgroup_device *cgdev)
{
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	struct net_device *dev;
	int ret;
	struct ccw_dev_id dev_id;

	dev_info(&cgdev->dev, "add for %s\n",
		 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
	CLAW_DBF_TEXT(2, setup, "new_dev");
	privptr = dev_get_drvdata(&cgdev->dev);
	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
	if (!privptr)
		return -ENODEV;
	p_env = privptr->p_env;
	ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
	p_env->devno[READ_CHANNEL] = dev_id.devno;
	ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
	p_env->devno[WRITE_CHANNEL] = dev_id.devno;
	ret = add_channel(cgdev->cdev[0],0,privptr);
	if (ret == 0)
		ret = add_channel(cgdev->cdev[1],1,privptr);
	if (ret != 0) {
		dev_warn(&cgdev->dev, "Creating a CLAW group device"
			" failed with error code %d\n", ret);
		goto out;
	}
	ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
	if (ret != 0) {
		dev_warn(&cgdev->dev,
			"Setting the read subchannel online"
			" failed with error code %d\n", ret);
		goto out;
	}
	ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
	if (ret != 0) {
		dev_warn(&cgdev->dev,
			"Setting the write subchannel online "
			"failed with error code %d\n", ret);
		goto out;
	}
	dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
	if (!dev) {
		dev_warn(&cgdev->dev,
			"Activating the CLAW device failed\n");
		goto out;
	}
	dev->ml_priv = privptr;
	dev_set_drvdata(&cgdev->dev, privptr);
	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
	/* sysfs magic */
	SET_NETDEV_DEV(dev, &cgdev->dev);
	if (register_netdev(dev) != 0) {
		claw_free_netdevice(dev, 1);
		CLAW_DBF_TEXT(2, trace, "regfail");
		goto out;
	}
	dev->flags &=~IFF_RUNNING;
	/* allocate the CCW buffer chains on first activation only */
	if (privptr->buffs_alloc == 0) {
		ret=init_ccw_bk(dev);
		if (ret !=0) {
			unregister_netdev(dev);
			claw_free_netdevice(dev,1);
			CLAW_DBF_TEXT(2, trace, "ccwmem");
			goto out;
		}
	}
	privptr->channel[READ_CHANNEL].ndev = dev;
	privptr->channel[WRITE_CHANNEL].ndev = dev;
	privptr->p_env->ndev = dev;

	dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
		"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
		dev->name, p_env->read_size,
		p_env->write_size, p_env->read_buffers,
		p_env->write_buffers, p_env->devno[READ_CHANNEL],
		p_env->devno[WRITE_CHANNEL]);
	dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
		":%.8s api_type: %.8s\n",
		dev->name, p_env->host_name,
		p_env->adapter_name , p_env->api_type);
	return 0;
out:
	/* NOTE(review): both subchannels are set offline here even on
	 * paths where they were never brought online - presumably the
	 * extra set_offline calls are harmless; confirm */
	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);
	return -ENODEV;
}
3046
3047 static void
3048 claw_purge_skb_queue(struct sk_buff_head *q)
3049 {
3050 struct sk_buff *skb;
3051
3052 CLAW_DBF_TEXT(4, trace, "purgque");
3053 while ((skb = skb_dequeue(q))) {
3054 atomic_dec(&skb->users);
3055 dev_kfree_skb_any(skb);
3056 }
3057 }
3058
3059 /**
3060 * Shutdown an interface.
3061 *
3062 * @param cgdev Device to be shut down.
3063 *
3064 * @returns 0 on success, !0 on failure.
3065 */
3066 static int
3067 claw_shutdown_device(struct ccwgroup_device *cgdev)
3068 {
3069 struct claw_privbk *priv;
3070 struct net_device *ndev;
3071 int ret;
3072
3073 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3074 priv = dev_get_drvdata(&cgdev->dev);
3075 if (!priv)
3076 return -ENODEV;
3077 ndev = priv->channel[READ_CHANNEL].ndev;
3078 if (ndev) {
3079 /* Close the device */
3080 dev_info(&cgdev->dev, "%s: shutting down\n",
3081 ndev->name);
3082 if (ndev->flags & IFF_RUNNING)
3083 ret = claw_release(ndev);
3084 ndev->flags &=~IFF_RUNNING;
3085 unregister_netdev(ndev);
3086 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3087 claw_free_netdevice(ndev, 1);
3088 priv->channel[READ_CHANNEL].ndev = NULL;
3089 priv->channel[WRITE_CHANNEL].ndev = NULL;
3090 priv->p_env->ndev = NULL;
3091 }
3092 ccw_device_set_offline(cgdev->cdev[1]);
3093 ccw_device_set_offline(cgdev->cdev[0]);
3094 return 0;
3095 }
3096
/*
 * Final teardown of a CLAW group device: shut it down if still online,
 * remove its sysfs files, free all private allocations, clear the drvdata
 * pointers on the group and both channel devices, and drop the reference
 * on the group device.
 */
static void
claw_remove_device(struct ccwgroup_device *cgdev)
{
	struct claw_privbk *priv;

	BUG_ON(!cgdev);
	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
	priv = dev_get_drvdata(&cgdev->dev);
	BUG_ON(!priv);
	dev_info(&cgdev->dev, " will be removed.\n");
	/* an online device still has its netdev and channels active */
	if (cgdev->state == CCWGROUP_ONLINE)
		claw_shutdown_device(cgdev);
	claw_remove_files(&cgdev->dev);
	/* free everything hanging off priv before priv itself */
	kfree(priv->p_mtc_envelope);
	priv->p_mtc_envelope=NULL;
	kfree(priv->p_env);
	priv->p_env=NULL;
	kfree(priv->channel[0].irb);
	priv->channel[0].irb=NULL;
	kfree(priv->channel[1].irb);
	priv->channel[1].irb=NULL;
	kfree(priv);
	/* clear stale drvdata so nothing can reach the freed priv */
	dev_set_drvdata(&cgdev->dev, NULL);
	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
	/* drop the reference held since the group device was probed */
	put_device(&cgdev->dev);

	return;
}
3126
3127
3128 /*
3129 * sysfs attributes
3130 */
3131 static ssize_t
3132 claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3133 {
3134 struct claw_privbk *priv;
3135 struct claw_env * p_env;
3136
3137 priv = dev_get_drvdata(dev);
3138 if (!priv)
3139 return -ENODEV;
3140 p_env = priv->p_env;
3141 return sprintf(buf, "%s\n",p_env->host_name);
3142 }
3143
3144 static ssize_t
3145 claw_hname_write(struct device *dev, struct device_attribute *attr,
3146 const char *buf, size_t count)
3147 {
3148 struct claw_privbk *priv;
3149 struct claw_env * p_env;
3150
3151 priv = dev_get_drvdata(dev);
3152 if (!priv)
3153 return -ENODEV;
3154 p_env = priv->p_env;
3155 if (count > MAX_NAME_LEN+1)
3156 return -EINVAL;
3157 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3158 strncpy(p_env->host_name,buf, count);
3159 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3160 p_env->host_name[MAX_NAME_LEN] = 0x00;
3161 CLAW_DBF_TEXT(2, setup, "HstnSet");
3162 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3163
3164 return count;
3165 }
3166
3167 static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3168
3169 static ssize_t
3170 claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3171 {
3172 struct claw_privbk *priv;
3173 struct claw_env * p_env;
3174
3175 priv = dev_get_drvdata(dev);
3176 if (!priv)
3177 return -ENODEV;
3178 p_env = priv->p_env;
3179 return sprintf(buf, "%s\n", p_env->adapter_name);
3180 }
3181
3182 static ssize_t
3183 claw_adname_write(struct device *dev, struct device_attribute *attr,
3184 const char *buf, size_t count)
3185 {
3186 struct claw_privbk *priv;
3187 struct claw_env * p_env;
3188
3189 priv = dev_get_drvdata(dev);
3190 if (!priv)
3191 return -ENODEV;
3192 p_env = priv->p_env;
3193 if (count > MAX_NAME_LEN+1)
3194 return -EINVAL;
3195 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3196 strncpy(p_env->adapter_name,buf, count);
3197 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3198 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3199 CLAW_DBF_TEXT(2, setup, "AdnSet");
3200 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3201
3202 return count;
3203 }
3204
3205 static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3206
3207 static ssize_t
3208 claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3209 {
3210 struct claw_privbk *priv;
3211 struct claw_env * p_env;
3212
3213 priv = dev_get_drvdata(dev);
3214 if (!priv)
3215 return -ENODEV;
3216 p_env = priv->p_env;
3217 return sprintf(buf, "%s\n",
3218 p_env->api_type);
3219 }
3220
3221 static ssize_t
3222 claw_apname_write(struct device *dev, struct device_attribute *attr,
3223 const char *buf, size_t count)
3224 {
3225 struct claw_privbk *priv;
3226 struct claw_env * p_env;
3227
3228 priv = dev_get_drvdata(dev);
3229 if (!priv)
3230 return -ENODEV;
3231 p_env = priv->p_env;
3232 if (count > MAX_NAME_LEN+1)
3233 return -EINVAL;
3234 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3235 strncpy(p_env->api_type,buf, count);
3236 p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */
3237 p_env->api_type[MAX_NAME_LEN] = 0x00;
3238 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3239 p_env->read_size=DEF_PACK_BUFSIZE;
3240 p_env->write_size=DEF_PACK_BUFSIZE;
3241 p_env->packing=PACKING_ASK;
3242 CLAW_DBF_TEXT(2, setup, "PACKING");
3243 }
3244 else {
3245 p_env->packing=0;
3246 p_env->read_size=CLAW_FRAME_SIZE;
3247 p_env->write_size=CLAW_FRAME_SIZE;
3248 CLAW_DBF_TEXT(2, setup, "ApiSet");
3249 }
3250 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3251 return count;
3252 }
3253
3254 static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3255
3256 static ssize_t
3257 claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3258 {
3259 struct claw_privbk *priv;
3260 struct claw_env * p_env;
3261
3262 priv = dev_get_drvdata(dev);
3263 if (!priv)
3264 return -ENODEV;
3265 p_env = priv->p_env;
3266 return sprintf(buf, "%d\n", p_env->write_buffers);
3267 }
3268
3269 static ssize_t
3270 claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3271 const char *buf, size_t count)
3272 {
3273 struct claw_privbk *priv;
3274 struct claw_env * p_env;
3275 int nnn,max;
3276
3277 priv = dev_get_drvdata(dev);
3278 if (!priv)
3279 return -ENODEV;
3280 p_env = priv->p_env;
3281 sscanf(buf, "%i", &nnn);
3282 if (p_env->packing) {
3283 max = 64;
3284 }
3285 else {
3286 max = 512;
3287 }
3288 if ((nnn > max ) || (nnn < 2))
3289 return -EINVAL;
3290 p_env->write_buffers = nnn;
3291 CLAW_DBF_TEXT(2, setup, "Wbufset");
3292 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3293 return count;
3294 }
3295
3296 static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3297
3298 static ssize_t
3299 claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3300 {
3301 struct claw_privbk *priv;
3302 struct claw_env * p_env;
3303
3304 priv = dev_get_drvdata(dev);
3305 if (!priv)
3306 return -ENODEV;
3307 p_env = priv->p_env;
3308 return sprintf(buf, "%d\n", p_env->read_buffers);
3309 }
3310
3311 static ssize_t
3312 claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3313 const char *buf, size_t count)
3314 {
3315 struct claw_privbk *priv;
3316 struct claw_env *p_env;
3317 int nnn,max;
3318
3319 priv = dev_get_drvdata(dev);
3320 if (!priv)
3321 return -ENODEV;
3322 p_env = priv->p_env;
3323 sscanf(buf, "%i", &nnn);
3324 if (p_env->packing) {
3325 max = 64;
3326 }
3327 else {
3328 max = 512;
3329 }
3330 if ((nnn > max ) || (nnn < 2))
3331 return -EINVAL;
3332 p_env->read_buffers = nnn;
3333 CLAW_DBF_TEXT(2, setup, "Rbufset");
3334 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3335 return count;
3336 }
3337
3338 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3339
/* sysfs attributes exported on each CLAW group device */
static struct attribute *claw_attr[] = {
	&dev_attr_read_buffer.attr,
	&dev_attr_write_buffer.attr,
	&dev_attr_adapter_name.attr,
	&dev_attr_api_type.attr,
	&dev_attr_host_name.attr,
	NULL,	/* sentinel required by sysfs_create_group() */
};

/* group created by claw_add_files(), removed by claw_remove_files() */
static struct attribute_group claw_attr_group = {
	.attrs = claw_attr,
};
3352
/* Create the CLAW sysfs attribute group on dev; returns 0 or -errno. */
static int
claw_add_files(struct device *dev)
{
	CLAW_DBF_TEXT(2, setup, "add_file");
	return sysfs_create_group(&dev->kobj, &claw_attr_group);
}
3359
/* Remove the CLAW sysfs attribute group created by claw_add_files(). */
static void
claw_remove_files(struct device *dev)
{
	CLAW_DBF_TEXT(2, setup, "rem_file");
	sysfs_remove_group(&dev->kobj, &claw_attr_group);
}
3366
3367 /*--------------------------------------------------------------------*
3368 * claw_init and cleanup *
3369 *---------------------------------------------------------------------*/
3370
/*
 * Module unload: undo everything claw_init() registered, in strict
 * reverse order of registration.
 */
static void __exit
claw_cleanup(void)
{
	driver_remove_file(&claw_group_driver.driver,
			&driver_attr_group);
	ccwgroup_driver_unregister(&claw_group_driver);
	ccw_driver_unregister(&claw_ccw_driver);
	root_device_unregister(claw_root_dev);
	claw_unregister_debug_facility();
	pr_info("Driver unloaded\n");

}
3383
3384 /**
3385 * Initialize module.
3386 * This is called just after the module is loaded.
3387 *
3388 * @return 0 on success, !0 on error.
3389 */
3390 static int __init
3391 claw_init(void)
3392 {
3393 int ret = 0;
3394
3395 pr_info("Loading %s\n", version);
3396 ret = claw_register_debug_facility();
3397 if (ret) {
3398 pr_err("Registering with the S/390 debug feature"
3399 " failed with error code %d\n", ret);
3400 goto out_err;
3401 }
3402 CLAW_DBF_TEXT(2, setup, "init_mod");
3403 claw_root_dev = root_device_register("claw");
3404 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3405 if (ret)
3406 goto register_err;
3407 ret = ccw_driver_register(&claw_ccw_driver);
3408 if (ret)
3409 goto ccw_err;
3410 claw_group_driver.driver.groups = claw_group_attr_groups;
3411 ret = ccwgroup_driver_register(&claw_group_driver);
3412 if (ret)
3413 goto ccwgroup_err;
3414 return 0;
3415
3416 ccwgroup_err:
3417 ccw_driver_unregister(&claw_ccw_driver);
3418 ccw_err:
3419 root_device_unregister(claw_root_dev);
3420 register_err:
3421 CLAW_DBF_TEXT(2, setup, "init_bad");
3422 claw_unregister_debug_facility();
3423 out_err:
3424 pr_err("Initializing the claw device driver failed\n");
3425 return ret;
3426 }
3427
module_init(claw_init);
module_exit(claw_cleanup);

/* module metadata */
MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
			"Copyright 2000,2008 IBM Corporation\n");
MODULE_LICENSE("GPL");