// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                    N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_send_control_data_cb(struct qeth_card *card,
				      struct qeth_channel *channel,
				      struct qeth_cmd_buffer *iob);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_buffers(struct qeth_card *);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static struct workqueue_struct *qeth_wq;

int qeth_card_hw_is_reachable(struct qeth_card *card)
{
	return (card->state == CARD_STATE_SOFTSETUP) ||
		(card->state == CARD_STATE_UP);
}
EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	rtnl_lock();
	dev_close(card->dev);
	rtnl_unlock();
	ccwgroup_set_offline(card->gdev);
}

void qeth_close_dev(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cldevsubm");
	queue_work(qeth_wq, &card->close_dev_work);
}
EXPORT_SYMBOL_GPL(qeth_close_dev);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_recovery_task(struct qeth_card *card)
{
	card->recovery_task = current;
}
EXPORT_SYMBOL_GPL(qeth_set_recovery_task);

void qeth_clear_recovery_task(struct qeth_card *card)
{
	card->recovery_task = NULL;
}
EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);

static bool qeth_is_recovery_task(const struct qeth_card *card)
{
	return card->recovery_task == current;
}

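/*
 * Card-wide async activities (e.g. recovery) run as kernel threads and
 * are tracked in three bitmasks: thread_allowed_mask gates which threads
 * may be started, thread_start_mask records requested starts, and
 * thread_running_mask the threads currently active. All three are
 * protected by thread_mask_lock.
 */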
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
	if (qeth_is_recovery_task(card))
		return 0;
	return wait_event_interruptible(card->wait_q,
			qeth_threads_running(card, threads) == 0);
}
EXPORT_SYMBOL_GPL(qeth_wait_for_threads);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
			    &card->qdio.in_buf_pool.entry_list, list){
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_DBF_TEXT(SETUP, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			     127);
		if (rc) {
			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

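/*
 * With completion queueing enabled, a second inbound queue carries the
 * asynchronous TX completions, and one qdio_outbuf_state entry per
 * outbound buffer is handed to qdio so it can report each buffer's
 * final state.
 */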
static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_DBF_TEXT(SETUP, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
				QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_DBF_TEXT(SETUP, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

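/*
 * Map the SBALF 15 error code of a completed buffer to the af_iucv TX
 * notification that should be raised for its skbs: 0 means delivered,
 * 4/16/17/18 mean the target is unreachable, everything else is
 * reported as a general error.
 */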
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

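/*
 * Walk the chain of pending TX buffers hanging off bufs[bidx] and free
 * every buffer whose delayed completion has already been handled. On a
 * forced cleanup (recovery) the whole chain is released, and the slot's
 * own buffer is re-initialized if its delayed completion was handled.
 */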
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_release_skbs(c);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}

		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}

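/*
 * Handle one asynchronous completion (QAOB) delivered through the CQ.
 * The affected outbound buffer was stashed in aob->user1 when it was
 * given to the hardware; depending on whether the buffer is still
 * PRIMED or was already marked PENDING by the TX completion path, the
 * attached af_iucv sockets get an immediate or a delayed notification.
 */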
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}

static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
{
	return card->options.cq == QETH_CQ_ENABLED &&
	    card->qdio.c_q != NULL &&
	    queue != 0 &&
	    queue == card->qdio.no_in_queues - 1;
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

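/* Start the next READ ccw on the control channel. The caller must hold
 * the ccwdev lock of the read device; qeth_issue_next_read() below is
 * the locked wrapper.
 */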
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;
	iob = qeth_get_buffer(channel);
	if (!iob) {
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
				 CARD_DEVID(card));
		return -ENOMEM;
	}
	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, channel->ccw,
			      (addr_t) iob, 0, 0);
	if (rc) {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		atomic_set(&channel->irq_pending, 0);
		qeth_release_buffer(channel, iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
	struct qeth_reply *reply;

	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
	if (reply) {
		refcount_set(&reply->refcnt, 1);
		atomic_set(&reply->received, 0);
	}
	return reply;
}

static void qeth_get_reply(struct qeth_reply *reply)
{
	refcount_inc(&reply->refcnt);
}

static void qeth_put_reply(struct qeth_reply *reply)
{
	if (refcount_dec_and_test(&reply->refcnt))
		kfree(reply);
}

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;
	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
		struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			qeth_close_dev(card);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_reply *reply, *r;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		qeth_get_reply(reply);
		reply->rc = -EIO;
		atomic_inc(&reply->received);
		list_del_init(&reply->list);
		wake_up(&reply->wait_q);
		qeth_put_reply(reply);
	}
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

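/*
 * Check a control buffer for an IDX TERMINATE indication. Returns 0 if
 * the buffer is fine, -EIO on termination, and -EPERM for cause code
 * 0xf6 (device not configured for the OSI layer required by z/VM).
 */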
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	if (!buffer)
		return 0;

	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & 0xc0) == 0xc0) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		if (buffer[4] == 0xf6) {
			dev_err(&card->gdev->dev,
				"The qeth device is not configured "
				"for the OSI layer required by z/VM\n");
			return -EPERM;
		}
		return -EIO;
	}
	return 0;
}

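/* Find and lock a free cmd buffer; the caller must hold iob_lock. */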
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
	__u8 index;

	index = channel->io_buf_no;
	do {
		if (channel->iob[index].state == BUF_STATE_FREE) {
			channel->iob[index].state = BUF_STATE_LOCKED;
			channel->io_buf_no = (channel->io_buf_no + 1) %
				QETH_CMD_BUFFER_NO;
			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
			return channel->iob + index;
		}
		index = (index + 1) % QETH_CMD_BUFFER_NO;
	} while (index != channel->io_buf_no);

	return NULL;
}

void qeth_release_buffer(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	iob->rc = 0;
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	wake_up(&channel->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_channel *channel,
				   struct qeth_cmd_buffer *iob)
{
	qeth_release_buffer(channel, iob);
}

static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	buffer = __qeth_get_buffer(channel);
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	return buffer;
}

struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;
	wait_event(channel->wait_q,
		   ((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);

void qeth_clear_cmd_buffers(struct qeth_channel *channel)
{
	int cnt;

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		qeth_release_buffer(channel, &channel->iob[cnt]);
	channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);

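/*
 * Completion callback for control-channel reads: validate the IDX
 * header, weed out unsolicited IPA events, and then match the reply
 * against the waiters on cmd_waiter_list by sequence number so their
 * reply callbacks can run.
 */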
static void qeth_send_control_data_cb(struct qeth_card *card,
				      struct qeth_channel *channel,
				      struct qeth_cmd_buffer *iob)
{
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply, *r;
	unsigned long flags;
	int keep_reply;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	} else {
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)
			goto out;
	}

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
			qeth_get_reply(reply);
			list_del_init(&reply->list);
			spin_unlock_irqrestore(&card->lock, flags);
			keep_reply = 0;
			if (reply->callback != NULL) {
				if (cmd) {
					reply->offset = (__u16)((char *)cmd -
							(char *)iob->data);
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)cmd);
				} else
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)iob);
			}
			if (cmd)
				reply->rc = (u16) cmd->hdr.return_code;
			else if (iob->rc)
				reply->rc = iob->rc;
			if (keep_reply) {
				spin_lock_irqsave(&card->lock, flags);
				list_add_tail(&reply->list,
					      &card->cmd_waiter_list);
				spin_unlock_irqrestore(&card->lock, flags);
			} else {
				atomic_inc(&reply->received);
				wake_up(&reply->wait_q);
			}
			qeth_put_reply(reply);
			goto out;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	qeth_release_buffer(channel, iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	      (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
		struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return 1;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return 1;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return 1;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return 1;
	}
	return 0;
}

static long qeth_check_irb_error(struct qeth_card *card,
				 struct ccw_device *cdev, unsigned long intparm,
				 struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		break;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		if (intparm == QETH_RCD_PARM) {
			if (card->data.ccwdev == cdev) {
				card->data.state = CH_STATE_DOWN;
				wake_up(&card->wait_q);
			}
		}
		break;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
	}
	return PTR_ERR(irb);
}

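/*
 * Common interrupt handler for the read, write and data ccw devices of
 * a card. Runs under the ccwdev lock, which keeps the drvdata pointers
 * stable while the handler executes.
 */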
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);
	if (!card)
		return;

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (qeth_intparm_is_iob(intparm))
		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);

	if (qeth_check_irb_error(card, cdev, intparm, irb)) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_release_buffer(iob->channel, iob);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return;
	}

	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	/* let's wake up immediately on data channel */
	if ((channel == &card->data) && (intparm != 0) &&
	    (intparm != QETH_RCD_PARM))
		goto out;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
		if (intparm == QETH_RCD_PARM) {
			channel->state = CH_STATE_DOWN;
			goto out;
		}
		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_release_buffer(iob->channel, iob);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (intparm == QETH_RCD_PARM) {
		channel->state = CH_STATE_RCD_DONE;
		goto out;
	}
	if (channel == &card->data)
		return;
	if (channel == &card->read &&
	    channel->state == CH_STATE_UP)
		__qeth_issue_next_read(card);

	if (iob && iob->callback)
		iob->callback(card, iob->channel, iob);

out:
	wake_up(&card->wait_q);
	return;
}

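/* Raise a TX notification on each af_iucv skb attached to this buffer. */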
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);

	__skb_queue_purge(&buf->skb_list);
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_release_skbs(buf);

	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
				buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer,
			       QETH_MAX_BUFFER_ELEMENTS(queue->card));
	buf->next_element_to_fill = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j]);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

void qeth_clear_qdio_buffers(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i]) {
			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
		}
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list){
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}

static void qeth_clean_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "freech");

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		kfree(channel->iob[cnt].data);
	kfree(channel->ccw);
}

static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
{
	struct ccw_device *cdev = channel->ccwdev;
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "setupch");

	channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!channel->ccw)
		return -ENOMEM;
	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);
	init_waitqueue_head(&channel->wait_q);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	if (!alloc_buffers)
		return 0;

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
						 GFP_KERNEL | GFP_DMA);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
		channel->iob[cnt].rc = 0;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		qeth_clean_channel(channel);
		return -ENOMEM;
	}
	channel->io_buf_no = 0;
	spin_lock_init(&channel->iob_lock);

	return 0;
}

static void qeth_set_single_write_queues(struct qeth_card *card)
{
	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
	    (card->qdio.no_out_queues == 4))
		qeth_free_qdio_buffers(card);

	card->qdio.no_out_queues = 1;
	if (card->qdio.default_out_queue != 0)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.default_out_queue = 0;
}

static void qeth_set_multiple_write_queues(struct qeth_card *card)
{
	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
	    (card->qdio.no_out_queues == 1)) {
		qeth_free_qdio_buffers(card);
		card->qdio.default_out_queue = 2;
	}
	card->qdio.no_out_queues = 4;
}

static void qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_DBF_TEXT(SETUP, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		goto out;

	card->info.func_level = 0x4100 + chp_dsc->desc;
	if (card->info.type == QETH_CARD_TYPE_IQD)
		goto out;

	/* CHPP field bit 6 == 1 -> single queue */
	if ((chp_dsc->chpp & 0x02) == 0x02)
		qeth_set_single_write_queues(card);
	else
		qeth_set_multiple_write_queues(card);
out:
	kfree(chp_dsc);
	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
	card->qdio.no_out_queues = QETH_MAX_QUEUES;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (card->info.type == QETH_CARD_TYPE_IQD)
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline->recover, (void *)card,
				"qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->mclock);
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ip_lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	mutex_init(&card->vid_list_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;
	if (qeth_setup_channel(&card->read, true))
		goto out_ip;
	if (qeth_setup_channel(&card->write, true))
		goto out_channel;
	if (qeth_setup_channel(&card->data, false))
		goto out_data;
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_data:
	qeth_clean_channel(&card->write);
out_channel:
	qeth_clean_channel(&card->read);
out_ip:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

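/*
 * Read the device's configuration data via the Read Configuration Data
 * (RCD) command advertised in the extended SenseID data. On success,
 * *buffer and *length describe a kzalloc'ed copy the caller must free.
 */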
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
	channel->state = CH_STATE_RCD;
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}

static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSN)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (card->info.guestlan)
		disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
			QETH_DISCIPLINE_LAYER3 :
			qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_DBF_TEXT(SETUP, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_DBF_TEXT(SETUP, 3, "force l3");
		break;
	default:
		QETH_DBF_TEXT(SETUP, 3, "force no");
	}

	return disc;
}

static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");

	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
	    prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static int qeth_idx_activate_get_answer(struct qeth_card *card,
					struct qeth_channel *channel,
					void (*reply_cb)(struct qeth_card *,
							 struct qeth_channel *,
							 struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	iob = qeth_get_buffer(channel);
	if (!iob)
		return -ENOMEM;
	iob->callback = reply_cb;
	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		qeth_release_buffer(channel, iob);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
	} else
		rc = 0;
	return rc;
}

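/*
 * Build and send an IDX ACTIVATE for the read or write channel, wait
 * for the interrupt handler to move the channel to ACTIVATING state,
 * and then read back the peer's answer.
 */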
static int qeth_idx_activate_channel(struct qeth_card *card,
				     struct qeth_channel *channel,
				     void (*reply_cb)(struct qeth_card *,
						      struct qeth_channel *,
						      struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	__u16 temp;
	__u8 tmp;
	int rc;
	struct ccw_dev_id temp_devid;

	QETH_DBF_TEXT(SETUP, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	if (!iob)
		return -ENOMEM;
	iob->callback = reply_cb;
	qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
		       iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	tmp = ((u8)card->dev->dev_port) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
			rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		qeth_release_buffer(channel, iob);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
			" failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
				 CCW_DEVID(channel->ccwdev));
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(card, channel, reply_cb);
}

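/*
 * Translate our local function level into the value the peer is
 * expected to echo back in its IDX ACTIVATE reply.
 */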
1909 static int qeth_peer_func_level(int level)
1910 {
1911 if ((level & 0xff) == 8)
1912 return (level & 0xff) + 0x400;
1913 if (((level >> 8) & 3) == 1)
1914 return (level & 0xff) + 0x200;
1915 return level;
1916 }
1917
1918 static void qeth_idx_write_cb(struct qeth_card *card,
1919 struct qeth_channel *channel,
1920 struct qeth_cmd_buffer *iob)
1921 {
1922 __u16 temp;
1923
1924 QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
1925
1926 if (channel->state == CH_STATE_DOWN) {
1927 channel->state = CH_STATE_ACTIVATING;
1928 goto out;
1929 }
1930
1931 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1932 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
1933 dev_err(&channel->ccwdev->dev,
1934 "The adapter is used exclusively by another "
1935 "host\n");
1936 else
1937 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1938 CCW_DEVID(channel->ccwdev));
1939 goto out;
1940 }
1941 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1942 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1943 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1944 CCW_DEVID(channel->ccwdev),
1945 card->info.func_level, temp);
1946 goto out;
1947 }
1948 channel->state = CH_STATE_UP;
1949 out:
1950 qeth_release_buffer(channel, iob);
1951 }
1952
1953 static void qeth_idx_read_cb(struct qeth_card *card,
1954 struct qeth_channel *channel,
1955 struct qeth_cmd_buffer *iob)
1956 {
1957 __u16 temp;
1958
1959 QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
1960 if (channel->state == CH_STATE_DOWN) {
1961 channel->state = CH_STATE_ACTIVATING;
1962 goto out;
1963 }
1964
1965 if (qeth_check_idx_response(card, iob->data))
1966 goto out;
1967
1968 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1969 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
1970 case QETH_IDX_ACT_ERR_EXCL:
1971 dev_err(&channel->ccwdev->dev,
1972 "The adapter is used exclusively by another "
1973 "host\n");
1974 break;
1975 case QETH_IDX_ACT_ERR_AUTH:
1976 case QETH_IDX_ACT_ERR_AUTH_USER:
1977 dev_err(&channel->ccwdev->dev,
1978 "Setting the device online failed because of "
1979 "insufficient authorization\n");
1980 break;
1981 default:
1982 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1983 CCW_DEVID(channel->ccwdev));
1984 }
1985 QETH_CARD_TEXT_(card, 2, "idxread%c",
1986 QETH_IDX_ACT_CAUSE_CODE(iob->data));
1987 goto out;
1988 }
1989
1990 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1991 if (temp != qeth_peer_func_level(card->info.func_level)) {
1992 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1993 CCW_DEVID(channel->ccwdev),
1994 card->info.func_level, temp);
1995 goto out;
1996 }
1997 memcpy(&card->token.issuer_rm_r,
1998 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1999 QETH_MPC_TOKEN_LENGTH);
2000 memcpy(&card->info.mcl_level[0],
2001 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2002 channel->state = CH_STATE_UP;
2003 out:
2004 qeth_release_buffer(channel, iob);
2005 }
2006
2007 void qeth_prepare_control_data(struct qeth_card *card, int len,
2008 struct qeth_cmd_buffer *iob)
2009 {
2010 qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
2011 iob->callback = qeth_release_buffer_cb;
2012
2013 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
2014 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
2015 card->seqno.trans_hdr++;
2016 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
2017 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
2018 card->seqno.pdu_hdr++;
2019 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
2020 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
2021 QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
2022 }
2023 EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
2024
2025 /**
2026 * qeth_send_control_data() - send control command to the card
2027 * @card: qeth_card structure pointer
2028 * @len: size of the command buffer
2029 * @iob: qeth_cmd_buffer pointer
2030 * @reply_cb: callback function pointer
2031 * @cb_card: pointer to the qeth_card structure
2032 * @cb_reply: pointer to the qeth_reply structure
2033 * @cb_cmd: pointer to the original iob for non-IPA
2034 * commands, or to the qeth_ipa_cmd structure
2035 * for the IPA commands.
2036 * @reply_param: private pointer passed to the callback
2037 *
2038 * Returns the value of the `return_code' field of the response
2039 * block returned from the hardware, or other error indication.
2040 * Value of zero indicates successful execution of the command.
2041 *
2042 * Callback function gets called one or more times, with cb_cmd
2043 * pointing to the response returned by the hardware. Callback
2044 * function must return non-zero if more reply blocks are expected,
2045 * and zero if the last or only reply block is received. Callback
2046 * function can get the value of the reply_param pointer from the
2047 * field 'param' of the structure qeth_reply.
2048 */
2050 int qeth_send_control_data(struct qeth_card *card, int len,
2051 struct qeth_cmd_buffer *iob,
2052 int (*reply_cb)(struct qeth_card *cb_card,
2053 struct qeth_reply *cb_reply,
2054 unsigned long cb_cmd),
2055 void *reply_param)
2056 {
2057 struct qeth_channel *channel = iob->channel;
2058 int rc;
2059 struct qeth_reply *reply = NULL;
2060 unsigned long timeout, event_timeout;
2061 struct qeth_ipa_cmd *cmd = NULL;
2062
2063 QETH_CARD_TEXT(card, 2, "sendctl");
2064
2065 if (card->read_or_write_problem) {
2066 qeth_release_buffer(channel, iob);
2067 return -EIO;
2068 }
2069 reply = qeth_alloc_reply(card);
2070 if (!reply) {
2071 qeth_release_buffer(channel, iob);
2072 return -ENOMEM;
2073 }
2074 reply->callback = reply_cb;
2075 reply->param = reply_param;
2076
2077 init_waitqueue_head(&reply->wait_q);
2078
2079 while (atomic_cmpxchg(&channel->irq_pending, 0, 1));
2080
2081 if (IS_IPA(iob->data)) {
2082 cmd = __ipa_cmd(iob);
2083 cmd->hdr.seqno = card->seqno.ipa++;
2084 reply->seqno = cmd->hdr.seqno;
2085 event_timeout = QETH_IPA_TIMEOUT;
2086 } else {
2087 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2088 event_timeout = QETH_TIMEOUT;
2089 }
2090 qeth_prepare_control_data(card, len, iob);
2091
2092 spin_lock_irq(&card->lock);
2093 list_add_tail(&reply->list, &card->cmd_waiter_list);
2094 spin_unlock_irq(&card->lock);
2095
2096 timeout = jiffies + event_timeout;
2097
2098 QETH_CARD_TEXT(card, 6, "noirqpnd");
2099 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2100 rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
2101 (addr_t) iob, 0, 0, event_timeout);
2102 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2103 if (rc) {
2104 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2105 CARD_DEVID(card), rc);
2106 QETH_CARD_TEXT_(card, 2, " err%d", rc);
2107 spin_lock_irq(&card->lock);
2108 list_del_init(&reply->list);
2109 qeth_put_reply(reply);
2110 spin_unlock_irq(&card->lock);
2111 qeth_release_buffer(channel, iob);
2112 atomic_set(&channel->irq_pending, 0);
2113 wake_up(&card->wait_q);
2114 return rc;
2115 }
2116
2117 /* We have only one long-running ipassist; since we can ensure
2118 process context for this command, we can sleep. */
2119 if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
2120 cmd->hdr.prot_version == QETH_PROT_IPV4) {
2121 if (!wait_event_timeout(reply->wait_q,
2122 atomic_read(&reply->received), event_timeout))
2123 goto time_err;
2124 } else {
2125 while (!atomic_read(&reply->received)) {
2126 if (time_after(jiffies, timeout))
2127 goto time_err;
2128 cpu_relax();
2129 }
2130 }
2131
2132 rc = reply->rc;
2133 qeth_put_reply(reply);
2134 return rc;
2135
2136 time_err:
2137 reply->rc = -ETIME;
2138 spin_lock_irq(&card->lock);
2139 list_del_init(&reply->list);
2140 spin_unlock_irq(&card->lock);
2141 atomic_inc(&reply->received);
2142 rc = reply->rc;
2143 qeth_put_reply(reply);
2144 return rc;
2145 }
2146 EXPORT_SYMBOL_GPL(qeth_send_control_data);
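/*
 * Usage sketch (illustrative only, not part of the driver; all names
 * below are hypothetical): a minimal reply callback that honours the
 * contract documented above by consuming the single reply block and
 * returning 0 to signal that no further blocks are expected.
 *
 *	static int example_reply_cb(struct qeth_card *card,
 *				    struct qeth_reply *reply,
 *				    unsigned long data)
 *	{
 *		struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)data;
 *
 *		memcpy(reply->param, iob->data, QETH_MPC_TOKEN_LENGTH);
 *		return 0;
 *	}
 *
 *	rc = qeth_send_control_data(card, EXAMPLE_CMD_SIZE, iob,
 *				    example_reply_cb, &result);
 */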
2147
2148 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2149 unsigned long data)
2150 {
2151 struct qeth_cmd_buffer *iob;
2152
2153 QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
2154
2155 iob = (struct qeth_cmd_buffer *) data;
2156 memcpy(&card->token.cm_filter_r,
2157 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2158 QETH_MPC_TOKEN_LENGTH);
2159 return 0;
2160 }
2161
2162 static int qeth_cm_enable(struct qeth_card *card)
2163 {
2164 int rc;
2165 struct qeth_cmd_buffer *iob;
2166
2167 QETH_DBF_TEXT(SETUP, 2, "cmenable");
2168
2169 iob = qeth_wait_for_buffer(&card->write);
2170 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2171 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2172 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2173 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2174 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2175
2176 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2177 qeth_cm_enable_cb, NULL);
2178 return rc;
2179 }
2180
2181 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2182 unsigned long data)
2183 {
2184 struct qeth_cmd_buffer *iob;
2185
2186 QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
2187
2188 iob = (struct qeth_cmd_buffer *) data;
2189 memcpy(&card->token.cm_connection_r,
2190 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2191 QETH_MPC_TOKEN_LENGTH);
2192 return 0;
2193 }
2194
2195 static int qeth_cm_setup(struct qeth_card *card)
2196 {
2197 int rc;
2198 struct qeth_cmd_buffer *iob;
2199
2200 QETH_DBF_TEXT(SETUP, 2, "cmsetup");
2201
2202 iob = qeth_wait_for_buffer(&card->write);
2203 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2204 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2205 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2206 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2207 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2208 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2209 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2210 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2211 qeth_cm_setup_cb, NULL);
2212 return rc;
2213 }
2214
2215 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2216 {
2217 struct net_device *dev = card->dev;
2218 unsigned int new_mtu;
2219
2220 if (!max_mtu) {
2221 /* IQD needs accurate max MTU to set up its RX buffers: */
2222 if (IS_IQD(card))
2223 return -EINVAL;
2224 /* tolerate quirky HW: */
2225 max_mtu = ETH_MAX_MTU;
2226 }
2227
2228 rtnl_lock();
2229 if (IS_IQD(card)) {
2230 /* move any device with default MTU to new max MTU: */
2231 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2232
2233 /* adjust RX buffer size to new max MTU: */
2234 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2235 if (dev->max_mtu && dev->max_mtu != max_mtu)
2236 qeth_free_qdio_buffers(card);
2237 } else {
2238 if (dev->mtu)
2239 new_mtu = dev->mtu;
2240 /* default MTUs for first setup: */
2241 else if (IS_LAYER2(card))
2242 new_mtu = ETH_DATA_LEN;
2243 else
2244 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2245 }
2246
2247 dev->max_mtu = max_mtu;
2248 dev->mtu = min(new_mtu, max_mtu);
2249 rtnl_unlock();
2250 return 0;
2251 }
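/*
 * Worked example (added note): an IQD device reporting frame size 0xffff
 * yields max_mtu = 57344 (see qeth_get_mtu_outof_framesize() below), so
 * with the 4 KiB pages used on s390 the RX buffer size becomes
 * 57344 + 2 * 4096 = 65536 bytes.
 */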
2252
2253 static int qeth_get_mtu_outof_framesize(int framesize)
2254 {
2255 switch (framesize) {
2256 case 0x4000:
2257 return 8192;
2258 case 0x6000:
2259 return 16384;
2260 case 0xa000:
2261 return 32768;
2262 case 0xffff:
2263 return 57344;
2264 default:
2265 return 0;
2266 }
2267 }
2268
2269 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2270 unsigned long data)
2271 {
2272 __u16 mtu, framesize;
2273 __u16 len;
2274 __u8 link_type;
2275 struct qeth_cmd_buffer *iob;
2276
2277 QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
2278
2279 iob = (struct qeth_cmd_buffer *) data;
2280 memcpy(&card->token.ulp_filter_r,
2281 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2282 QETH_MPC_TOKEN_LENGTH);
2283 if (card->info.type == QETH_CARD_TYPE_IQD) {
2284 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2285 mtu = qeth_get_mtu_outof_framesize(framesize);
2286 } else {
2287 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2288 }
2289 *(u16 *)reply->param = mtu;
2290
2291 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2292 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2293 memcpy(&link_type,
2294 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2295 card->info.link_type = link_type;
2296 } else
2297 card->info.link_type = 0;
2298 QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
2299 return 0;
2300 }
2301
2302 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2303 {
2304 if (IS_OSN(card))
2305 return QETH_PROT_OSN2;
2306 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2307 }
2308
2309 static int qeth_ulp_enable(struct qeth_card *card)
2310 {
2311 u8 prot_type = qeth_mpc_select_prot_type(card);
2312 struct qeth_cmd_buffer *iob;
2313 u16 max_mtu;
2314 int rc;
2315
2316 /* FIXME: trace view callbacks */
2317 QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
2318
2319 iob = qeth_wait_for_buffer(&card->write);
2320 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2321
2322 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2323 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2324 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2325 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2326 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2327 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2328 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2329 qeth_ulp_enable_cb, &max_mtu);
2330 if (rc)
2331 return rc;
2332 return qeth_update_max_mtu(card, max_mtu);
2333 }
2334
2335 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2336 unsigned long data)
2337 {
2338 struct qeth_cmd_buffer *iob;
2339
2340 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
2341
2342 iob = (struct qeth_cmd_buffer *) data;
2343 memcpy(&card->token.ulp_connection_r,
2344 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2345 QETH_MPC_TOKEN_LENGTH);
2346 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2347 3)) {
2348 QETH_DBF_TEXT(SETUP, 2, "olmlimit");
2349 dev_err(&card->gdev->dev, "A connection could not be "
2350 "established because of an OLM limit\n");
2351 iob->rc = -EMLINK;
2352 }
2353 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
2354 return 0;
2355 }
2356
2357 static int qeth_ulp_setup(struct qeth_card *card)
2358 {
2359 int rc;
2360 __u16 temp;
2361 struct qeth_cmd_buffer *iob;
2362 struct ccw_dev_id dev_id;
2363
2364 QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
2365
2366 iob = qeth_wait_for_buffer(&card->write);
2367 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2368
2369 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2370 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2371 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2372 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2373 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2374 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2375
2376 ccw_device_get_id(CARD_DDEV(card), &dev_id);
2377 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2378 temp = (card->info.cula << 8) + card->info.unit_addr2;
2379 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2380 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2381 qeth_ulp_setup_cb, NULL);
2382 return rc;
2383 }
2384
2385 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2386 {
2387 struct qeth_qdio_out_buffer *newbuf;
2388
2389 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2390 if (!newbuf)
2391 return -ENOMEM;
2392
2393 newbuf->buffer = q->qdio_bufs[bidx];
2394 skb_queue_head_init(&newbuf->skb_list);
2395 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2396 newbuf->q = q;
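/* Added note: chain the old buffer at this index behind the new one;
 * it may still have a TX completion pending, and the chain is drained
 * via qeth_cleanup_handled_pending(). */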
2397 newbuf->next_pending = q->bufs[bidx];
2398 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2399 q->bufs[bidx] = newbuf;
2400 return 0;
2401 }
2402
2403 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2404 {
2405 if (!q)
2406 return;
2407
2408 qeth_clear_outq_buffers(q, 1);
2409 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2410 kfree(q);
2411 }
2412
2413 static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
2414 {
2415 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2416
2417 if (!q)
2418 return NULL;
2419
2420 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2421 kfree(q);
2422 return NULL;
2423 }
2424 return q;
2425 }
2426
2427 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2428 {
2429 int i, j;
2430
2431 QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
2432
2433 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2434 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2435 return 0;
2436
2437 QETH_DBF_TEXT(SETUP, 2, "inq");
2438 card->qdio.in_q = qeth_alloc_qdio_queue();
2439 if (!card->qdio.in_q)
2440 goto out_nomem;
2441
2442 /* inbound buffer pool */
2443 if (qeth_alloc_buffer_pool(card))
2444 goto out_freeinq;
2445
2446 /* outbound */
2447 card->qdio.out_qs =
2448 kcalloc(card->qdio.no_out_queues,
2449 sizeof(struct qeth_qdio_out_q *),
2450 GFP_KERNEL);
2451 if (!card->qdio.out_qs)
2452 goto out_freepool;
2453 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2454 card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
2455 if (!card->qdio.out_qs[i])
2456 goto out_freeoutq;
2457 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2458 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
2459 card->qdio.out_qs[i]->queue_no = i;
2460 /* give outbound qeth_qdio_buffers their qdio_buffers */
2461 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2462 WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
2463 if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
2464 goto out_freeoutqbufs;
2465 }
2466 }
2467
2468 /* completion */
2469 if (qeth_alloc_cq(card))
2470 goto out_freeoutq;
2471
2472 return 0;
2473
2474 out_freeoutqbufs:
2475 while (j > 0) {
2476 --j;
2477 kmem_cache_free(qeth_qdio_outbuf_cache,
2478 card->qdio.out_qs[i]->bufs[j]);
2479 card->qdio.out_qs[i]->bufs[j] = NULL;
2480 }
2481 out_freeoutq:
2482 while (i > 0)
2483 qeth_free_output_queue(card->qdio.out_qs[--i]);
2484 kfree(card->qdio.out_qs);
2485 card->qdio.out_qs = NULL;
2486 out_freepool:
2487 qeth_free_buffer_pool(card);
2488 out_freeinq:
2489 qeth_free_qdio_queue(card->qdio.in_q);
2490 card->qdio.in_q = NULL;
2491 out_nomem:
2492 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2493 return -ENOMEM;
2494 }
2495
2496 static void qeth_free_qdio_buffers(struct qeth_card *card)
2497 {
2498 int i, j;
2499
2500 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2501 QETH_QDIO_UNINITIALIZED)
2502 return;
2503
2504 qeth_free_cq(card);
2505 cancel_delayed_work_sync(&card->buffer_reclaim_work);
2506 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2507 if (card->qdio.in_q->bufs[j].rx_skb)
2508 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2509 }
2510 qeth_free_qdio_queue(card->qdio.in_q);
2511 card->qdio.in_q = NULL;
2512 /* inbound buffer pool */
2513 qeth_free_buffer_pool(card);
2514 /* free outbound qdio_qs */
2515 if (card->qdio.out_qs) {
2516 for (i = 0; i < card->qdio.no_out_queues; i++)
2517 qeth_free_output_queue(card->qdio.out_qs[i]);
2518 kfree(card->qdio.out_qs);
2519 card->qdio.out_qs = NULL;
2520 }
2521 }
2522
2523 static void qeth_create_qib_param_field(struct qeth_card *card,
2524 char *param_field)
2525 {
2526
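/* Added note on the layout written below: bytes 0-3 carry the EBCDIC
 * eye-catcher "PCIT"; bytes 4-7, 8-11 and 12-15 carry the PCI
 * thresholds A and B and the PCI timer value, respectively. */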
2527 param_field[0] = _ascebc['P'];
2528 param_field[1] = _ascebc['C'];
2529 param_field[2] = _ascebc['I'];
2530 param_field[3] = _ascebc['T'];
2531 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2532 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2533 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2534 }
2535
2536 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2537 char *param_field)
2538 {
2539 param_field[16] = _ascebc['B'];
2540 param_field[17] = _ascebc['L'];
2541 param_field[18] = _ascebc['K'];
2542 param_field[19] = _ascebc['T'];
2543 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2544 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2545 *((unsigned int *) (&param_field[28])) =
2546 card->info.blkt.inter_packet_jumbo;
2547 }
2548
2549 static int qeth_qdio_activate(struct qeth_card *card)
2550 {
2551 QETH_DBF_TEXT(SETUP, 3, "qdioact");
2552 return qdio_activate(CARD_DDEV(card));
2553 }
2554
2555 static int qeth_dm_act(struct qeth_card *card)
2556 {
2557 int rc;
2558 struct qeth_cmd_buffer *iob;
2559
2560 QETH_DBF_TEXT(SETUP, 2, "dmact");
2561
2562 iob = qeth_wait_for_buffer(&card->write);
2563 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2564
2565 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2566 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2567 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2568 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2569 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2570 return rc;
2571 }
2572
2573 static int qeth_mpc_initialize(struct qeth_card *card)
2574 {
2575 int rc;
2576
2577 QETH_DBF_TEXT(SETUP, 2, "mpcinit");
2578
2579 rc = qeth_issue_next_read(card);
2580 if (rc) {
2581 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2582 return rc;
2583 }
2584 rc = qeth_cm_enable(card);
2585 if (rc) {
2586 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2587 goto out_qdio;
2588 }
2589 rc = qeth_cm_setup(card);
2590 if (rc) {
2591 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2592 goto out_qdio;
2593 }
2594 rc = qeth_ulp_enable(card);
2595 if (rc) {
2596 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
2597 goto out_qdio;
2598 }
2599 rc = qeth_ulp_setup(card);
2600 if (rc) {
2601 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2602 goto out_qdio;
2603 }
2604 rc = qeth_alloc_qdio_buffers(card);
2605 if (rc) {
2606 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2607 goto out_qdio;
2608 }
2609 rc = qeth_qdio_establish(card);
2610 if (rc) {
2611 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2612 qeth_free_qdio_buffers(card);
2613 goto out_qdio;
2614 }
2615 rc = qeth_qdio_activate(card);
2616 if (rc) {
2617 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
2618 goto out_qdio;
2619 }
2620 rc = qeth_dm_act(card);
2621 if (rc) {
2622 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
2623 goto out_qdio;
2624 }
2625
2626 return 0;
2627 out_qdio:
2628 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2629 qdio_free(CARD_DDEV(card));
2630 return rc;
2631 }
2632
2633 void qeth_print_status_message(struct qeth_card *card)
2634 {
2635 switch (card->info.type) {
2636 case QETH_CARD_TYPE_OSD:
2637 case QETH_CARD_TYPE_OSM:
2638 case QETH_CARD_TYPE_OSX:
2639 /* VM uses a non-zero first character to indicate
2640 * a HiperSockets-like reporting of the level;
2641 * OSA sets the first character to zero.
2642 */
2643 if (!card->info.mcl_level[0]) {
2644 sprintf(card->info.mcl_level, "%02x%02x",
2645 card->info.mcl_level[2],
2646 card->info.mcl_level[3]);
2647 break;
2648 }
2649 /* fallthrough */
2650 case QETH_CARD_TYPE_IQD:
2651 if ((card->info.guestlan) ||
2652 (card->info.mcl_level[0] & 0x80)) {
2653 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2654 card->info.mcl_level[0]];
2655 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2656 card->info.mcl_level[1]];
2657 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2658 card->info.mcl_level[2]];
2659 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2660 card->info.mcl_level[3]];
2661 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2662 }
2663 break;
2664 default:
2665 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2666 }
2667 dev_info(&card->gdev->dev,
2668 "Device is a%s card%s%s%s\nwith link type %s.\n",
2669 qeth_get_cardname(card),
2670 (card->info.mcl_level[0]) ? " (level: " : "",
2671 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2672 (card->info.mcl_level[0]) ? ")" : "",
2673 qeth_get_cardname_short(card));
2674 }
2675 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2676
2677 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2678 {
2679 struct qeth_buffer_pool_entry *entry;
2680
2681 QETH_CARD_TEXT(card, 5, "inwrklst");
2682
2683 list_for_each_entry(entry,
2684 &card->qdio.init_pool.entry_list, init_list) {
2685 qeth_put_buffer_pool_entry(card, entry);
2686 }
2687 }
2688
2689 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2690 struct qeth_card *card)
2691 {
2692 struct list_head *plh;
2693 struct qeth_buffer_pool_entry *entry;
2694 int i, free;
2695 struct page *page;
2696
2697 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2698 return NULL;
2699
2700 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2701 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
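/* Added note: a page_count > 1 means the stack still holds a
 * reference, so the element is not free for re-use. */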
2702 free = 1;
2703 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2704 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2705 free = 0;
2706 break;
2707 }
2708 }
2709 if (free) {
2710 list_del_init(&entry->list);
2711 return entry;
2712 }
2713 }
2714
2715 /* no free buffer in pool so take first one and swap pages */
2716 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2717 struct qeth_buffer_pool_entry, list);
2718 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2719 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2720 page = alloc_page(GFP_ATOMIC);
2721 if (!page)
2722 return NULL;
2723 free_page((unsigned long)entry->elements[i]);
2724 entry->elements[i] = page_address(page);
2725 if (card->options.performance_stats)
2726 card->perf_stats.sg_alloc_page_rx++;
2729 }
2730 }
2731 list_del_init(&entry->list);
2732 return entry;
2733 }
2734
2735 static int qeth_init_input_buffer(struct qeth_card *card,
2736 struct qeth_qdio_buffer *buf)
2737 {
2738 struct qeth_buffer_pool_entry *pool_entry;
2739 int i;
2740
2741 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2742 buf->rx_skb = netdev_alloc_skb(card->dev,
2743 QETH_RX_PULL_LEN + ETH_HLEN);
2744 if (!buf->rx_skb)
2745 return 1;
2746 }
2747
2748 pool_entry = qeth_find_free_buffer_pool_entry(card);
2749 if (!pool_entry)
2750 return 1;
2751
2752 /*
2753 * Since the buffer is accessed only from the input tasklet,
2754 * there shouldn't be a need to synchronize; also, since we use
2755 * QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2756 * buffers.
2757 */
2758
2759 buf->pool_entry = pool_entry;
2760 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2761 buf->buffer->element[i].length = PAGE_SIZE;
2762 buf->buffer->element[i].addr = pool_entry->elements[i];
2763 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2764 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2765 else
2766 buf->buffer->element[i].eflags = 0;
2767 buf->buffer->element[i].sflags = 0;
2768 }
2769 return 0;
2770 }
2771
2772 int qeth_init_qdio_queues(struct qeth_card *card)
2773 {
2774 int i, j;
2775 int rc;
2776
2777 QETH_DBF_TEXT(SETUP, 2, "initqdqs");
2778
2779 /* inbound queue */
2780 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2781 memset(&card->rx, 0, sizeof(struct qeth_rx));
2782 qeth_initialize_working_pool_list(card);
2783 /* give only as many buffers to hardware as we have buffer pool entries */
2784 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2785 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2786 card->qdio.in_q->next_buf_to_init =
2787 card->qdio.in_buf_pool.buf_count - 1;
2788 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2789 card->qdio.in_buf_pool.buf_count - 1);
2790 if (rc) {
2791 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2792 return rc;
2793 }
2794
2795 /* completion */
2796 rc = qeth_cq_init(card);
2797 if (rc)
2798 return rc;
2800
2801 /* outbound queue */
2802 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2803 qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
2804 QDIO_MAX_BUFFERS_PER_Q);
2805 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2806 qeth_clear_output_buffer(card->qdio.out_qs[i],
2807 card->qdio.out_qs[i]->bufs[j]);
2808 }
2809 card->qdio.out_qs[i]->card = card;
2810 card->qdio.out_qs[i]->next_buf_to_fill = 0;
2811 card->qdio.out_qs[i]->do_pack = 0;
2812 atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
2813 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2814 atomic_set(&card->qdio.out_qs[i]->state,
2815 QETH_OUT_Q_UNLOCKED);
2816 }
2817 return 0;
2818 }
2819 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2820
2821 static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2822 {
2823 switch (link_type) {
2824 case QETH_LINK_TYPE_HSTR:
2825 return 2;
2826 default:
2827 return 1;
2828 }
2829 }
2830
2831 static void qeth_fill_ipacmd_header(struct qeth_card *card,
2832 struct qeth_ipa_cmd *cmd,
2833 enum qeth_ipa_cmds command,
2834 enum qeth_prot_versions prot)
2835 {
2836 cmd->hdr.command = command;
2837 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2838 /* cmd->hdr.seqno is set by qeth_send_control_data() */
2839 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2840 cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
2841 cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1;
2842 cmd->hdr.param_count = 1;
2843 cmd->hdr.prot_version = prot;
2844 }
2845
2846 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
2847 {
2848 u8 prot_type = qeth_mpc_select_prot_type(card);
2849
2850 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2851 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2852 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2853 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2854 }
2855 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2856
2857 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2858 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2859 {
2860 struct qeth_cmd_buffer *iob;
2861
2862 iob = qeth_get_buffer(&card->write);
2863 if (iob) {
2864 qeth_prepare_ipa_cmd(card, iob);
2865 qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
2866 } else {
2867 dev_warn(&card->gdev->dev,
2868 "The qeth driver ran out of channel command buffers\n");
2869 QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
2870 CARD_DEVID(card));
2871 }
2872
2873 return iob;
2874 }
2875 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2876
2877 /**
2878 * qeth_send_ipa_cmd() - send an IPA command
2879 *
2880 * See qeth_send_control_data() for explanation of the arguments.
2881 */
2883 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2884 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2885 unsigned long),
2886 void *reply_param)
2887 {
2888 int rc;
2889
2890 QETH_CARD_TEXT(card, 4, "sendipa");
2891 rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
2892 iob, reply_cb, reply_param);
2893 if (rc == -ETIME) {
2894 qeth_clear_ipacmd_list(card);
2895 qeth_schedule_recovery(card);
2896 }
2897 return rc;
2898 }
2899 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2900
2901 static int qeth_send_startlan(struct qeth_card *card)
2902 {
2903 int rc;
2904 struct qeth_cmd_buffer *iob;
2905
2906 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2907
2908 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
2909 if (!iob)
2910 return -ENOMEM;
2911 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2912 return rc;
2913 }
2914
2915 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2916 {
2917 if (!cmd->hdr.return_code)
2918 cmd->hdr.return_code =
2919 cmd->data.setadapterparms.hdr.return_code;
2920 return cmd->hdr.return_code;
2921 }
2922
2923 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2924 struct qeth_reply *reply, unsigned long data)
2925 {
2926 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2927
2928 QETH_CARD_TEXT(card, 3, "quyadpcb");
2929 if (qeth_setadpparms_inspect_rc(cmd))
2930 return 0;
2931
2932 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2933 card->info.link_type =
2934 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2935 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
2936 }
2937 card->options.adp.supported_funcs =
2938 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2939 return 0;
2940 }
2941
2942 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2943 __u32 command, __u32 cmdlen)
2944 {
2945 struct qeth_cmd_buffer *iob;
2946 struct qeth_ipa_cmd *cmd;
2947
2948 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2949 QETH_PROT_IPV4);
2950 if (iob) {
2951 cmd = __ipa_cmd(iob);
2952 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2953 cmd->data.setadapterparms.hdr.command_code = command;
2954 cmd->data.setadapterparms.hdr.used_total = 1;
2955 cmd->data.setadapterparms.hdr.seq_no = 1;
2956 }
2957
2958 return iob;
2959 }
2960
2961 static int qeth_query_setadapterparms(struct qeth_card *card)
2962 {
2963 int rc;
2964 struct qeth_cmd_buffer *iob;
2965
2966 QETH_CARD_TEXT(card, 3, "queryadp");
2967 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2968 sizeof(struct qeth_ipacmd_setadpparms));
2969 if (!iob)
2970 return -ENOMEM;
2971 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2972 return rc;
2973 }
2974
2975 static int qeth_query_ipassists_cb(struct qeth_card *card,
2976 struct qeth_reply *reply, unsigned long data)
2977 {
2978 struct qeth_ipa_cmd *cmd;
2979
2980 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
2981
2982 cmd = (struct qeth_ipa_cmd *) data;
2983
2984 switch (cmd->hdr.return_code) {
2985 case IPA_RC_NOTSUPP:
2986 case IPA_RC_L2_UNSUPPORTED_CMD:
2987 QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
2988 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
2989 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
2990 return 0;
2991 default:
2992 if (cmd->hdr.return_code) {
2993 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2994 CARD_DEVID(card),
2995 cmd->hdr.return_code);
2996 return 0;
2997 }
2998 }
2999
3000 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
3001 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
3002 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
3003 } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
3004 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
3005 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
3006 } else
3007 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3008 CARD_DEVID(card));
3009 return 0;
3010 }
3011
3012 static int qeth_query_ipassists(struct qeth_card *card,
3013 enum qeth_prot_versions prot)
3014 {
3015 int rc;
3016 struct qeth_cmd_buffer *iob;
3017
3018 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
3019 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
3020 if (!iob)
3021 return -ENOMEM;
3022 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3023 return rc;
3024 }
3025
3026 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3027 struct qeth_reply *reply, unsigned long data)
3028 {
3029 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3030 struct qeth_query_switch_attributes *attrs;
3031 struct qeth_switch_info *sw_info;
3032
3033 QETH_CARD_TEXT(card, 2, "qswiatcb");
3034 if (qeth_setadpparms_inspect_rc(cmd))
3035 return 0;
3036
3037 sw_info = (struct qeth_switch_info *)reply->param;
3038 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3039 sw_info->capabilities = attrs->capabilities;
3040 sw_info->settings = attrs->settings;
3041 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3042 sw_info->settings);
3043 return 0;
3044 }
3045
3046 int qeth_query_switch_attributes(struct qeth_card *card,
3047 struct qeth_switch_info *sw_info)
3048 {
3049 struct qeth_cmd_buffer *iob;
3050
3051 QETH_CARD_TEXT(card, 2, "qswiattr");
3052 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3053 return -EOPNOTSUPP;
3054 if (!netif_carrier_ok(card->dev))
3055 return -ENOMEDIUM;
3056 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
3057 sizeof(struct qeth_ipacmd_setadpparms_hdr));
3058 if (!iob)
3059 return -ENOMEM;
3060 return qeth_send_ipa_cmd(card, iob,
3061 qeth_query_switch_attributes_cb, sw_info);
3062 }
3063
3064 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3065 struct qeth_reply *reply, unsigned long data)
3066 {
3067 struct qeth_ipa_cmd *cmd;
3068 __u16 rc;
3069
3070 cmd = (struct qeth_ipa_cmd *)data;
3071 rc = cmd->hdr.return_code;
3072 if (rc)
3073 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3074 else
3075 card->info.diagass_support = cmd->data.diagass.ext;
3076 return 0;
3077 }
3078
3079 static int qeth_query_setdiagass(struct qeth_card *card)
3080 {
3081 struct qeth_cmd_buffer *iob;
3082 struct qeth_ipa_cmd *cmd;
3083
3084 QETH_DBF_TEXT(SETUP, 2, "qdiagass");
3085 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3086 if (!iob)
3087 return -ENOMEM;
3088 cmd = __ipa_cmd(iob);
3089 cmd->data.diagass.subcmd_len = 16;
3090 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
3091 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3092 }
3093
3094 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3095 {
3096 unsigned long info = get_zeroed_page(GFP_KERNEL);
3097 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3098 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3099 struct ccw_dev_id ccwid;
3100 int level;
3101
3102 tid->chpid = card->info.chpid;
3103 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3104 tid->ssid = ccwid.ssid;
3105 tid->devno = ccwid.devno;
3106 if (!info)
3107 return;
3108 level = stsi(NULL, 0, 0, 0);
3109 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3110 tid->lparnr = info222->lpar_number;
3111 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3112 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3113 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3114 }
3115 free_page(info);
3117 }
3118
3119 static int qeth_hw_trap_cb(struct qeth_card *card,
3120 struct qeth_reply *reply, unsigned long data)
3121 {
3122 struct qeth_ipa_cmd *cmd;
3123 __u16 rc;
3124
3125 cmd = (struct qeth_ipa_cmd *)data;
3126 rc = cmd->hdr.return_code;
3127 if (rc)
3128 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3129 return 0;
3130 }
3131
3132 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3133 {
3134 struct qeth_cmd_buffer *iob;
3135 struct qeth_ipa_cmd *cmd;
3136
3137 QETH_DBF_TEXT(SETUP, 2, "diagtrap");
3138 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3139 if (!iob)
3140 return -ENOMEM;
3141 cmd = __ipa_cmd(iob);
3142 cmd->data.diagass.subcmd_len = 80;
3143 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
3144 cmd->data.diagass.type = 1;
3145 cmd->data.diagass.action = action;
3146 switch (action) {
3147 case QETH_DIAGS_TRAP_ARM:
3148 cmd->data.diagass.options = 0x0003;
3149 cmd->data.diagass.ext = 0x00010000 +
3150 sizeof(struct qeth_trap_id);
3151 qeth_get_trap_id(card,
3152 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3153 break;
3154 case QETH_DIAGS_TRAP_DISARM:
3155 cmd->data.diagass.options = 0x0001;
3156 break;
3157 case QETH_DIAGS_TRAP_CAPTURE:
3158 break;
3159 }
3160 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3161 }
3162 EXPORT_SYMBOL_GPL(qeth_hw_trap);
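/*
 * Usage sketch (illustrative): callers typically arm the trap once the
 * device is up and disarm it again before taking the device down, e.g.
 *
 *	rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
 *	...
 *	rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 */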
3163
3164 static int qeth_check_qdio_errors(struct qeth_card *card,
3165 struct qdio_buffer *buf,
3166 unsigned int qdio_error,
3167 const char *dbftext)
3168 {
3169 if (qdio_error) {
3170 QETH_CARD_TEXT(card, 2, dbftext);
3171 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3172 buf->element[15].sflags);
3173 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3174 buf->element[14].sflags);
3175 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3176 if ((buf->element[15].sflags) == 0x12) {
3177 card->stats.rx_dropped++;
3178 return 0;
3179 } else
3180 return 1;
3181 }
3182 return 0;
3183 }
3184
3185 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3186 {
3187 struct qeth_qdio_q *queue = card->qdio.in_q;
3188 struct list_head *lh;
3189 int count;
3190 int i;
3191 int rc;
3192 int newcount = 0;
3193
3194 count = (index < queue->next_buf_to_init)?
3195 card->qdio.in_buf_pool.buf_count -
3196 (queue->next_buf_to_init - index) :
3197 card->qdio.in_buf_pool.buf_count -
3198 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
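/* Worked example (added note): with QDIO_MAX_BUFFERS_PER_Q = 128 and
 * buf_count = 64, next_buf_to_init = 10 and index = 8 yield
 * count = 64 - 2 = 62; in the wrap-around case, next_buf_to_init = 5
 * and index = 70 yield count = 64 - 63 = 1. */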
3199 /* only requeue at a certain threshold to avoid SIGAs */
3200 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3201 for (i = queue->next_buf_to_init;
3202 i < queue->next_buf_to_init + count; ++i) {
3203 if (qeth_init_input_buffer(card,
3204 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
3205 break;
3206 } else {
3207 newcount++;
3208 }
3209 }
3210
3211 if (newcount < count) {
3212 /* we are short on memory, so we switch back to
3213 traditional skb allocation and drop packets */
3214 atomic_set(&card->force_alloc_skb, 3);
3215 count = newcount;
3216 } else {
3217 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3218 }
3219
3220 if (!count) {
3221 i = 0;
3222 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3223 i++;
3224 if (i == card->qdio.in_buf_pool.buf_count) {
3225 QETH_CARD_TEXT(card, 2, "qsarbw");
3226 card->reclaim_index = index;
3227 schedule_delayed_work(
3228 &card->buffer_reclaim_work,
3229 QETH_RECLAIM_WORK_TIME);
3230 }
3231 return;
3232 }
3233
3234 /*
3235 * According to the old code, requeueing all 128 buffers should
3236 * be avoided in order to benefit from PCI avoidance.
3237 * This function keeps at least one buffer (the buffer at
3238 * 'index') un-requeued; it is the first buffer that
3239 * will be requeued the next time.
3240 */
3241 if (card->options.performance_stats) {
3242 card->perf_stats.inbound_do_qdio_cnt++;
3243 card->perf_stats.inbound_do_qdio_start_time =
3244 qeth_get_micros();
3245 }
3246 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3247 queue->next_buf_to_init, count);
3248 if (card->options.performance_stats)
3249 card->perf_stats.inbound_do_qdio_time +=
3250 qeth_get_micros() -
3251 card->perf_stats.inbound_do_qdio_start_time;
3252 if (rc) {
3253 QETH_CARD_TEXT(card, 2, "qinberr");
3254 }
3255 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
3256 QDIO_MAX_BUFFERS_PER_Q;
3257 }
3258 }
3259
3260 static void qeth_buffer_reclaim_work(struct work_struct *work)
3261 {
3262 struct qeth_card *card = container_of(work, struct qeth_card,
3263 buffer_reclaim_work.work);
3264
3265 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3266 qeth_queue_input_buffer(card, card->reclaim_index);
3267 }
3268
3269 static void qeth_handle_send_error(struct qeth_card *card,
3270 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3271 {
3272 int sbalf15 = buffer->buffer->element[15].sflags;
3273
3274 QETH_CARD_TEXT(card, 6, "hdsnderr");
3275 if (card->info.type == QETH_CARD_TYPE_IQD) {
3276 if (sbalf15 == 0) {
3277 qdio_err = 0;
3278 } else {
3279 qdio_err = 1;
3280 }
3281 }
3282 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3283
3284 if (!qdio_err)
3285 return;
3286
3287 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3288 return;
3289
3290 QETH_CARD_TEXT(card, 1, "lnkfail");
3291 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3292 (u16)qdio_err, (u8)sbalf15);
3293 }
3294
3295 /**
3296 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3297 * @queue: queue to check for packing buffer
3298 *
3299 * Returns number of buffers that were prepared for flush.
3300 */
3301 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3302 {
3303 struct qeth_qdio_out_buffer *buffer;
3304
3305 buffer = queue->bufs[queue->next_buf_to_fill];
3306 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3307 (buffer->next_element_to_fill > 0)) {
3308 /* it's a packing buffer */
3309 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3310 queue->next_buf_to_fill =
3311 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3312 return 1;
3313 }
3314 return 0;
3315 }
3316
3317 /*
3318 * Switch to packing state if the number of used buffers on a queue
3319 * reaches a certain limit.
3320 */
3321 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3322 {
3323 if (!queue->do_pack) {
3324 if (atomic_read(&queue->used_buffers)
3325 >= QETH_HIGH_WATERMARK_PACK){
3326 /* switch non-PACKING -> PACKING */
3327 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3328 if (queue->card->options.performance_stats)
3329 queue->card->perf_stats.sc_dp_p++;
3330 queue->do_pack = 1;
3331 }
3332 }
3333 }
3334
3335 /*
3336 * Switch from packing to non-packing mode. If there is a packing
3337 * buffer on the queue, this buffer will be prepared for flushing.
3338 * In that case 1 is returned to inform the caller. If no buffer
3339 * has to be flushed, zero is returned.
3340 */
3341 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3342 {
3343 if (queue->do_pack) {
3344 if (atomic_read(&queue->used_buffers)
3345 <= QETH_LOW_WATERMARK_PACK) {
3346 /* switch PACKING -> non-PACKING */
3347 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3348 if (queue->card->options.performance_stats)
3349 queue->card->perf_stats.sc_p_dp++;
3350 queue->do_pack = 0;
3351 return qeth_prep_flush_pack_buffer(queue);
3352 }
3353 }
3354 return 0;
3355 }
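/*
 * Added note: QETH_HIGH_WATERMARK_PACK and QETH_LOW_WATERMARK_PACK form
 * a hysteresis, so a queue does not flip between packing and
 * non-packing mode on every small change in the number of used buffers.
 */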
3356
3357 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3358 int count)
3359 {
3360 struct qeth_qdio_out_buffer *buf;
3361 int rc;
3362 int i;
3363 unsigned int qdio_flags;
3364
3365 for (i = index; i < index + count; ++i) {
3366 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3367 buf = queue->bufs[bidx];
3368 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3369 SBAL_EFLAGS_LAST_ENTRY;
3370
3371 if (queue->bufstates)
3372 queue->bufstates[bidx].user = buf;
3373
3374 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
3375 continue;
3376
3377 if (!queue->do_pack) {
3378 if ((atomic_read(&queue->used_buffers) >=
3379 (QETH_HIGH_WATERMARK_PACK -
3380 QETH_WATERMARK_PACK_FUZZ)) &&
3381 !atomic_read(&queue->set_pci_flags_count)) {
3382 /* it's likely that we'll go to packing
3383 * mode soon */
3384 atomic_inc(&queue->set_pci_flags_count);
3385 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3386 }
3387 } else {
3388 if (!atomic_read(&queue->set_pci_flags_count)) {
3389 /*
3390 * there's no outstanding PCI any more, so we
3391 * have to request a PCI to be sure that the PCI
3392 * interrupt will fire at some time in the future; then
3393 * we can flush packed buffers that might still be
3394 * hanging around, which can happen if no
3395 * further send was requested by the stack
3396 */
3397 atomic_inc(&queue->set_pci_flags_count);
3398 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3399 }
3400 }
3401 }
3402
3403 netif_trans_update(queue->card->dev);
3404 if (queue->card->options.performance_stats) {
3405 queue->card->perf_stats.outbound_do_qdio_cnt++;
3406 queue->card->perf_stats.outbound_do_qdio_start_time =
3407 qeth_get_micros();
3408 }
3409 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3410 if (atomic_read(&queue->set_pci_flags_count))
3411 qdio_flags |= QDIO_FLAG_PCI_OUT;
3412 atomic_add(count, &queue->used_buffers);
3413
3414 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3415 queue->queue_no, index, count);
3416 if (queue->card->options.performance_stats)
3417 queue->card->perf_stats.outbound_do_qdio_time +=
3418 qeth_get_micros() -
3419 queue->card->perf_stats.outbound_do_qdio_start_time;
3420 if (rc) {
3421 queue->card->stats.tx_errors += count;
3422 /* ignore temporary SIGA errors without busy condition */
3423 if (rc == -ENOBUFS)
3424 return;
3425 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3426 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3427 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3428 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3429 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3430
3431 /* This must not happen under normal circumstances; if it
3432 * does, something is really wrong -> recover */
3433 qeth_schedule_recovery(queue->card);
3434 return;
3435 }
3436 if (queue->card->options.performance_stats)
3437 queue->card->perf_stats.bufs_sent += count;
3438 }
3439
3440 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3441 {
3442 int index;
3443 int flush_cnt = 0;
3444 int q_was_packing = 0;
3445
3446 /*
3447 * check if we have to switch to non-packing mode or if
3448 * we have to get a PCI flag out on the queue
3449 */
3450 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3451 !atomic_read(&queue->set_pci_flags_count)) {
3452 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3453 QETH_OUT_Q_UNLOCKED) {
3454 /*
3455 * If we get in here, there was no action in
3456 * do_send_packet. So, we check if there is a
3457 * packing buffer to be flushed here.
3458 */
3459 netif_stop_queue(queue->card->dev);
3460 index = queue->next_buf_to_fill;
3461 q_was_packing = queue->do_pack;
3462 /* queue->do_pack may change */
3463 barrier();
3464 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3465 if (!flush_cnt &&
3466 !atomic_read(&queue->set_pci_flags_count))
3467 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3468 if (queue->card->options.performance_stats &&
3469 q_was_packing)
3470 queue->card->perf_stats.bufs_sent_pack +=
3471 flush_cnt;
3472 if (flush_cnt)
3473 qeth_flush_buffers(queue, index, flush_cnt);
3474 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3475 }
3476 }
3477 }
3478
3479 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3480 unsigned long card_ptr)
3481 {
3482 struct qeth_card *card = (struct qeth_card *)card_ptr;
3483
3484 if (card->dev->flags & IFF_UP)
3485 napi_schedule(&card->napi);
3486 }
3487
3488 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3489 {
3490 int rc;
3491
3492 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3493 rc = -1;
3494 goto out;
3495 } else {
3496 if (card->options.cq == cq) {
3497 rc = 0;
3498 goto out;
3499 }
3500
3501 if (card->state != CARD_STATE_DOWN &&
3502 card->state != CARD_STATE_RECOVER) {
3503 rc = -1;
3504 goto out;
3505 }
3506
3507 qeth_free_qdio_buffers(card);
3508 card->options.cq = cq;
3509 rc = 0;
3510 }
3511 out:
3512 return rc;
3514 }
3515 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3516
3517 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3518 unsigned int queue, int first_element,
3519 int count)
3520 {
3521 struct qeth_qdio_q *cq = card->qdio.c_q;
3522 int i;
3523 int rc;
3524
3525 if (!qeth_is_cq(card, queue))
3526 goto out;
3527
3528 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3529 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3530 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3531
3532 if (qdio_err) {
3533 netif_stop_queue(card->dev);
3534 qeth_schedule_recovery(card);
3535 goto out;
3536 }
3537
3538 if (card->options.performance_stats) {
3539 card->perf_stats.cq_cnt++;
3540 card->perf_stats.cq_start_time = qeth_get_micros();
3541 }
3542
3543 for (i = first_element; i < first_element + count; ++i) {
3544 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3545 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3546 int e = 0;
3547
3548 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3549 buffer->element[e].addr) {
3550 unsigned long phys_aob_addr;
3551
3552 phys_aob_addr = (unsigned long) buffer->element[e].addr;
3553 qeth_qdio_handle_aob(card, phys_aob_addr);
3554 ++e;
3555 }
3556 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3557 }
3558 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3559 card->qdio.c_q->next_buf_to_init,
3560 count);
3561 if (rc) {
3562 dev_warn(&card->gdev->dev,
3563 "QDIO reported an error, rc=%i\n", rc);
3564 QETH_CARD_TEXT(card, 2, "qcqherr");
3565 }
3566 card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3567 + count) % QDIO_MAX_BUFFERS_PER_Q;
3568
3569 netif_wake_queue(card->dev);
3570
3571 if (card->options.performance_stats) {
3572 int delta_t = qeth_get_micros();
3573 delta_t -= card->perf_stats.cq_start_time;
3574 card->perf_stats.cq_time += delta_t;
3575 }
3576 out:
3577 return;
3578 }
3579
3580 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3581 unsigned int qdio_err, int queue,
3582 int first_elem, int count,
3583 unsigned long card_ptr)
3584 {
3585 struct qeth_card *card = (struct qeth_card *)card_ptr;
3586
3587 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3588 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3589
3590 if (qeth_is_cq(card, queue))
3591 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3592 else if (qdio_err)
3593 qeth_schedule_recovery(card);
3594 }
3595
3596 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3597 unsigned int qdio_error, int __queue,
3598 int first_element, int count,
3599 unsigned long card_ptr)
3600 {
3601 struct qeth_card *card = (struct qeth_card *) card_ptr;
3602 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3603 struct qeth_qdio_out_buffer *buffer;
3604 int i;
3605
3606 QETH_CARD_TEXT(card, 6, "qdouhdl");
3607 if (qdio_error & QDIO_ERROR_FATAL) {
3608 QETH_CARD_TEXT(card, 2, "achkcond");
3609 netif_stop_queue(card->dev);
3610 qeth_schedule_recovery(card);
3611 return;
3612 }
3613 if (card->options.performance_stats) {
3614 card->perf_stats.outbound_handler_cnt++;
3615 card->perf_stats.outbound_handler_start_time =
3616 qeth_get_micros();
3617 }
3618 for (i = first_element; i < (first_element + count); ++i) {
3619 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3620 buffer = queue->bufs[bidx];
3621 qeth_handle_send_error(card, buffer, qdio_error);
3622
3623 if (queue->bufstates &&
3624 (queue->bufstates[bidx].flags &
3625 QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
3626 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
3627
3628 if (atomic_cmpxchg(&buffer->state,
3629 QETH_QDIO_BUF_PRIMED,
3630 QETH_QDIO_BUF_PENDING) ==
3631 QETH_QDIO_BUF_PRIMED) {
3632 qeth_notify_skbs(queue, buffer,
3633 TX_NOTIFY_PENDING);
3634 }
3635 QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
3636
3637 /* prepare the queue slot for re-use: */
3638 qeth_scrub_qdio_buffer(buffer->buffer,
3639 QETH_MAX_BUFFER_ELEMENTS(card));
3640 if (qeth_init_qdio_out_buf(queue, bidx)) {
3641 QETH_CARD_TEXT(card, 2, "outofbuf");
3642 qeth_schedule_recovery(card);
3643 }
3644 } else {
3645 if (card->options.cq == QETH_CQ_ENABLED) {
3646 enum iucv_tx_notify n;
3647
3648 n = qeth_compute_cq_notification(
3649 buffer->buffer->element[15].sflags, 0);
3650 qeth_notify_skbs(queue, buffer, n);
3651 }
3652
3653 qeth_clear_output_buffer(queue, buffer);
3654 }
3655 qeth_cleanup_handled_pending(queue, bidx, 0);
3656 }
3657 atomic_sub(count, &queue->used_buffers);
3658 /* check if we need to do something on this outbound queue */
3659 if (card->info.type != QETH_CARD_TYPE_IQD)
3660 qeth_check_outbound_queue(queue);
3661
3662 netif_wake_queue(queue->card->dev);
3663 if (card->options.performance_stats)
3664 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3665 card->perf_stats.outbound_handler_start_time;
3666 }
3667
3668 /* We cannot use outbound queue 3 for unicast packets on HiperSockets */
3669 static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
3670 {
3671 if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
3672 return 2;
3673 return queue_num;
3674 }
3675
3676 /*
3677 * Note: Function assumes that we have 4 outbound queues.
3678 */
3679 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3680 int ipv)
3681 {
3682 __be16 *tci;
3683 u8 tos;
3684
3685 switch (card->qdio.do_prio_queueing) {
3686 case QETH_PRIO_Q_ING_TOS:
3687 case QETH_PRIO_Q_ING_PREC:
3688 switch (ipv) {
3689 case 4:
3690 tos = ipv4_get_dsfield(ip_hdr(skb));
3691 break;
3692 case 6:
3693 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3694 break;
3695 default:
3696 return card->qdio.default_out_queue;
3697 }
3698 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3699 return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
3700 if (tos & IPTOS_MINCOST)
3701 return qeth_cut_iqd_prio(card, 3);
3702 if (tos & IPTOS_RELIABILITY)
3703 return 2;
3704 if (tos & IPTOS_THROUGHPUT)
3705 return 1;
3706 if (tos & IPTOS_LOWDELAY)
3707 return 0;
3708 break;
3709 case QETH_PRIO_Q_ING_SKB:
3710 if (skb->priority > 5)
3711 return 0;
3712 return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
3713 case QETH_PRIO_Q_ING_VLAN:
3714 tci = &((struct ethhdr *)skb->data)->h_proto;
3715 if (be16_to_cpu(*tci) == ETH_P_8021Q)
3716 return qeth_cut_iqd_prio(card,
3717 ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
3718 break;
3719 default:
3720 break;
3721 }
3722 return card->qdio.default_out_queue;
3723 }
3724 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
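/*
 * Worked example (added note, following the precedence branch above):
 * for QETH_PRIO_Q_ING_PREC the IP precedence p = tos >> 6 selects queue
 * (~tos >> 6) & 3 == 3 - p, so precedence 0 maps to queue 3 (cut to
 * queue 2 on HiperSockets) and precedence 3 maps to queue 0.
 */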
3725
3726 /**
3727 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3728 * @skb: SKB address
3729 *
3730 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3731 * the fragmented part of the SKB. Returns zero for a linear SKB.
3732 */
3733 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3734 {
3735 int cnt, elements = 0;
3736
3737 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3738 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
3739
3740 elements += qeth_get_elements_for_range(
3741 (addr_t)skb_frag_address(frag),
3742 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3743 }
3744 return elements;
3745 }
3746
3747 /**
3748 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3749 * to transmit an skb.
3750 * @skb: the skb to operate on.
3751 * @data_offset: skip this part of the skb's linear data
3752 *
3753 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3754 * skb's data (both its linear part and paged fragments).
3755 */
3756 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3757 {
3758 unsigned int elements = qeth_get_elements_for_frags(skb);
3759 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3760 addr_t start = (addr_t)skb->data + data_offset;
3761
3762 if (start != end)
3763 elements += qeth_get_elements_for_range(start, end);
3764 return elements;
3765 }
3766 EXPORT_SYMBOL_GPL(qeth_count_elements);
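/*
 * Minimal model of the element math above (illustrative only; assumes
 * qeth_get_elements_for_range() counts the 4K pages touched by the
 * half-open range [start, end)):
 */
#if 0	/* example, not built */
static unsigned int example_elements(unsigned long start, unsigned long end)
{
	return DIV_ROUND_UP(end, PAGE_SIZE) - start / PAGE_SIZE;
}
#endif
/*
 * E.g. a linear part that begins at page offset 0xF00 and is 0x300
 * bytes long touches two pages and thus needs two buffer elements;
 * each page frag contributes further elements the same way.
 */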
3767
3768 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3769 MAX_TCP_HEADER)
3770
3771 /**
3772 * qeth_add_hw_header() - add a HW header to an skb.
3773 * @skb: skb that the HW header should be added to.
3774 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3775 * it contains a valid pointer to a qeth_hdr.
3776 * @hdr_len: length of the HW header.
3777 * @proto_len: length of protocol headers that need to be in same page as the
3778 * HW header.
3779 *
3780 * Returns the pushed length. If the header can't be pushed on
3781 * (e.g. because it would cross a page boundary), it is allocated from
3782 * the cache instead and 0 is returned.
3783 * The number of needed buffer elements is returned in @elements.
3784 * A negative return value indicates an error while creating the hdr.
3785 */
3786 static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
3787 struct qeth_hdr **hdr, unsigned int hdr_len,
3788 unsigned int proto_len, unsigned int *elements)
3789 {
3790 const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3791 const unsigned int contiguous = proto_len ? proto_len : 1;
3792 unsigned int __elements;
3793 addr_t start, end;
3794 bool push_ok;
3795 int rc;
3796
3797 check_layout:
3798 start = (addr_t)skb->data - hdr_len;
3799 end = (addr_t)skb->data;
3800
3801 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3802 /* Push HW header into same page as first protocol header. */
3803 push_ok = true;
3804 /* ... but TSO always needs a separate element for headers: */
3805 if (skb_is_gso(skb))
3806 __elements = 1 + qeth_count_elements(skb, proto_len);
3807 else
3808 __elements = qeth_count_elements(skb, 0);
3809 } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
3810 /* Push HW header into a new page. */
3811 push_ok = true;
3812 __elements = 1 + qeth_count_elements(skb, 0);
3813 } else {
3814 /* Use header cache, copy protocol headers up. */
3815 push_ok = false;
3816 __elements = 1 + qeth_count_elements(skb, proto_len);
3817 }
3818
3819 /* Compress skb to fit into one IO buffer: */
3820 if (__elements > max_elements) {
3821 if (!skb_is_nonlinear(skb)) {
3822 /* Drop it, no easy way of shrinking it further. */
3823 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3824 max_elements, __elements, skb->len);
3825 return -E2BIG;
3826 }
3827
3828 rc = skb_linearize(skb);
3829 if (card->options.performance_stats) {
3830 if (rc)
3831 card->perf_stats.tx_linfail++;
3832 else
3833 card->perf_stats.tx_lin++;
3834 }
3835 if (rc)
3836 return rc;
3837
3838 /* Linearization changed the layout, re-evaluate: */
3839 goto check_layout;
3840 }
3841
3842 *elements = __elements;
3843 /* Add the header: */
3844 if (push_ok) {
3845 *hdr = skb_push(skb, hdr_len);
3846 return hdr_len;
3847 }
3848 /* fall back */
3849 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3850 return -E2BIG;
3851 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3852 if (!*hdr)
3853 return -ENOMEM;
3854 /* Copy protocol headers behind HW header: */
3855 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3856 return 0;
3857 }
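/*
 * Layout examples for the checks in qeth_add_hw_header() (illustrative,
 * 4K pages, hdr_len == 32):
 *
 *	skb->data at page offset 64:  [data - 32, data + contiguous) fits
 *	    in one page -> header is pushed, no extra element needed.
 *	skb->data page-aligned:       the pushed header sits alone at the
 *	    tail of the preceding page -> still pushed, but it costs one
 *	    dedicated element (only allowed when proto_len == 0).
 *	skb->data at page offset 16:  the pushed header would straddle a
 *	    page boundary -> fall back to qeth_core_header_cache and copy
 *	    proto_len bytes in behind the allocated header.
 */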
3858
3859 static void __qeth_fill_buffer(struct sk_buff *skb,
3860 struct qeth_qdio_out_buffer *buf,
3861 bool is_first_elem, unsigned int offset)
3862 {
3863 struct qdio_buffer *buffer = buf->buffer;
3864 int element = buf->next_element_to_fill;
3865 int length = skb_headlen(skb) - offset;
3866 char *data = skb->data + offset;
3867 int length_here, cnt;
3868
3869 /* map linear part into buffer element(s) */
3870 while (length > 0) {
3871 /* length_here is the remaining amount of data in this page */
3872 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3873 if (length < length_here)
3874 length_here = length;
3875
3876 buffer->element[element].addr = data;
3877 buffer->element[element].length = length_here;
3878 length -= length_here;
3879 if (is_first_elem) {
3880 is_first_elem = false;
3881 if (length || skb_is_nonlinear(skb))
3882 /* skb needs additional elements */
3883 buffer->element[element].eflags =
3884 SBAL_EFLAGS_FIRST_FRAG;
3885 else
3886 buffer->element[element].eflags = 0;
3887 } else {
3888 buffer->element[element].eflags =
3889 SBAL_EFLAGS_MIDDLE_FRAG;
3890 }
3891 data += length_here;
3892 element++;
3893 }
3894
3895 /* map page frags into buffer element(s) */
3896 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3897 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3898
3899 data = skb_frag_address(frag);
3900 length = skb_frag_size(frag);
3901 while (length > 0) {
3902 length_here = PAGE_SIZE -
3903 ((unsigned long) data % PAGE_SIZE);
3904 if (length < length_here)
3905 length_here = length;
3906
3907 buffer->element[element].addr = data;
3908 buffer->element[element].length = length_here;
3909 buffer->element[element].eflags =
3910 SBAL_EFLAGS_MIDDLE_FRAG;
3911 length -= length_here;
3912 data += length_here;
3913 element++;
3914 }
3915 }
3916
3917 if (buffer->element[element - 1].eflags)
3918 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3919 buf->next_element_to_fill = element;
3920 }
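/*
 * Resulting SBALE layout (illustrative): a linear part that crosses one
 * page boundary plus a single page frag ends up as
 *
 *	element[0].eflags = SBAL_EFLAGS_FIRST_FRAG   (linear, 1st page)
 *	element[1].eflags = SBAL_EFLAGS_MIDDLE_FRAG  (linear, 2nd page)
 *	element[2].eflags = SBAL_EFLAGS_LAST_FRAG    (frag; rewritten from
 *						      MIDDLE_FRAG above)
 *
 * A fully linear, single-element skb keeps eflags == 0, which the final
 * fix-up deliberately leaves untouched.
 */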
3921
3922 /**
3923 * qeth_fill_buffer() - map skb into an output buffer
3924 * @queue: QDIO queue to submit the buffer on
3925 * @buf: buffer to transport the skb
3926 * @skb: skb to map into the buffer
3927 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
3928 * from qeth_core_header_cache.
3929 * @offset: when mapping the skb, start at skb->data + offset
3930 * @hd_len: if > 0, build a dedicated header element of this size
3931 */
3932 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3933 struct qeth_qdio_out_buffer *buf,
3934 struct sk_buff *skb, struct qeth_hdr *hdr,
3935 unsigned int offset, unsigned int hd_len)
3936 {
3937 struct qdio_buffer *buffer = buf->buffer;
3938 bool is_first_elem = true;
3939 int flush_cnt = 0;
3940
3941 __skb_queue_tail(&buf->skb_list, skb);
3942
3943 /* build dedicated header element */
3944 if (hd_len) {
3945 int element = buf->next_element_to_fill;
3946 is_first_elem = false;
3947
3948 buffer->element[element].addr = hdr;
3949 buffer->element[element].length = hd_len;
3950 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3951 /* remember to free cache-allocated qeth_hdr: */
3952 buf->is_header[element] = ((void *)hdr != skb->data);
3953 buf->next_element_to_fill++;
3954 }
3955
3956 __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3957
3958 if (!queue->do_pack) {
3959 QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
3960 /* set state to PRIMED -> will be flushed */
3961 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3962 flush_cnt = 1;
3963 } else {
3964 QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
3965 if (queue->card->options.performance_stats)
3966 queue->card->perf_stats.skbs_sent_pack++;
3967 if (buf->next_element_to_fill >=
3968 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
3969 /*
3970 * packed buffer is full -> set state PRIMED
3971 * -> will be flushed
3972 */
3973 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3974 flush_cnt = 1;
3975 }
3976 }
3977 return flush_cnt;
3978 }
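/*
 * Packing in a nutshell (illustrative): with do_pack off, every skb
 * primes its buffer immediately (flush_cnt == 1). With do_pack on,
 * skbs accumulate in one buffer and flush_cnt only becomes 1 once
 * next_element_to_fill reaches QETH_MAX_BUFFER_ELEMENTS(); partially
 * filled pack buffers are flushed later, when the caller decides to
 * (see qeth_prep_flush_pack_buffer()).
 */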
3979
3980 static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
3981 struct sk_buff *skb, struct qeth_hdr *hdr,
3982 unsigned int offset, unsigned int hd_len)
3983 {
3984 int index = queue->next_buf_to_fill;
3985 struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
3986
3987 /*
3988 * check if buffer is empty to make sure that we do not 'overtake'
3989 * ourselves and try to fill a buffer that is already primed
3990 */
3991 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3992 return -EBUSY;
3993 queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
3994 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3995 qeth_flush_buffers(queue, index, 1);
3996 return 0;
3997 }
3998
3999 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4000 struct sk_buff *skb, struct qeth_hdr *hdr,
4001 unsigned int offset, unsigned int hd_len,
4002 int elements_needed)
4003 {
4004 struct qeth_qdio_out_buffer *buffer;
4005 int start_index;
4006 int flush_count = 0;
4007 int do_pack = 0;
4008 int tmp;
4009 int rc = 0;
4010
4011 /* spin until we get the queue ... */
4012 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4013 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4014 start_index = queue->next_buf_to_fill;
4015 buffer = queue->bufs[queue->next_buf_to_fill];
4016 /*
4017 * check if buffer is empty to make sure that we do not 'overtake'
4018 * ourselves and try to fill a buffer that is already primed
4019 */
4020 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4021 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4022 return -EBUSY;
4023 }
4024 /* check if we need to switch packing state of this queue */
4025 qeth_switch_to_packing_if_needed(queue);
4026 if (queue->do_pack) {
4027 do_pack = 1;
4028 /* does packet fit in current buffer? */
4029 if ((QETH_MAX_BUFFER_ELEMENTS(card) -
4030 buffer->next_element_to_fill) < elements_needed) {
4031 /* ... no -> set state PRIMED */
4032 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4033 flush_count++;
4034 queue->next_buf_to_fill =
4035 (queue->next_buf_to_fill + 1) %
4036 QDIO_MAX_BUFFERS_PER_Q;
4037 buffer = queue->bufs[queue->next_buf_to_fill];
4038 /* we did a step forward, so check buffer state
4039 * again */
4040 if (atomic_read(&buffer->state) !=
4041 QETH_QDIO_BUF_EMPTY) {
4042 qeth_flush_buffers(queue, start_index,
4043 flush_count);
4044 atomic_set(&queue->state,
4045 QETH_OUT_Q_UNLOCKED);
4046 rc = -EBUSY;
4047 goto out;
4048 }
4049 }
4050 }
4051 tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
4052 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4053 QDIO_MAX_BUFFERS_PER_Q;
4054 flush_count += tmp;
4055 if (flush_count)
4056 qeth_flush_buffers(queue, start_index, flush_count);
4057 else if (!atomic_read(&queue->set_pci_flags_count))
4058 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4059 /*
4060 * queue->state will go from LOCKED -> UNLOCKED or from
4061 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4062 * (switch packing state or flush buffer to get another pci flag out).
4063 * In that case we will enter this loop
4064 */
4065 while (atomic_dec_return(&queue->state)) {
4066 start_index = queue->next_buf_to_fill;
4067 /* check if we can go back to non-packing state */
4068 tmp = qeth_switch_to_nonpacking_if_needed(queue);
4069 /*
4070 * check if we need to flush a packing buffer to get a pci
4071 * flag out on the queue
4072 */
4073 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4074 tmp = qeth_prep_flush_pack_buffer(queue);
4075 if (tmp) {
4076 qeth_flush_buffers(queue, start_index, tmp);
4077 flush_count += tmp;
4078 }
4079 }
4080 out:
4081 /* at this point the queue is UNLOCKED again */
4082 if (queue->card->options.performance_stats && do_pack)
4083 queue->card->perf_stats.bufs_sent_pack += flush_count;
4084
4085 return rc;
4086 }
4087 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
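/*
 * Sketch of the lock-free queue handshake used above: the xmit path
 * spins the queue from UNLOCKED to LOCKED; qeth_qdio_output_handler()
 * may bump LOCKED to LOCKED_FLUSH to request another flush. The
 * atomic_dec_return() loop then either drops LOCKED back to UNLOCKED
 * (done) or LOCKED_FLUSH back to LOCKED, in which case one more pass
 * switches the packing state and/or flushes a buffer to get a PCI
 * request flag out.
 */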
4088
4089 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4090 unsigned int payload_len, struct sk_buff *skb,
4091 unsigned int proto_len)
4092 {
4093 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4094
4095 ext->hdr_tot_len = sizeof(*ext);
4096 ext->imb_hdr_no = 1;
4097 ext->hdr_type = 1;
4098 ext->hdr_version = 1;
4099 ext->hdr_len = 28;
4100 ext->payload_len = payload_len;
4101 ext->mss = skb_shinfo(skb)->gso_size;
4102 ext->dg_hdr_len = proto_len;
4103 }
4104
4105 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4106 struct qeth_qdio_out_q *queue, int ipv, int cast_type,
4107 void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
4108 struct sk_buff *skb, int ipv, int cast_type,
4109 unsigned int data_len))
4110 {
4111 unsigned int proto_len, hw_hdr_len;
4112 unsigned int frame_len = skb->len;
4113 bool is_tso = skb_is_gso(skb);
4114 unsigned int data_offset = 0;
4115 struct qeth_hdr *hdr = NULL;
4116 unsigned int hd_len = 0;
4117 unsigned int elements;
4118 int push_len, rc;
4119 bool is_sg;
4120
4121 if (is_tso) {
4122 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4123 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4124 } else {
4125 hw_hdr_len = sizeof(struct qeth_hdr);
4126 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4127 }
4128
4129 rc = skb_cow_head(skb, hw_hdr_len);
4130 if (rc)
4131 return rc;
4132
4133 push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
4134 &elements);
4135 if (push_len < 0)
4136 return push_len;
4137 if (is_tso || !push_len) {
4138 /* HW header needs its own buffer element. */
4139 hd_len = hw_hdr_len + proto_len;
4140 data_offset = push_len + proto_len;
4141 }
4142 memset(hdr, 0, hw_hdr_len);
4143 fill_header(card, hdr, skb, ipv, cast_type, frame_len);
4144 if (is_tso)
4145 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4146 frame_len - proto_len, skb, proto_len);
4147
4148 is_sg = skb_is_nonlinear(skb);
4149 if (IS_IQD(card)) {
4150 rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
4151 hd_len);
4152 } else {
4153 /* TODO: drop skb_orphan() once TX completion is fast enough */
4154 skb_orphan(skb);
4155 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4156 hd_len, elements);
4157 }
4158
4159 if (!rc) {
4160 if (card->options.performance_stats) {
4161 card->perf_stats.buf_elements_sent += elements;
4162 if (is_sg)
4163 card->perf_stats.sg_skbs_sent++;
4164 if (is_tso) {
4165 card->perf_stats.large_send_bytes += frame_len;
4166 card->perf_stats.large_send_cnt++;
4167 }
4168 }
4169 } else {
4170 if (!push_len)
4171 kmem_cache_free(qeth_core_header_cache, hdr);
4172 if (rc == -EBUSY)
4173 /* roll back to ETH header */
4174 skb_pull(skb, push_len);
4175 }
4176 return rc;
4177 }
4178 EXPORT_SYMBOL_GPL(qeth_xmit);
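/*
 * Worked example for the offsets above (illustrative): a TSO skb with
 * 14 + 20 + 32 bytes of Ethernet/IPv4/TCP headers yields
 * proto_len == 66. Since TSO always needs a dedicated header element,
 * hd_len == hw_hdr_len + 66 and the payload mapping starts at
 * data_offset == push_len + 66, whether the TSO header was pushed
 * (push_len == hw_hdr_len) or taken from qeth_core_header_cache
 * (push_len == 0).
 */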
4179
4180 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4181 struct qeth_reply *reply, unsigned long data)
4182 {
4183 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4184 struct qeth_ipacmd_setadpparms *setparms;
4185
4186 QETH_CARD_TEXT(card, 4, "prmadpcb");
4187
4188 setparms = &(cmd->data.setadapterparms);
4189 if (qeth_setadpparms_inspect_rc(cmd)) {
4190 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4191 setparms->data.mode = SET_PROMISC_MODE_OFF;
4192 }
4193 card->info.promisc_mode = setparms->data.mode;
4194 return 0;
4195 }
4196
4197 void qeth_setadp_promisc_mode(struct qeth_card *card)
4198 {
4199 enum qeth_ipa_promisc_modes mode;
4200 struct net_device *dev = card->dev;
4201 struct qeth_cmd_buffer *iob;
4202 struct qeth_ipa_cmd *cmd;
4203
4204 QETH_CARD_TEXT(card, 4, "setprom");
4205
4206 if (((dev->flags & IFF_PROMISC) &&
4207 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
4208 (!(dev->flags & IFF_PROMISC) &&
4209 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
4210 return;
4211 mode = SET_PROMISC_MODE_OFF;
4212 if (dev->flags & IFF_PROMISC)
4213 mode = SET_PROMISC_MODE_ON;
4214 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4215
4216 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4217 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
4218 if (!iob)
4219 return;
4220 cmd = __ipa_cmd(iob);
4221 cmd->data.setadapterparms.data.mode = mode;
4222 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4223 }
4224 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4225
4226 struct net_device_stats *qeth_get_stats(struct net_device *dev)
4227 {
4228 struct qeth_card *card;
4229
4230 card = dev->ml_priv;
4231
4232 QETH_CARD_TEXT(card, 5, "getstat");
4233
4234 return &card->stats;
4235 }
4236 EXPORT_SYMBOL_GPL(qeth_get_stats);
4237
4238 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4239 struct qeth_reply *reply, unsigned long data)
4240 {
4241 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4242 struct qeth_ipacmd_setadpparms *adp_cmd;
4243
4244 QETH_CARD_TEXT(card, 4, "chgmaccb");
4245 if (qeth_setadpparms_inspect_rc(cmd))
4246 return 0;
4247
4248 adp_cmd = &cmd->data.setadapterparms;
4249 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4250 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4251 return 0;
4252
4253 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4254 return 0;
4255 }
4256
4257 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4258 {
4259 int rc;
4260 struct qeth_cmd_buffer *iob;
4261 struct qeth_ipa_cmd *cmd;
4262
4263 QETH_CARD_TEXT(card, 4, "chgmac");
4264
4265 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4266 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4267 sizeof(struct qeth_change_addr));
4268 if (!iob)
4269 return -ENOMEM;
4270 cmd = __ipa_cmd(iob);
4271 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4272 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4273 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4274 card->dev->dev_addr);
4275 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4276 NULL);
4277 return rc;
4278 }
4279 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4280
4281 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4282 struct qeth_reply *reply, unsigned long data)
4283 {
4284 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4285 struct qeth_set_access_ctrl *access_ctrl_req;
4286 int fallback = *(int *)reply->param;
4287
4288 QETH_CARD_TEXT(card, 4, "setaccb");
4289 if (cmd->hdr.return_code)
4290 return 0;
4291 qeth_setadpparms_inspect_rc(cmd);
4292
4293 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4294 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4295 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4296 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
4297 cmd->data.setadapterparms.hdr.return_code);
4298 if (cmd->data.setadapterparms.hdr.return_code !=
4299 SET_ACCESS_CTRL_RC_SUCCESS)
4300 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4301 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4302 cmd->data.setadapterparms.hdr.return_code);
4303 switch (cmd->data.setadapterparms.hdr.return_code) {
4304 case SET_ACCESS_CTRL_RC_SUCCESS:
4305 if (card->options.isolation == ISOLATION_MODE_NONE) {
4306 dev_info(&card->gdev->dev,
4307 "QDIO data connection isolation is deactivated\n");
4308 } else {
4309 dev_info(&card->gdev->dev,
4310 "QDIO data connection isolation is activated\n");
4311 }
4312 break;
4313 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4314 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4315 CARD_DEVID(card));
4316 if (fallback)
4317 card->options.isolation = card->options.prev_isolation;
4318 break;
4319 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4320 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4321 CARD_DEVID(card));
4322 if (fallback)
4323 card->options.isolation = card->options.prev_isolation;
4324 break;
4325 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4326 dev_err(&card->gdev->dev, "Adapter does not "
4327 "support QDIO data connection isolation\n");
4328 break;
4329 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4330 dev_err(&card->gdev->dev,
4331 "Adapter is dedicated. "
4332 "QDIO data connection isolation not supported\n");
4333 if (fallback)
4334 card->options.isolation = card->options.prev_isolation;
4335 break;
4336 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4337 dev_err(&card->gdev->dev,
4338 "TSO does not permit QDIO data connection isolation\n");
4339 if (fallback)
4340 card->options.isolation = card->options.prev_isolation;
4341 break;
4342 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4343 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4344 "support reflective relay mode\n");
4345 if (fallback)
4346 card->options.isolation = card->options.prev_isolation;
4347 break;
4348 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4349 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4350 "enabled at the adjacent switch port");
4351 if (fallback)
4352 card->options.isolation = card->options.prev_isolation;
4353 break;
4354 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4355 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4356 "at the adjacent switch failed\n");
4357 break;
4358 default:
4359 /* this should never happen */
4360 if (fallback)
4361 card->options.isolation = card->options.prev_isolation;
4362 break;
4363 }
4364 return 0;
4365 }
4366
4367 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4368 enum qeth_ipa_isolation_modes isolation, int fallback)
4369 {
4370 int rc;
4371 struct qeth_cmd_buffer *iob;
4372 struct qeth_ipa_cmd *cmd;
4373 struct qeth_set_access_ctrl *access_ctrl_req;
4374
4375 QETH_CARD_TEXT(card, 4, "setacctl");
4376
4377 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
4378 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4379
4380 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4381 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4382 sizeof(struct qeth_set_access_ctrl));
4383 if (!iob)
4384 return -ENOMEM;
4385 cmd = __ipa_cmd(iob);
4386 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4387 access_ctrl_req->subcmd_code = isolation;
4388
4389 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4390 &fallback);
4391 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
4392 return rc;
4393 }
4394
4395 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4396 {
4397 int rc = 0;
4398
4399 QETH_CARD_TEXT(card, 4, "setactlo");
4400
4401 if ((card->info.type == QETH_CARD_TYPE_OSD ||
4402 card->info.type == QETH_CARD_TYPE_OSX) &&
4403 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4404 rc = qeth_setadpparms_set_access_ctrl(card,
4405 card->options.isolation, fallback);
4406 if (rc) {
4407 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4408 rc, CARD_DEVID(card));
4409 rc = -EOPNOTSUPP;
4410 }
4411 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4412 card->options.isolation = ISOLATION_MODE_NONE;
4413
4414 dev_err(&card->gdev->dev, "Adapter does not "
4415 "support QDIO data connection isolation\n");
4416 rc = -EOPNOTSUPP;
4417 }
4418 return rc;
4419 }
4420 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4421
4422 void qeth_tx_timeout(struct net_device *dev)
4423 {
4424 struct qeth_card *card;
4425
4426 card = dev->ml_priv;
4427 QETH_CARD_TEXT(card, 4, "txtimeo");
4428 card->stats.tx_errors++;
4429 qeth_schedule_recovery(card);
4430 }
4431 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4432
4433 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4434 {
4435 struct qeth_card *card = dev->ml_priv;
4436 int rc = 0;
4437
4438 switch (regnum) {
4439 case MII_BMCR: /* Basic mode control register */
4440 rc = BMCR_FULLDPLX;
4441 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4442 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4443 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4444 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4445 rc |= BMCR_SPEED100;
4446 break;
4447 case MII_BMSR: /* Basic mode status register */
4448 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4449 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4450 BMSR_100BASE4;
4451 break;
4452 case MII_PHYSID1: /* PHYS ID 1 */
4453 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4454 dev->dev_addr[2];
4455 rc = (rc >> 5) & 0xFFFF;
4456 break;
4457 case MII_PHYSID2: /* PHYS ID 2 */
4458 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4459 break;
4460 case MII_ADVERTISE: /* Advertisement control reg */
4461 rc = ADVERTISE_ALL;
4462 break;
4463 case MII_LPA: /* Link partner ability reg */
4464 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4465 LPA_100BASE4 | LPA_LPACK;
4466 break;
4467 case MII_EXPANSION: /* Expansion register */
4468 break;
4469 case MII_DCOUNTER: /* disconnect counter */
4470 break;
4471 case MII_FCSCOUNTER: /* false carrier counter */
4472 break;
4473 case MII_NWAYTEST: /* N-way auto-neg test register */
4474 break;
4475 case MII_RERRCOUNTER: /* rx error counter */
4476 rc = card->stats.rx_errors;
4477 break;
4478 case MII_SREVISION: /* silicon revision */
4479 break;
4480 case MII_RESV1: /* reserved 1 */
4481 break;
4482 case MII_LBRERROR: /* loopback, rx, bypass error */
4483 break;
4484 case MII_PHYADDR: /* physical address */
4485 break;
4486 case MII_RESV2: /* reserved 2 */
4487 break;
4488 case MII_TPISTATUS: /* TPI status for 10mbps */
4489 break;
4490 case MII_NCONFIG: /* network interface config */
4491 break;
4492 default:
4493 break;
4494 }
4495 return rc;
4496 }
4497
4498 static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
4499 struct qeth_cmd_buffer *iob, int len,
4500 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
4501 unsigned long),
4502 void *reply_param)
4503 {
4504 u16 s1, s2;
4505
4506 QETH_CARD_TEXT(card, 4, "sendsnmp");
4507
4508 /* adjust PDU length fields in IPA_PDU_HEADER */
4509 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4510 s2 = (u32) len;
4511 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4512 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4513 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4514 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4515 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4516 reply_cb, reply_param);
4517 }
4518
4519 static int qeth_snmp_command_cb(struct qeth_card *card,
4520 struct qeth_reply *reply, unsigned long sdata)
4521 {
4522 struct qeth_ipa_cmd *cmd;
4523 struct qeth_arp_query_info *qinfo;
4524 unsigned char *data;
4525 void *snmp_data;
4526 __u16 data_len;
4527
4528 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4529
4530 cmd = (struct qeth_ipa_cmd *) sdata;
4531 data = (unsigned char *)((char *)cmd - reply->offset);
4532 qinfo = (struct qeth_arp_query_info *) reply->param;
4533
4534 if (cmd->hdr.return_code) {
4535 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4536 return 0;
4537 }
4538 if (cmd->data.setadapterparms.hdr.return_code) {
4539 cmd->hdr.return_code =
4540 cmd->data.setadapterparms.hdr.return_code;
4541 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4542 return 0;
4543 }
4544 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4545 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4546 snmp_data = &cmd->data.setadapterparms.data.snmp;
4547 data_len -= offsetof(struct qeth_ipa_cmd,
4548 data.setadapterparms.data.snmp);
4549 } else {
4550 snmp_data = &cmd->data.setadapterparms.data.snmp.request;
4551 data_len -= offsetof(struct qeth_ipa_cmd,
4552 data.setadapterparms.data.snmp.request);
4553 }
4554
4555 /* check if there is enough room in userspace */
4556 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4557 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
4558 cmd->hdr.return_code = IPA_RC_ENOMEM;
4559 return 0;
4560 }
4561 QETH_CARD_TEXT_(card, 4, "snore%i",
4562 cmd->data.setadapterparms.hdr.used_total);
4563 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4564 cmd->data.setadapterparms.hdr.seq_no);
4565 /* copy entries to user buffer */
4566 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4567 qinfo->udata_offset += data_len;
4568
4569 /* check if all replies received ... */
4570 QETH_CARD_TEXT_(card, 4, "srtot%i",
4571 cmd->data.setadapterparms.hdr.used_total);
4572 QETH_CARD_TEXT_(card, 4, "srseq%i",
4573 cmd->data.setadapterparms.hdr.seq_no);
4574 if (cmd->data.setadapterparms.hdr.seq_no <
4575 cmd->data.setadapterparms.hdr.used_total)
4576 return 1;
4577 return 0;
4578 }
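/*
 * Reply chunking (illustrative): for used_total == 3, the callback runs
 * once per PDU; seq_no 1 and 2 return 1 ("keep the reply alive"), and
 * seq_no 3 returns 0, completing the request. The same seq_no vs.
 * used_total pattern appears in qeth_setadpparms_query_oat_cb() below.
 */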
4579
4580 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4581 {
4582 struct qeth_cmd_buffer *iob;
4583 struct qeth_ipa_cmd *cmd;
4584 struct qeth_snmp_ureq *ureq;
4585 unsigned int req_len;
4586 struct qeth_arp_query_info qinfo = {0, };
4587 int rc = 0;
4588
4589 QETH_CARD_TEXT(card, 3, "snmpcmd");
4590
4591 if (card->info.guestlan)
4592 return -EOPNOTSUPP;
4593
4594 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4595 IS_LAYER3(card))
4596 return -EOPNOTSUPP;
4597
4598 /* skip 4 bytes (data_len struct member) to get req_len */
4599 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4600 return -EFAULT;
4601 if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
4602 sizeof(struct qeth_ipacmd_hdr) -
4603 sizeof(struct qeth_ipacmd_setadpparms_hdr)))
4604 return -EINVAL;
4605 ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
4606 if (IS_ERR(ureq)) {
4607 QETH_CARD_TEXT(card, 2, "snmpnome");
4608 return PTR_ERR(ureq);
4609 }
4610 qinfo.udata_len = ureq->hdr.data_len;
4611 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4612 if (!qinfo.udata) {
4613 kfree(ureq);
4614 return -ENOMEM;
4615 }
4616 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4617
4618 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4619 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4620 if (!iob) {
4621 rc = -ENOMEM;
4622 goto out;
4623 }
4624 cmd = __ipa_cmd(iob);
4625 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4626 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4627 qeth_snmp_command_cb, (void *)&qinfo);
4628 if (rc)
4629 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4630 CARD_DEVID(card), rc);
4631 else {
4632 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4633 rc = -EFAULT;
4634 }
4635 out:
4636 kfree(ureq);
4637 kfree(qinfo.udata);
4638 return rc;
4639 }
4640
4641 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4642 struct qeth_reply *reply, unsigned long data)
4643 {
4644 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4645 struct qeth_qoat_priv *priv;
4646 char *resdata;
4647 int resdatalen;
4648
4649 QETH_CARD_TEXT(card, 3, "qoatcb");
4650 if (qeth_setadpparms_inspect_rc(cmd))
4651 return 0;
4652
4653 priv = (struct qeth_qoat_priv *)reply->param;
4654 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4655 resdata = (char *)data + 28;
4656
4657 if (resdatalen > (priv->buffer_len - priv->response_len)) {
4658 cmd->hdr.return_code = IPA_RC_FFFF;
4659 return 0;
4660 }
4661
4662 memcpy((priv->buffer + priv->response_len), resdata,
4663 resdatalen);
4664 priv->response_len += resdatalen;
4665
4666 if (cmd->data.setadapterparms.hdr.seq_no <
4667 cmd->data.setadapterparms.hdr.used_total)
4668 return 1;
4669 return 0;
4670 }
4671
4672 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4673 {
4674 int rc = 0;
4675 struct qeth_cmd_buffer *iob;
4676 struct qeth_ipa_cmd *cmd;
4677 struct qeth_query_oat *oat_req;
4678 struct qeth_query_oat_data oat_data;
4679 struct qeth_qoat_priv priv;
4680 void __user *tmp;
4681
4682 QETH_CARD_TEXT(card, 3, "qoatcmd");
4683
4684 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4685 rc = -EOPNOTSUPP;
4686 goto out;
4687 }
4688
4689 if (copy_from_user(&oat_data, udata,
4690 sizeof(struct qeth_query_oat_data))) {
4691 rc = -EFAULT;
4692 goto out;
4693 }
4694
4695 priv.buffer_len = oat_data.buffer_len;
4696 priv.response_len = 0;
4697 priv.buffer = vzalloc(oat_data.buffer_len);
4698 if (!priv.buffer) {
4699 rc = -ENOMEM;
4700 goto out;
4701 }
4702
4703 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4704 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4705 sizeof(struct qeth_query_oat));
4706 if (!iob) {
4707 rc = -ENOMEM;
4708 goto out_free;
4709 }
4710 cmd = __ipa_cmd(iob);
4711 oat_req = &cmd->data.setadapterparms.data.query_oat;
4712 oat_req->subcmd_code = oat_data.command;
4713
4714 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4715 &priv);
4716 if (!rc) {
4717 if (is_compat_task())
4718 tmp = compat_ptr(oat_data.ptr);
4719 else
4720 tmp = (void __user *)(unsigned long)oat_data.ptr;
4721
4722 if (copy_to_user(tmp, priv.buffer,
4723 priv.response_len)) {
4724 rc = -EFAULT;
4725 goto out_free;
4726 }
4727
4728 oat_data.response_len = priv.response_len;
4729
4730 if (copy_to_user(udata, &oat_data,
4731 sizeof(struct qeth_query_oat_data)))
4732 rc = -EFAULT;
4733 } else if (rc == IPA_RC_FFFF) {
4734 rc = -EFAULT;
4735 }
4736
4737 out_free:
4738 vfree(priv.buffer);
4739 out:
4740 return rc;
4741 }
4742
4743 static int qeth_query_card_info_cb(struct qeth_card *card,
4744 struct qeth_reply *reply, unsigned long data)
4745 {
4746 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4747 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4748 struct qeth_query_card_info *card_info;
4749
4750 QETH_CARD_TEXT(card, 2, "qcrdincb");
4751 if (qeth_setadpparms_inspect_rc(cmd))
4752 return 0;
4753
4754 card_info = &cmd->data.setadapterparms.data.card_info;
4755 carrier_info->card_type = card_info->card_type;
4756 carrier_info->port_mode = card_info->port_mode;
4757 carrier_info->port_speed = card_info->port_speed;
4758 return 0;
4759 }
4760
4761 static int qeth_query_card_info(struct qeth_card *card,
4762 struct carrier_info *carrier_info)
4763 {
4764 struct qeth_cmd_buffer *iob;
4765
4766 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4767 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4768 return -EOPNOTSUPP;
4769 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
4770 sizeof(struct qeth_ipacmd_setadpparms_hdr));
4771 if (!iob)
4772 return -ENOMEM;
4773 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4774 (void *)carrier_info);
4775 }
4776
4777 /**
4778 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4779 * @card: pointer to a qeth_card
4780 *
4781 * Return:
4782 * 0, if a MAC address has been set for the card's netdevice;
4783 * an error code, for various error conditions.
4784 */
4785 int qeth_vm_request_mac(struct qeth_card *card)
4786 {
4787 struct diag26c_mac_resp *response;
4788 struct diag26c_mac_req *request;
4789 struct ccw_dev_id id;
4790 int rc;
4791
4792 QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
4793
4794 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4795 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4796 if (!request || !response) {
4797 rc = -ENOMEM;
4798 goto out;
4799 }
4800
4801 ccw_device_get_id(CARD_DDEV(card), &id);
4802 request->resp_buf_len = sizeof(*response);
4803 request->resp_version = DIAG26C_VERSION2;
4804 request->op_code = DIAG26C_GET_MAC;
4805 request->devno = id.devno;
4806
4807 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4808 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4809 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4810 if (rc)
4811 goto out;
4812 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4813
4814 if (request->resp_buf_len < sizeof(*response) ||
4815 response->version != request->resp_version) {
4816 rc = -EIO;
4817 QETH_DBF_TEXT(SETUP, 2, "badresp");
4818 QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
4819 sizeof(request->resp_buf_len));
4820 } else if (!is_valid_ether_addr(response->mac)) {
4821 rc = -EINVAL;
4822 QETH_DBF_TEXT(SETUP, 2, "badmac");
4823 QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
4824 } else {
4825 ether_addr_copy(card->dev->dev_addr, response->mac);
4826 }
4827
4828 out:
4829 kfree(response);
4830 kfree(request);
4831 return rc;
4832 }
4833 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
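/*
 * DIAG 0x26c handshake above (illustrative summary): the request names
 * the qeth device number and asks for DIAG26C_GET_MAC in response
 * format DIAG26C_VERSION2; the reply is only trusted after its buffer
 * length and version echo check out and is_valid_ether_addr() accepts
 * the returned MAC.
 */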
4834
4835 static int qeth_get_qdio_q_format(struct qeth_card *card)
4836 {
4837 if (card->info.type == QETH_CARD_TYPE_IQD)
4838 return QDIO_IQDIO_QFMT;
4839 else
4840 return QDIO_QETH_QFMT;
4841 }
4842
4843 static void qeth_determine_capabilities(struct qeth_card *card)
4844 {
4845 int rc;
4846 int length;
4847 char *prcd;
4848 struct ccw_device *ddev;
4849 int ddev_offline = 0;
4850
4851 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4852 ddev = CARD_DDEV(card);
4853 if (!ddev->online) {
4854 ddev_offline = 1;
4855 rc = ccw_device_set_online(ddev);
4856 if (rc) {
4857 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4858 goto out;
4859 }
4860 }
4861
4862 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4863 if (rc) {
4864 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4865 CARD_DEVID(card), rc);
4866 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4867 goto out_offline;
4868 }
4869 qeth_configure_unitaddr(card, prcd);
4870 if (ddev_offline)
4871 qeth_configure_blkt_default(card, prcd);
4872 kfree(prcd);
4873
4874 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4875 if (rc)
4876 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4877
4878 QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
4879 QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
4880 QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
4881 QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
4882 QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
4883 if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
4884 (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
4885 (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
4886 dev_info(&card->gdev->dev,
4887 "Completion Queueing supported\n");
4888 } else {
4889 card->options.cq = QETH_CQ_NOTAVAILABLE;
4890 }
4891
4892
4893 out_offline:
4894 if (ddev_offline == 1)
4895 ccw_device_set_offline(ddev);
4896 out:
4897 return;
4898 }
4899
4900 static void qeth_qdio_establish_cq(struct qeth_card *card,
4901 struct qdio_buffer **in_sbal_ptrs,
4902 void (**queue_start_poll)
4903 (struct ccw_device *, int,
4904 unsigned long))
4905 {
4906 int i;
4907
4908 if (card->options.cq == QETH_CQ_ENABLED) {
4909 int offset = QDIO_MAX_BUFFERS_PER_Q *
4910 (card->qdio.no_in_queues - 1);
4911 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4912 in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4913 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4914 }
4915
4916 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4917 }
4918 }
4919
4920 static int qeth_qdio_establish(struct qeth_card *card)
4921 {
4922 struct qdio_initialize init_data;
4923 char *qib_param_field;
4924 struct qdio_buffer **in_sbal_ptrs;
4925 void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4926 struct qdio_buffer **out_sbal_ptrs;
4927 int i, j, k;
4928 int rc = 0;
4929
4930 QETH_DBF_TEXT(SETUP, 2, "qdioest");
4931
4932 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
4933 GFP_KERNEL);
4934 if (!qib_param_field) {
4935 rc = -ENOMEM;
4936 goto out_free_nothing;
4937 }
4938
4939 qeth_create_qib_param_field(card, qib_param_field);
4940 qeth_create_qib_param_field_blkt(card, qib_param_field);
4941
4942 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4943 sizeof(void *),
4944 GFP_KERNEL);
4945 if (!in_sbal_ptrs) {
4946 rc = -ENOMEM;
4947 goto out_free_qib_param;
4948 }
4949 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4950 in_sbal_ptrs[i] = (struct qdio_buffer *)
4951 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4952 }
4953
4954 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4955 GFP_KERNEL);
4956 if (!queue_start_poll) {
4957 rc = -ENOMEM;
4958 goto out_free_in_sbals;
4959 }
4960 for (i = 0; i < card->qdio.no_in_queues; ++i)
4961 queue_start_poll[i] = qeth_qdio_start_poll;
4962
4963 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4964
4965 out_sbal_ptrs =
4966 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4967 sizeof(void *),
4968 GFP_KERNEL);
4969 if (!out_sbal_ptrs) {
4970 rc = -ENOMEM;
4971 goto out_free_queue_start_poll;
4972 }
4973 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4974 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4975 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4976 card->qdio.out_qs[i]->bufs[j]->buffer);
4977 }
4978
4979 memset(&init_data, 0, sizeof(struct qdio_initialize));
4980 init_data.cdev = CARD_DDEV(card);
4981 init_data.q_format = qeth_get_qdio_q_format(card);
4982 init_data.qib_param_field_format = 0;
4983 init_data.qib_param_field = qib_param_field;
4984 init_data.no_input_qs = card->qdio.no_in_queues;
4985 init_data.no_output_qs = card->qdio.no_out_queues;
4986 init_data.input_handler = qeth_qdio_input_handler;
4987 init_data.output_handler = qeth_qdio_output_handler;
4988 init_data.queue_start_poll_array = queue_start_poll;
4989 init_data.int_parm = (unsigned long) card;
4990 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
4991 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
4992 init_data.output_sbal_state_array = card->qdio.out_bufstates;
4993 init_data.scan_threshold =
4994 (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
4995
4996 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4997 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4998 rc = qdio_allocate(&init_data);
4999 if (rc) {
5000 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5001 goto out;
5002 }
5003 rc = qdio_establish(&init_data);
5004 if (rc) {
5005 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5006 qdio_free(CARD_DDEV(card));
5007 }
5008 }
5009
5010 switch (card->options.cq) {
5011 case QETH_CQ_ENABLED:
5012 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5013 break;
5014 case QETH_CQ_DISABLED:
5015 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5016 break;
5017 default:
5018 break;
5019 }
5020 out:
5021 kfree(out_sbal_ptrs);
5022 out_free_queue_start_poll:
5023 kfree(queue_start_poll);
5024 out_free_in_sbals:
5025 kfree(in_sbal_ptrs);
5026 out_free_qib_param:
5027 kfree(qib_param_field);
5028 out_free_nothing:
5029 return rc;
5030 }
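/*
 * Array sizing in qeth_qdio_establish() (illustrative): with one
 * regular input queue plus a completion queue, in_sbal_ptrs holds
 * 2 * QDIO_MAX_BUFFERS_PER_Q physical buffer addresses;
 * qeth_qdio_establish_cq() fills the second half and clears the CQ's
 * queue_start_poll slot, so only the regular queue is polled.
 */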
5031
5032 static void qeth_core_free_card(struct qeth_card *card)
5033 {
5034 QETH_DBF_TEXT(SETUP, 2, "freecrd");
5035 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
5036 qeth_clean_channel(&card->read);
5037 qeth_clean_channel(&card->write);
5038 qeth_clean_channel(&card->data);
5039 destroy_workqueue(card->event_wq);
5040 qeth_free_qdio_buffers(card);
5041 unregister_service_level(&card->qeth_service_level);
5042 dev_set_drvdata(&card->gdev->dev, NULL);
5043 kfree(card);
5044 }
5045
5046 void qeth_trace_features(struct qeth_card *card)
5047 {
5048 QETH_CARD_TEXT(card, 2, "features");
5049 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5050 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5051 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5052 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5053 sizeof(card->info.diagass_support));
5054 }
5055 EXPORT_SYMBOL_GPL(qeth_trace_features);
5056
5057 static struct ccw_device_id qeth_ids[] = {
5058 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5059 .driver_info = QETH_CARD_TYPE_OSD},
5060 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5061 .driver_info = QETH_CARD_TYPE_IQD},
5062 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5063 .driver_info = QETH_CARD_TYPE_OSN},
5064 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5065 .driver_info = QETH_CARD_TYPE_OSM},
5066 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5067 .driver_info = QETH_CARD_TYPE_OSX},
5068 {},
5069 };
5070 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5071
5072 static struct ccw_driver qeth_ccw_driver = {
5073 .driver = {
5074 .owner = THIS_MODULE,
5075 .name = "qeth",
5076 },
5077 .ids = qeth_ids,
5078 .probe = ccwgroup_probe_ccwdev,
5079 .remove = ccwgroup_remove_ccwdev,
5080 };
5081
5082 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5083 {
5084 int retries = 3;
5085 int rc;
5086
5087 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
5088 atomic_set(&card->force_alloc_skb, 0);
5089 qeth_update_from_chp_desc(card);
5090 retry:
5091 if (retries < 3)
5092 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5093 CARD_DEVID(card));
5094 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
5095 ccw_device_set_offline(CARD_DDEV(card));
5096 ccw_device_set_offline(CARD_WDEV(card));
5097 ccw_device_set_offline(CARD_RDEV(card));
5098 qdio_free(CARD_DDEV(card));
5099 rc = ccw_device_set_online(CARD_RDEV(card));
5100 if (rc)
5101 goto retriable;
5102 rc = ccw_device_set_online(CARD_WDEV(card));
5103 if (rc)
5104 goto retriable;
5105 rc = ccw_device_set_online(CARD_DDEV(card));
5106 if (rc)
5107 goto retriable;
5108 retriable:
5109 if (rc == -ERESTARTSYS) {
5110 QETH_DBF_TEXT(SETUP, 2, "break1");
5111 return rc;
5112 } else if (rc) {
5113 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
5114 if (--retries < 0)
5115 goto out;
5116 else
5117 goto retry;
5118 }
5119 qeth_determine_capabilities(card);
5120 qeth_init_tokens(card);
5121 qeth_init_func_level(card);
5122 rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
5123 if (rc == -ERESTARTSYS) {
5124 QETH_DBF_TEXT(SETUP, 2, "break2");
5125 return rc;
5126 } else if (rc) {
5127 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
5128 if (--retries < 0)
5129 goto out;
5130 else
5131 goto retry;
5132 }
5133 rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
5134 if (rc == -ERESTARTSYS) {
5135 QETH_DBF_TEXT(SETUP, 2, "break3");
5136 return rc;
5137 } else if (rc) {
5138 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
5139 if (--retries < 0)
5140 goto out;
5141 else
5142 goto retry;
5143 }
5144 card->read_or_write_problem = 0;
5145 rc = qeth_mpc_initialize(card);
5146 if (rc) {
5147 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
5148 goto out;
5149 }
5150
5151 rc = qeth_send_startlan(card);
5152 if (rc) {
5153 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
5154 if (rc == IPA_RC_LAN_OFFLINE) {
5155 dev_warn(&card->gdev->dev,
5156 "The LAN is offline\n");
5157 *carrier_ok = false;
5158 } else {
5159 rc = -ENODEV;
5160 goto out;
5161 }
5162 } else {
5163 *carrier_ok = true;
5164 }
5165
5166 if (qeth_netdev_is_registered(card->dev)) {
5167 if (*carrier_ok)
5168 netif_carrier_on(card->dev);
5169 else
5170 netif_carrier_off(card->dev);
5171 }
5172
5173 card->options.ipa4.supported_funcs = 0;
5174 card->options.ipa6.supported_funcs = 0;
5175 card->options.adp.supported_funcs = 0;
5176 card->options.sbp.supported_funcs = 0;
5177 card->info.diagass_support = 0;
5178 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5179 if (rc == -ENOMEM)
5180 goto out;
5181 if (qeth_is_supported(card, IPA_IPV6)) {
5182 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5183 if (rc == -ENOMEM)
5184 goto out;
5185 }
5186 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5187 rc = qeth_query_setadapterparms(card);
5188 if (rc < 0) {
5189 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
5190 goto out;
5191 }
5192 }
5193 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5194 rc = qeth_query_setdiagass(card);
5195 if (rc < 0) {
5196 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
5197 goto out;
5198 }
5199 }
5200 return 0;
5201 out:
5202 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5203 "from an error on the device\n");
5204 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5205 CARD_DEVID(card), rc);
5206 return rc;
5207 }
5208 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
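/*
 * Recovery strategy above (illustrative summary): each IDX activation
 * step is retried up to three times by bouncing all three CCW devices
 * offline and online again; -ERESTARTSYS (a pending signal) aborts
 * immediately instead of consuming a retry.
 */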
5209
5210 static void qeth_create_skb_frag(struct qdio_buffer_element *element,
5211 struct sk_buff *skb, int offset, int data_len)
5212 {
5213 struct page *page = virt_to_page(element->addr);
5214 unsigned int next_frag;
5215
5216 /* first fill the linear space */
5217 if (!skb->len) {
5218 unsigned int linear = min(data_len, skb_tailroom(skb));
5219
5220 skb_put_data(skb, element->addr + offset, linear);
5221 data_len -= linear;
5222 if (!data_len)
5223 return;
5224 offset += linear;
5225 /* fall through to add page frag for remaining data */
5226 }
5227
5228 next_frag = skb_shinfo(skb)->nr_frags;
5229 get_page(page);
5230 skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
5231 }
5232
5233 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5234 {
5235 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5236 }
5237
5238 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
5239 struct qeth_qdio_buffer *qethbuffer,
5240 struct qdio_buffer_element **__element, int *__offset,
5241 struct qeth_hdr **hdr)
5242 {
5243 struct qdio_buffer_element *element = *__element;
5244 struct qdio_buffer *buffer = qethbuffer->buffer;
5245 int offset = *__offset;
5246 struct sk_buff *skb;
5247 int skb_len = 0;
5248 void *data_ptr;
5249 int data_len;
5250 int headroom = 0;
5251 int use_rx_sg = 0;
5252
5253 /* qeth_hdr must not cross element boundaries */
5254 while (element->length < offset + sizeof(struct qeth_hdr)) {
5255 if (qeth_is_last_sbale(element))
5256 return NULL;
5257 element++;
5258 offset = 0;
5259 }
5260 *hdr = element->addr + offset;
5261
5262 offset += sizeof(struct qeth_hdr);
5263 switch ((*hdr)->hdr.l2.id) {
5264 case QETH_HEADER_TYPE_LAYER2:
5265 skb_len = (*hdr)->hdr.l2.pkt_length;
5266 break;
5267 case QETH_HEADER_TYPE_LAYER3:
5268 skb_len = (*hdr)->hdr.l3.length;
5269 headroom = ETH_HLEN;
5270 break;
5271 case QETH_HEADER_TYPE_OSN:
5272 skb_len = (*hdr)->hdr.osn.pdu_length;
5273 headroom = sizeof(struct qeth_hdr);
5274 break;
5275 default:
5276 break;
5277 }
5278
5279 if (!skb_len)
5280 return NULL;
5281
5282 if (((skb_len >= card->options.rx_sg_cb) &&
5283 (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
5284 (!atomic_read(&card->force_alloc_skb))) ||
5285 (card->options.cq == QETH_CQ_ENABLED))
5286 use_rx_sg = 1;
5287
5288 if (use_rx_sg && qethbuffer->rx_skb) {
5289 /* QETH_CQ_ENABLED only: */
5290 skb = qethbuffer->rx_skb;
5291 qethbuffer->rx_skb = NULL;
5292 } else {
5293 unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
5294
5295 skb = napi_alloc_skb(&card->napi, linear + headroom);
5296 }
5297 if (!skb)
5298 goto no_mem;
5299 if (headroom)
5300 skb_reserve(skb, headroom);
5301
5302 data_ptr = element->addr + offset;
5303 while (skb_len) {
5304 data_len = min(skb_len, (int)(element->length - offset));
5305 if (data_len) {
5306 if (use_rx_sg)
5307 qeth_create_skb_frag(element, skb, offset,
5308 data_len);
5309 else
5310 skb_put_data(skb, data_ptr, data_len);
5311 }
5312 skb_len -= data_len;
5313 if (skb_len) {
5314 if (qeth_is_last_sbale(element)) {
5315 QETH_CARD_TEXT(card, 4, "unexeob");
5316 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5317 dev_kfree_skb_any(skb);
5318 card->stats.rx_errors++;
5319 return NULL;
5320 }
5321 element++;
5322 offset = 0;
5323 data_ptr = element->addr;
5324 } else {
5325 offset += data_len;
5326 }
5327 }
5328 *__element = element;
5329 *__offset = offset;
5330 if (use_rx_sg && card->options.performance_stats) {
5331 card->perf_stats.sg_skbs_rx++;
5332 card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
5333 }
5334 return skb;
5335 no_mem:
5336 if (net_ratelimit())
5337 QETH_CARD_TEXT(card, 2, "noskbmem");
5338
5339 card->stats.rx_dropped++;
5340 return NULL;
5341 }
5342 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
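/*
 * RX buffer strategy above (illustrative): frames of at least
 * card->options.rx_sg_cb bytes are built scatter-gather - at most
 * QETH_RX_PULL_LEN bytes land in the linear part and the remainder is
 * attached as page frags via qeth_create_skb_frag(); smaller frames
 * are copied in full. With QETH_CQ_ENABLED, the pre-allocated
 * qethbuffer->rx_skb is used instead of napi_alloc_skb().
 */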
5343
5344 int qeth_poll(struct napi_struct *napi, int budget)
5345 {
5346 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5347 int work_done = 0;
5348 struct qeth_qdio_buffer *buffer;
5349 int done;
5350 int new_budget = budget;
5351
5352 if (card->options.performance_stats) {
5353 card->perf_stats.inbound_cnt++;
5354 card->perf_stats.inbound_start_time = qeth_get_micros();
5355 }
5356
5357 while (1) {
5358 if (!card->rx.b_count) {
5359 card->rx.qdio_err = 0;
5360 card->rx.b_count = qdio_get_next_buffers(
5361 card->data.ccwdev, 0, &card->rx.b_index,
5362 &card->rx.qdio_err);
5363 if (card->rx.b_count <= 0) {
5364 card->rx.b_count = 0;
5365 break;
5366 }
5367 card->rx.b_element =
5368 &card->qdio.in_q->bufs[card->rx.b_index]
5369 .buffer->element[0];
5370 card->rx.e_offset = 0;
5371 }
5372
5373 while (card->rx.b_count) {
5374 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5375 if (!(card->rx.qdio_err &&
5376 qeth_check_qdio_errors(card, buffer->buffer,
5377 card->rx.qdio_err, "qinerr")))
5378 work_done +=
5379 card->discipline->process_rx_buffer(
5380 card, new_budget, &done);
5381 else
5382 done = 1;
5383
5384 if (done) {
5385 if (card->options.performance_stats)
5386 card->perf_stats.bufs_rec++;
5387 qeth_put_buffer_pool_entry(card,
5388 buffer->pool_entry);
5389 qeth_queue_input_buffer(card, card->rx.b_index);
5390 card->rx.b_count--;
5391 if (card->rx.b_count) {
5392 card->rx.b_index =
5393 (card->rx.b_index + 1) %
5394 QDIO_MAX_BUFFERS_PER_Q;
5395 card->rx.b_element =
5396 &card->qdio.in_q
5397 ->bufs[card->rx.b_index]
5398 .buffer->element[0];
5399 card->rx.e_offset = 0;
5400 }
5401 }
5402
5403 if (work_done >= budget)
5404 goto out;
5405 else
5406 new_budget = budget - work_done;
5407 }
5408 }
5409
5410 napi_complete_done(napi, work_done);
5411 if (qdio_start_irq(card->data.ccwdev, 0))
5412 napi_schedule(&card->napi);
5413 out:
5414 if (card->options.performance_stats)
5415 card->perf_stats.inbound_time += qeth_get_micros() -
5416 card->perf_stats.inbound_start_time;
5417 return work_done;
5418 }
5419 EXPORT_SYMBOL_GPL(qeth_poll);
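/*
 * NAPI contract (sketch): qeth_poll() consumes completed input buffers
 * until either nothing is pending - then it calls napi_complete_done()
 * and re-arms the QDIO interrupt via qdio_start_irq(), rescheduling
 * itself if new data raced in - or work_done reaches the budget, in
 * which case it returns without completing so the core polls again.
 */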
5420
5421 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5422 {
5423 if (!cmd->hdr.return_code)
5424 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5425 return cmd->hdr.return_code;
5426 }
5427
5428 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5429 struct qeth_reply *reply,
5430 unsigned long data)
5431 {
5432 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5433 struct qeth_ipa_caps *caps = reply->param;
5434
5435 if (qeth_setassparms_inspect_rc(cmd))
5436 return 0;
5437
5438 caps->supported = cmd->data.setassparms.data.caps.supported;
5439 caps->enabled = cmd->data.setassparms.data.caps.enabled;
5440 return 0;
5441 }
5442
5443 int qeth_setassparms_cb(struct qeth_card *card,
5444 struct qeth_reply *reply, unsigned long data)
5445 {
5446 struct qeth_ipa_cmd *cmd;
5447
5448 QETH_CARD_TEXT(card, 4, "defadpcb");
5449
5450 cmd = (struct qeth_ipa_cmd *) data;
5451 if (cmd->hdr.return_code == 0) {
5452 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5453 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5454 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5455 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5456 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5457 }
5458 return 0;
5459 }
5460 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5461
5462 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5463 enum qeth_ipa_funcs ipa_func,
5464 __u16 cmd_code, __u16 len,
5465 enum qeth_prot_versions prot)
5466 {
5467 struct qeth_cmd_buffer *iob;
5468 struct qeth_ipa_cmd *cmd;
5469
5470 QETH_CARD_TEXT(card, 4, "getasscm");
5471 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
5472
5473 if (iob) {
5474 cmd = __ipa_cmd(iob);
5475 cmd->data.setassparms.hdr.assist_no = ipa_func;
5476 cmd->data.setassparms.hdr.length = 8 + len;
5477 cmd->data.setassparms.hdr.command_code = cmd_code;
5478 }
5479
5480 return iob;
5481 }
5482 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5483
5484 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5485 enum qeth_ipa_funcs ipa_func,
5486 u16 cmd_code, long data,
5487 enum qeth_prot_versions prot)
5488 {
5489 int length = 0;
5490 struct qeth_cmd_buffer *iob;
5491
5492 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5493 if (data)
5494 length = sizeof(__u32);
5495 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5496 if (!iob)
5497 return -ENOMEM;
5498
5499 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
5500 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5501 }
5502 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5503
5504 static void qeth_unregister_dbf_views(void)
5505 {
5506 int x;
5507 for (x = 0; x < QETH_DBF_INFOS; x++) {
5508 debug_unregister(qeth_dbf[x].id);
5509 qeth_dbf[x].id = NULL;
5510 }
5511 }
5512
5513 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5514 {
5515 char dbf_txt_buf[32];
5516 va_list args;
5517
5518 if (!debug_level_enabled(id, level))
5519 return;
5520 va_start(args, fmt);
5521 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5522 va_end(args);
5523 debug_text_event(id, level, dbf_txt_buf);
5524 }
5525 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5526
5527 static int qeth_register_dbf_views(void)
5528 {
5529 int ret;
5530 int x;
5531
5532 for (x = 0; x < QETH_DBF_INFOS; x++) {
5533 /* register the areas */
5534 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5535 qeth_dbf[x].pages,
5536 qeth_dbf[x].areas,
5537 qeth_dbf[x].len);
5538 if (qeth_dbf[x].id == NULL) {
5539 qeth_unregister_dbf_views();
5540 return -ENOMEM;
5541 }
5542
5543 /* register a view */
5544 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5545 if (ret) {
5546 qeth_unregister_dbf_views();
5547 return ret;
5548 }
5549
5550 /* set the level at or below which debug events are logged */
5551 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5552 }
5553
5554 return 0;
5555 }
5556
5557 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
5558
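/* try_then_request_module() first tries symbol_get(); if the discipline
 * module ("qeth_l2" / "qeth_l3") is not loaded yet, it is pulled in via
 * request_module() and the symbol lookup is retried.
 */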
5559 int qeth_core_load_discipline(struct qeth_card *card,
5560 enum qeth_discipline_id discipline)
5561 {
5562 mutex_lock(&qeth_mod_mutex);
5563 switch (discipline) {
5564 case QETH_DISCIPLINE_LAYER3:
5565 card->discipline = try_then_request_module(
5566 symbol_get(qeth_l3_discipline), "qeth_l3");
5567 break;
5568 case QETH_DISCIPLINE_LAYER2:
5569 card->discipline = try_then_request_module(
5570 symbol_get(qeth_l2_discipline), "qeth_l2");
5571 break;
5572 default:
5573 break;
5574 }
5575 mutex_unlock(&qeth_mod_mutex);
5576
5577 if (!card->discipline) {
5578 dev_err(&card->gdev->dev,
5579 "There is no kernel module to support discipline %d\n", discipline);
5580 return -EINVAL;
5581 }
5582
5583 card->options.layer = discipline;
5584 return 0;
5585 }
5586
5587 void qeth_core_free_discipline(struct qeth_card *card)
5588 {
5589 if (IS_LAYER2(card))
5590 symbol_put(qeth_l2_discipline);
5591 else
5592 symbol_put(qeth_l3_discipline);
5593 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5594 card->discipline = NULL;
5595 }
5596
5597 const struct device_type qeth_generic_devtype = {
5598 .name = "qeth_generic",
5599 .groups = qeth_generic_attr_groups,
5600 };
5601 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5602
5603 static const struct device_type qeth_osn_devtype = {
5604 .name = "qeth_osn",
5605 .groups = qeth_osn_attr_groups,
5606 };
5607
5608 #define DBF_NAME_LEN 20
5609
5610 struct qeth_dbf_entry {
5611 char dbf_name[DBF_NAME_LEN];
5612 debug_info_t *dbf_info;
5613 struct list_head dbf_list;
5614 };
5615
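/* Per-card debug areas are kept on a global list so that a device which is
 * re-probed under the same bus-ID reuses its existing dbf entry (see
 * qeth_core_probe_device()) instead of registering a second one.
 */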
5616 static LIST_HEAD(qeth_dbf_list);
5617 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5618
5619 static debug_info_t *qeth_get_dbf_entry(char *name)
5620 {
5621 struct qeth_dbf_entry *entry;
5622 debug_info_t *rc = NULL;
5623
5624 mutex_lock(&qeth_dbf_list_mutex);
5625 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5626 if (strcmp(entry->dbf_name, name) == 0) {
5627 rc = entry->dbf_info;
5628 break;
5629 }
5630 }
5631 mutex_unlock(&qeth_dbf_list_mutex);
5632 return rc;
5633 }
5634
5635 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5636 {
5637 struct qeth_dbf_entry *new_entry;
5638
5639 card->debug = debug_register(name, 2, 1, 8);
5640 if (!card->debug) {
5641 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5642 goto err;
5643 }
5644 if (debug_register_view(card->debug, &debug_hex_ascii_view))
5645 goto err_dbg;
5646 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5647 if (!new_entry)
5648 goto err_dbg;
5649 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
5650 new_entry->dbf_info = card->debug;
5651 mutex_lock(&qeth_dbf_list_mutex);
5652 list_add(&new_entry->dbf_list, &qeth_dbf_list);
5653 mutex_unlock(&qeth_dbf_list_mutex);
5654
5655 return 0;
5656
5657 err_dbg:
5658 debug_unregister(card->debug);
5659 err:
5660 return -ENOMEM;
5661 }
5662
5663 static void qeth_clear_dbf_list(void)
5664 {
5665 struct qeth_dbf_entry *entry, *tmp;
5666
5667 mutex_lock(&qeth_dbf_list_mutex);
5668 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5669 list_del(&entry->dbf_list);
5670 debug_unregister(entry->dbf_info);
5671 kfree(entry);
5672 }
5673 mutex_unlock(&qeth_dbf_list_mutex);
5674 }
5675
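/* IQD (HiperSockets) and OSN devices get ether_setup()-initialized netdevs
 * named "hsi%d" / "osn%d"; all other card types use a standard "eth%d"
 * device from alloc_etherdev().
 */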
5676 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5677 {
5678 struct net_device *dev;
5679
5680 switch (card->info.type) {
5681 case QETH_CARD_TYPE_IQD:
5682 dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
5683 break;
5684 case QETH_CARD_TYPE_OSN:
5685 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5686 break;
5687 default:
5688 dev = alloc_etherdev(0);
5689 }
5690
5691 if (!dev)
5692 return NULL;
5693
5694 dev->ml_priv = card;
5695 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5696 dev->min_mtu = IS_OSN(card) ? 64 : 576;
5697 /* initialized when device first goes online: */
5698 dev->max_mtu = 0;
5699 dev->mtu = 0;
5700 SET_NETDEV_DEV(dev, &card->gdev->dev);
5701 netif_carrier_off(dev);
5702
5703 if (!IS_OSN(card)) {
5704 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5705 dev->hw_features |= NETIF_F_SG;
5706 dev->vlan_features |= NETIF_F_SG;
5707 if (IS_IQD(card))
5708 dev->features |= NETIF_F_SG;
5709 }
5710
5711 return dev;
5712 }
5713
5714 struct net_device *qeth_clone_netdev(struct net_device *orig)
5715 {
5716 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5717
5718 if (!clone)
5719 return NULL;
5720
5721 clone->dev_port = orig->dev_port;
5722 return clone;
5723 }
5724
5725 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5726 {
5727 struct qeth_card *card;
5728 struct device *dev;
5729 int rc;
5730 enum qeth_discipline_id enforced_disc;
5731 char dbf_name[DBF_NAME_LEN];
5732
5733 QETH_DBF_TEXT(SETUP, 2, "probedev");
5734
5735 dev = &gdev->dev;
5736 if (!get_device(dev))
5737 return -ENODEV;
5738
5739 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5740
5741 card = qeth_alloc_card(gdev);
5742 if (!card) {
5743 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5744 rc = -ENOMEM;
5745 goto err_dev;
5746 }
5747
5748 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5749 dev_name(&gdev->dev));
5750 card->debug = qeth_get_dbf_entry(dbf_name);
5751 if (!card->debug) {
5752 rc = qeth_add_dbf_entry(card, dbf_name);
5753 if (rc)
5754 goto err_card;
5755 }
5756
5757 qeth_setup_card(card);
5758 qeth_update_from_chp_desc(card);
5759
5760 card->dev = qeth_alloc_netdev(card);
5761 if (!card->dev) {
5762 rc = -ENOMEM;
5763 goto err_card;
5764 }
5765
5766 qeth_determine_capabilities(card);
5767 enforced_disc = qeth_enforce_discipline(card);
5768 switch (enforced_disc) {
5769 case QETH_DISCIPLINE_UNDETERMINED:
5770 gdev->dev.type = &qeth_generic_devtype;
5771 break;
5772 default:
5773 card->info.layer_enforced = true;
5774 rc = qeth_core_load_discipline(card, enforced_disc);
5775 if (rc)
5776 goto err_load;
5777
5778 gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
5779 ? card->discipline->devtype
5780 : &qeth_osn_devtype;
5781 rc = card->discipline->setup(card->gdev);
5782 if (rc)
5783 goto err_disc;
5784 break;
5785 }
5786
5787 return 0;
5788
5789 err_disc:
5790 qeth_core_free_discipline(card);
5791 err_load:
5792 free_netdev(card->dev);
5793 err_card:
5794 qeth_core_free_card(card);
5795 err_dev:
5796 put_device(dev);
5797 return rc;
5798 }
5799
5800 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5801 {
5802 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5803
5804 QETH_DBF_TEXT(SETUP, 2, "removedv");
5805
5806 if (card->discipline) {
5807 card->discipline->remove(gdev);
5808 qeth_core_free_discipline(card);
5809 }
5810
5811 free_netdev(card->dev);
5812 qeth_core_free_card(card);
5813 put_device(&gdev->dev);
5814 }
5815
5816 static int qeth_core_set_online(struct ccwgroup_device *gdev)
5817 {
5818 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5819 int rc = 0;
5820 enum qeth_discipline_id def_discipline;
5821
5822 if (!card->discipline) {
5823 if (card->info.type == QETH_CARD_TYPE_IQD)
5824 def_discipline = QETH_DISCIPLINE_LAYER3;
5825 else
5826 def_discipline = QETH_DISCIPLINE_LAYER2;
5827 rc = qeth_core_load_discipline(card, def_discipline);
5828 if (rc)
5829 goto err;
5830 rc = card->discipline->setup(card->gdev);
5831 if (rc) {
5832 qeth_core_free_discipline(card);
5833 goto err;
5834 }
5835 }
5836 rc = card->discipline->set_online(gdev);
5837 err:
5838 return rc;
5839 }
5840
5841 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5842 {
5843 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5844 return card->discipline->set_offline(gdev);
5845 }
5846
5847 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5848 {
5849 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5850 qeth_set_allowed_threads(card, 0, 1);
5851 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
5852 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5853 qeth_qdio_clear_card(card, 0);
5854 qeth_clear_qdio_buffers(card);
5855 qdio_free(CARD_DDEV(card));
5856 }
5857
5858 static int qeth_core_freeze(struct ccwgroup_device *gdev)
5859 {
5860 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5861 if (card->discipline && card->discipline->freeze)
5862 return card->discipline->freeze(gdev);
5863 return 0;
5864 }
5865
5866 static int qeth_core_thaw(struct ccwgroup_device *gdev)
5867 {
5868 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5869 if (card->discipline && card->discipline->thaw)
5870 return card->discipline->thaw(gdev);
5871 return 0;
5872 }
5873
5874 static int qeth_core_restore(struct ccwgroup_device *gdev)
5875 {
5876 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5877 if (card->discipline && card->discipline->restore)
5878 return card->discipline->restore(gdev);
5879 return 0;
5880 }
5881
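/* Writing three comma-separated CCW bus-IDs (read, write and data channel,
 * e.g. "0.0.f500,0.0.f501,0.0.f502") to the driver's 'group' attribute
 * creates the ccwgroup device - hence the fixed count of 3 below.
 */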
5882 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5883 size_t count)
5884 {
5885 int err;
5886
5887 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5888 buf);
5889
5890 return err ? err : count;
5891 }
5892 static DRIVER_ATTR_WO(group);
5893
5894 static struct attribute *qeth_drv_attrs[] = {
5895 &driver_attr_group.attr,
5896 NULL,
5897 };
5898 static struct attribute_group qeth_drv_attr_group = {
5899 .attrs = qeth_drv_attrs,
5900 };
5901 static const struct attribute_group *qeth_drv_attr_groups[] = {
5902 &qeth_drv_attr_group,
5903 NULL,
5904 };
5905
5906 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5907 .driver = {
5908 .groups = qeth_drv_attr_groups,
5909 .owner = THIS_MODULE,
5910 .name = "qeth",
5911 },
5912 .ccw_driver = &qeth_ccw_driver,
5913 .setup = qeth_core_probe_device,
5914 .remove = qeth_core_remove_device,
5915 .set_online = qeth_core_set_online,
5916 .set_offline = qeth_core_set_offline,
5917 .shutdown = qeth_core_shutdown,
5918 .prepare = NULL,
5919 .complete = NULL,
5920 .freeze = qeth_core_freeze,
5921 .thaw = qeth_core_thaw,
5922 .restore = qeth_core_restore,
5923 };
5924
5925 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5926 {
5927 struct ccwgroup_device *gdev;
5928 struct qeth_card *card;
5929
5930 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5931 if (!gdev)
5932 return NULL;
5933
5934 card = dev_get_drvdata(&gdev->dev);
5935 put_device(&gdev->dev);
5936 return card;
5937 }
5938 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
5939
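/* Note: qeth emulates a single MII PHY with id 0. SIOCGMIIREG is answered
 * via qeth_mdio_read() for that PHY only; any other phy_id yields -EINVAL.
 */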
5940 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5941 {
5942 struct qeth_card *card = dev->ml_priv;
5943 struct mii_ioctl_data *mii_data;
5944 int rc = 0;
5945
5946 if (!card)
5947 return -ENODEV;
5948
5949 if (!qeth_card_hw_is_reachable(card))
5950 return -ENODEV;
5951
5952 if (card->info.type == QETH_CARD_TYPE_OSN)
5953 return -EPERM;
5954
5955 switch (cmd) {
5956 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5957 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5958 break;
5959 case SIOC_QETH_GET_CARD_TYPE:
5960 if ((card->info.type == QETH_CARD_TYPE_OSD ||
5961 card->info.type == QETH_CARD_TYPE_OSM ||
5962 card->info.type == QETH_CARD_TYPE_OSX) &&
5963 !card->info.guestlan)
5964 return 1;
5965 else
5966 return 0;
5967 case SIOCGMIIPHY:
5968 mii_data = if_mii(rq);
5969 mii_data->phy_id = 0;
5970 break;
5971 case SIOCGMIIREG:
5972 mii_data = if_mii(rq);
5973 if (mii_data->phy_id != 0)
5974 rc = -EINVAL;
5975 else
5976 mii_data->val_out = qeth_mdio_read(dev,
5977 mii_data->phy_id, mii_data->reg_num);
5978 break;
5979 case SIOC_QETH_QUERY_OAT:
5980 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
5981 break;
5982 default:
5983 if (card->discipline->do_ioctl)
5984 rc = card->discipline->do_ioctl(dev, rq, cmd);
5985 else
5986 rc = -EOPNOTSUPP;
5987 }
5988 if (rc)
5989 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
5990 return rc;
5991 }
5992 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
5993
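/* The order of these keys must match the data[] indices filled in by
 * qeth_core_get_ethtool_stats() below; qeth_core_get_sset_count() derives
 * the reported count from the size of this array.
 */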
5994 static struct {
5995 const char str[ETH_GSTRING_LEN];
5996 } qeth_ethtool_stats_keys[] = {
5997 /* 0 */{"rx skbs"},
5998 {"rx buffers"},
5999 {"tx skbs"},
6000 {"tx buffers"},
6001 {"tx skbs no packing"},
6002 {"tx buffers no packing"},
6003 {"tx skbs packing"},
6004 {"tx buffers packing"},
6005 {"tx sg skbs"},
6006 {"tx buffer elements"},
6007 /* 10 */{"rx sg skbs"},
6008 {"rx sg frags"},
6009 {"rx sg page allocs"},
6010 {"tx large kbytes"},
6011 {"tx large count"},
6012 {"tx pk state ch n->p"},
6013 {"tx pk state ch p->n"},
6014 {"tx pk watermark low"},
6015 {"tx pk watermark high"},
6016 {"queue 0 buffer usage"},
6017 /* 20 */{"queue 1 buffer usage"},
6018 {"queue 2 buffer usage"},
6019 {"queue 3 buffer usage"},
6020 {"rx poll time"},
6021 {"rx poll count"},
6022 {"rx do_QDIO time"},
6023 {"rx do_QDIO count"},
6024 {"tx handler time"},
6025 {"tx handler count"},
6026 {"tx time"},
6027 /* 30 */{"tx count"},
6028 {"tx do_QDIO time"},
6029 {"tx do_QDIO count"},
6030 {"tx csum"},
6031 {"tx lin"},
6032 {"tx linfail"},
6033 {"cq handler count"},
6034 {"cq handler time"},
6035 {"rx csum"}
6036 };
6037
6038 int qeth_core_get_sset_count(struct net_device *dev, int stringset)
6039 {
6040 switch (stringset) {
6041 case ETH_SS_STATS:
6042 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
6043 default:
6044 return -EINVAL;
6045 }
6046 }
6047 EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
6048
6049 void qeth_core_get_ethtool_stats(struct net_device *dev,
6050 struct ethtool_stats *stats, u64 *data)
6051 {
6052 struct qeth_card *card = dev->ml_priv;
6053 data[0] = card->stats.rx_packets -
6054 card->perf_stats.initial_rx_packets;
6055 data[1] = card->perf_stats.bufs_rec;
6056 data[2] = card->stats.tx_packets -
6057 card->perf_stats.initial_tx_packets;
6058 data[3] = card->perf_stats.bufs_sent;
6059 data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
6060 - card->perf_stats.skbs_sent_pack;
6061 data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
6062 data[6] = card->perf_stats.skbs_sent_pack;
6063 data[7] = card->perf_stats.bufs_sent_pack;
6064 data[8] = card->perf_stats.sg_skbs_sent;
6065 data[9] = card->perf_stats.buf_elements_sent;
6066 data[10] = card->perf_stats.sg_skbs_rx;
6067 data[11] = card->perf_stats.sg_frags_rx;
6068 data[12] = card->perf_stats.sg_alloc_page_rx;
6069 data[13] = (card->perf_stats.large_send_bytes >> 10);
6070 data[14] = card->perf_stats.large_send_cnt;
6071 data[15] = card->perf_stats.sc_dp_p;
6072 data[16] = card->perf_stats.sc_p_dp;
6073 data[17] = QETH_LOW_WATERMARK_PACK;
6074 data[18] = QETH_HIGH_WATERMARK_PACK;
6075 data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
6076 data[20] = (card->qdio.no_out_queues > 1) ?
6077 atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
6078 data[21] = (card->qdio.no_out_queues > 2) ?
6079 atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
6080 data[22] = (card->qdio.no_out_queues > 3) ?
6081 atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
6082 data[23] = card->perf_stats.inbound_time;
6083 data[24] = card->perf_stats.inbound_cnt;
6084 data[25] = card->perf_stats.inbound_do_qdio_time;
6085 data[26] = card->perf_stats.inbound_do_qdio_cnt;
6086 data[27] = card->perf_stats.outbound_handler_time;
6087 data[28] = card->perf_stats.outbound_handler_cnt;
6088 data[29] = card->perf_stats.outbound_time;
6089 data[30] = card->perf_stats.outbound_cnt;
6090 data[31] = card->perf_stats.outbound_do_qdio_time;
6091 data[32] = card->perf_stats.outbound_do_qdio_cnt;
6092 data[33] = card->perf_stats.tx_csum;
6093 data[34] = card->perf_stats.tx_lin;
6094 data[35] = card->perf_stats.tx_linfail;
6095 data[36] = card->perf_stats.cq_cnt;
6096 data[37] = card->perf_stats.cq_time;
6097 data[38] = card->perf_stats.rx_csum;
6098 }
6099 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
6100
6101 void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6102 {
6103 switch (stringset) {
6104 case ETH_SS_STATS:
6105 memcpy(data, &qeth_ethtool_stats_keys,
6106 sizeof(qeth_ethtool_stats_keys));
6107 break;
6108 default:
6109 WARN_ON(1);
6110 break;
6111 }
6112 }
6113 EXPORT_SYMBOL_GPL(qeth_core_get_strings);
6114
6115 void qeth_core_get_drvinfo(struct net_device *dev,
6116 struct ethtool_drvinfo *info)
6117 {
6118 struct qeth_card *card = dev->ml_priv;
6119
6120 strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
6121 sizeof(info->driver));
6122 strlcpy(info->version, "1.0", sizeof(info->version));
6123 strlcpy(info->fw_version, card->info.mcl_level,
6124 sizeof(info->fw_version));
6125 snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
6126 CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
6127 }
6128 EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
6129
6130 /* Helper function to fill 'advertising' and 'supported' which are the same. */
6131 /* Autoneg and full-duplex are supported and advertised unconditionally. */
6132 /* Always advertise and support all speeds up to the specified maximum, */
6133 /* and only one specified port type. */
6134 static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
6135 int maxspeed, int porttype)
6136 {
6137 ethtool_link_ksettings_zero_link_mode(cmd, supported);
6138 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
6139 ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
6140
6141 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
6142 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
6143
6144 switch (porttype) {
6145 case PORT_TP:
6146 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6147 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6148 break;
6149 case PORT_FIBRE:
6150 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
6151 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
6152 break;
6153 default:
6154 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6155 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6156 WARN_ON_ONCE(1);
6157 }
6158
6159 /* intentionally falls through, to also select all lower speeds */
6160 switch (maxspeed) {
6161 case SPEED_25000:
6162 ethtool_link_ksettings_add_link_mode(cmd, supported,
6163 25000baseSR_Full);
6164 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6165 25000baseSR_Full);
6166 break;
6167 case SPEED_10000:
6168 ethtool_link_ksettings_add_link_mode(cmd, supported,
6169 10000baseT_Full);
6170 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6171 10000baseT_Full);
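/* fall through */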
6172 case SPEED_1000:
6173 ethtool_link_ksettings_add_link_mode(cmd, supported,
6174 1000baseT_Full);
6175 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6176 1000baseT_Full);
6177 ethtool_link_ksettings_add_link_mode(cmd, supported,
6178 1000baseT_Half);
6179 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6180 1000baseT_Half);
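/* fall through */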
6181 case SPEED_100:
6182 ethtool_link_ksettings_add_link_mode(cmd, supported,
6183 100baseT_Full);
6184 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6185 100baseT_Full);
6186 ethtool_link_ksettings_add_link_mode(cmd, supported,
6187 100baseT_Half);
6188 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6189 100baseT_Half);
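/* fall through */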
6190 case SPEED_10:
6191 ethtool_link_ksettings_add_link_mode(cmd, supported,
6192 10baseT_Full);
6193 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6194 10baseT_Full);
6195 ethtool_link_ksettings_add_link_mode(cmd, supported,
6196 10baseT_Half);
6197 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6198 10baseT_Half);
6199 /* end fallthrough */
6200 break;
6201 default:
6202 ethtool_link_ksettings_add_link_mode(cmd, supported,
6203 10baseT_Full);
6204 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6205 10baseT_Full);
6206 ethtool_link_ksettings_add_link_mode(cmd, supported,
6207 10baseT_Half);
6208 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6209 10baseT_Half);
6210 WARN_ON_ONCE(1);
6211 }
6212 }
6213
6214 int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
6215 struct ethtool_link_ksettings *cmd)
6216 {
6217 struct qeth_card *card = netdev->ml_priv;
6218 enum qeth_link_types link_type;
6219 struct carrier_info carrier_info;
6220 int rc;
6221
6222 if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
6223 link_type = QETH_LINK_TYPE_10GBIT_ETH;
6224 else
6225 link_type = card->info.link_type;
6226
6227 cmd->base.duplex = DUPLEX_FULL;
6228 cmd->base.autoneg = AUTONEG_ENABLE;
6229 cmd->base.phy_address = 0;
6230 cmd->base.mdio_support = 0;
6231 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
6232 cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6233
6234 switch (link_type) {
6235 case QETH_LINK_TYPE_FAST_ETH:
6236 case QETH_LINK_TYPE_LANE_ETH100:
6237 cmd->base.speed = SPEED_100;
6238 cmd->base.port = PORT_TP;
6239 break;
6240 case QETH_LINK_TYPE_GBIT_ETH:
6241 case QETH_LINK_TYPE_LANE_ETH1000:
6242 cmd->base.speed = SPEED_1000;
6243 cmd->base.port = PORT_FIBRE;
6244 break;
6245 case QETH_LINK_TYPE_10GBIT_ETH:
6246 cmd->base.speed = SPEED_10000;
6247 cmd->base.port = PORT_FIBRE;
6248 break;
6249 case QETH_LINK_TYPE_25GBIT_ETH:
6250 cmd->base.speed = SPEED_25000;
6251 cmd->base.port = PORT_FIBRE;
6252 break;
6253 default:
6254 cmd->base.speed = SPEED_10;
6255 cmd->base.port = PORT_TP;
6256 }
6257 qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
6258
6259 /* Check if we can obtain more accurate information. */
6260 /* If QUERY_CARD_INFO command is not supported or fails, */
6261 /* just return the heuristics that were filled in above. */
6262 if (!qeth_card_hw_is_reachable(card))
6263 return -ENODEV;
6264 rc = qeth_query_card_info(card, &carrier_info);
6265 if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
6266 return 0;
6267 if (rc) /* report error from the hardware operation */
6268 return rc;
6269 /* on success, fill in the information obtained from the hardware */
6270
6271 netdev_dbg(netdev,
6272 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
6273 carrier_info.card_type,
6274 carrier_info.port_mode,
6275 carrier_info.port_speed);
6276
6277 /* Update attributes for which we've obtained more authoritative */
6278 /* information, leave the rest the way they were filled above. */
6279 switch (carrier_info.card_type) {
6280 case CARD_INFO_TYPE_1G_COPPER_A:
6281 case CARD_INFO_TYPE_1G_COPPER_B:
6282 cmd->base.port = PORT_TP;
6283 qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6284 break;
6285 case CARD_INFO_TYPE_1G_FIBRE_A:
6286 case CARD_INFO_TYPE_1G_FIBRE_B:
6287 cmd->base.port = PORT_FIBRE;
6288 qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6289 break;
6290 case CARD_INFO_TYPE_10G_FIBRE_A:
6291 case CARD_INFO_TYPE_10G_FIBRE_B:
6292 cmd->base.port = PORT_FIBRE;
6293 qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
6294 break;
6295 }
6296
6297 switch (carrier_info.port_mode) {
6298 case CARD_INFO_PORTM_FULLDUPLEX:
6299 cmd->base.duplex = DUPLEX_FULL;
6300 break;
6301 case CARD_INFO_PORTM_HALFDUPLEX:
6302 cmd->base.duplex = DUPLEX_HALF;
6303 break;
6304 }
6305
6306 switch (carrier_info.port_speed) {
6307 case CARD_INFO_PORTS_10M:
6308 cmd->base.speed = SPEED_10;
6309 break;
6310 case CARD_INFO_PORTS_100M:
6311 cmd->base.speed = SPEED_100;
6312 break;
6313 case CARD_INFO_PORTS_1G:
6314 cmd->base.speed = SPEED_1000;
6315 break;
6316 case CARD_INFO_PORTS_10G:
6317 cmd->base.speed = SPEED_10000;
6318 break;
6319 case CARD_INFO_PORTS_25G:
6320 cmd->base.speed = SPEED_25000;
6321 break;
6322 }
6323
6324 return 0;
6325 }
6326 EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
6327
6328 /* Callback to handle checksum offload command reply from OSA card.
6329 * Verify that required features have been enabled on the card.
6330 * Return error in hdr->return_code as this value is checked by caller.
6331 *
6332 * Always returns zero to indicate no further messages from the OSA card.
6333 */
6334 static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
6335 struct qeth_reply *reply,
6336 unsigned long data)
6337 {
6338 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6339 struct qeth_checksum_cmd *chksum_cb =
6340 (struct qeth_checksum_cmd *)reply->param;
6341
6342 QETH_CARD_TEXT(card, 4, "chkdoccb");
6343 if (qeth_setassparms_inspect_rc(cmd))
6344 return 0;
6345
6346 memset(chksum_cb, 0, sizeof(*chksum_cb));
6347 if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6348 chksum_cb->supported =
6349 cmd->data.setassparms.data.chksum.supported;
6350 QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
6351 }
6352 if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
6353 chksum_cb->supported =
6354 cmd->data.setassparms.data.chksum.supported;
6355 chksum_cb->enabled =
6356 cmd->data.setassparms.data.chksum.enabled;
6357 QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
6358 QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
6359 }
6360 return 0;
6361 }
6362
6363 /* Send command to OSA card and check results. */
6364 static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
6365 enum qeth_ipa_funcs ipa_func,
6366 __u16 cmd_code, long data,
6367 struct qeth_checksum_cmd *chksum_cb,
6368 enum qeth_prot_versions prot)
6369 {
6370 struct qeth_cmd_buffer *iob;
6371
6372 QETH_CARD_TEXT(card, 4, "chkdocmd");
6373 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6374 sizeof(__u32), prot);
6375 if (!iob)
6376 return -ENOMEM;
6377
6378 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
6379 return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb,
6380 chksum_cb);
6381 }
6382
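/* Two-step handshake: IPA_CMD_ASS_START reports the supported checksum
 * features, which are checked against the required TCP/UDP (and, for IPv4,
 * IP header) bits; IPA_CMD_ASS_ENABLE then activates them and the enabled
 * mask is verified again. Any failure rolls back via IPA_CMD_ASS_STOP.
 */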
6383 static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
6384 enum qeth_prot_versions prot)
6385 {
6386 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6387 struct qeth_checksum_cmd chksum_cb;
6388 int rc;
6389
6390 if (prot == QETH_PROT_IPV4)
6391 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6392 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6393 &chksum_cb, prot);
6394 if (!rc) {
6395 if ((required_features & chksum_cb.supported) !=
6396 required_features)
6397 rc = -EIO;
6398 else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
6399 cstype == IPA_INBOUND_CHECKSUM)
6400 dev_warn(&card->gdev->dev,
6401 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
6402 QETH_CARD_IFNAME(card));
6403 }
6404 if (rc) {
6405 qeth_send_simple_setassparms_prot(card, cstype,
6406 IPA_CMD_ASS_STOP, 0, prot);
6407 dev_warn(&card->gdev->dev,
6408 "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
6409 prot, QETH_CARD_IFNAME(card));
6410 return rc;
6411 }
6412 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6413 chksum_cb.supported, &chksum_cb,
6414 prot);
6415 if (!rc) {
6416 if ((required_features & chksum_cb.enabled) !=
6417 required_features)
6418 rc = -EIO;
6419 }
6420 if (rc) {
6421 qeth_send_simple_setassparms_prot(card, cstype,
6422 IPA_CMD_ASS_STOP, 0, prot);
6423 dev_warn(&card->gdev->dev,
6424 "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
6425 prot, QETH_CARD_IFNAME(card));
6426 return rc;
6427 }
6428
6429 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6430 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6431 return 0;
6432 }
6433
6434 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6435 enum qeth_prot_versions prot)
6436 {
6437 int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
6438 : qeth_send_simple_setassparms_prot(card, cstype,
6439 IPA_CMD_ASS_STOP, 0,
6440 prot);
6441 return rc ? -EIO : 0;
6442 }
6443
6444 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6445 unsigned long data)
6446 {
6447 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6448 struct qeth_tso_start_data *tso_data = reply->param;
6449
6450 if (qeth_setassparms_inspect_rc(cmd))
6451 return 0;
6452
6453 tso_data->mss = cmd->data.setassparms.data.tso.mss;
6454 tso_data->supported = cmd->data.setassparms.data.tso.supported;
6455 return 0;
6456 }
6457
6458 static int qeth_set_tso_off(struct qeth_card *card,
6459 enum qeth_prot_versions prot)
6460 {
6461 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6462 IPA_CMD_ASS_STOP, 0, prot);
6463 }
6464
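/* Mirrors the checksum handshake: IPA_CMD_ASS_START reports the supported
 * TSO capabilities and the MSS; IPA_CMD_ASS_ENABLE then switches on
 * QETH_IPA_LARGE_SEND_TCP, verified through the caps callback. Every
 * failure path disables TSO again via qeth_set_tso_off().
 */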
6465 static int qeth_set_tso_on(struct qeth_card *card,
6466 enum qeth_prot_versions prot)
6467 {
6468 struct qeth_tso_start_data tso_data;
6469 struct qeth_cmd_buffer *iob;
6470 struct qeth_ipa_caps caps;
6471 int rc;
6472
6473 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6474 IPA_CMD_ASS_START, 0, prot);
6475 if (!iob)
6476 return -ENOMEM;
6477
6478 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6479 if (rc)
6480 return rc;
6481
6482 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6483 qeth_set_tso_off(card, prot);
6484 return -EOPNOTSUPP;
6485 }
6486
6487 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6488 IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
6489 if (!iob) {
6490 qeth_set_tso_off(card, prot);
6491 return -ENOMEM;
6492 }
6493
6494 /* enable TSO capability */
6495 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6496 QETH_IPA_LARGE_SEND_TCP;
6497 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6498 if (rc) {
6499 qeth_set_tso_off(card, prot);
6500 return rc;
6501 }
6502
6503 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6504 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6505 qeth_set_tso_off(card, prot);
6506 return -EOPNOTSUPP;
6507 }
6508
6509 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6510 tso_data.mss);
6511 return 0;
6512 }
6513
6514 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6515 enum qeth_prot_versions prot)
6516 {
6517 int rc = on ? qeth_set_tso_on(card, prot) :
6518 qeth_set_tso_off(card, prot);
6519
6520 return rc ? -EIO : 0;
6521 }
6522
6523 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6524 {
6525 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6526 int rc_ipv6;
6527
6528 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6529 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6530 QETH_PROT_IPV4);
6531 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6532 /* none or only one Offload Assist available, so the rc is trivial */
6533 return rc_ipv4;
6534
6535 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6536 QETH_PROT_IPV6);
6537
6538 if (on)
6539 /* enable: success if any Assist is active */
6540 return (rc_ipv6) ? rc_ipv4 : 0;
6541
6542 /* disable: failure if any Assist is still active */
6543 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6544 }
6545
6546 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
6547 NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
6548 /**
6549 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6550 * @dev: a net_device
6551 */
6552 void qeth_enable_hw_features(struct net_device *dev)
6553 {
6554 struct qeth_card *card = dev->ml_priv;
6555 netdev_features_t features;
6556
6557 rtnl_lock();
6558 features = dev->features;
6559 /* force-off any feature that needs an IPA sequence.
6560 * netdev_update_features() will restart them.
6561 */
6562 dev->features &= ~QETH_HW_FEATURES;
6563 netdev_update_features(dev);
6564 if (features != dev->features)
6565 dev_warn(&card->gdev->dev,
6566 "Device recovery failed to restore all offload features\n");
6567 rtnl_unlock();
6568 }
6569 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6570
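/* Bits that fail to toggle are XORed back out of 'changed'. If all
 * requested transitions succeeded, the netdev core commits the new feature
 * set; otherwise dev->features ^= changed records just the successful ones
 * and -EIO reports the partial failure.
 */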
6571 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6572 {
6573 struct qeth_card *card = dev->ml_priv;
6574 netdev_features_t changed = dev->features ^ features;
6575 int rc = 0;
6576
6577 QETH_DBF_TEXT(SETUP, 2, "setfeat");
6578 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6579
6580 if ((changed & NETIF_F_IP_CSUM)) {
6581 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6582 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6583 if (rc)
6584 changed ^= NETIF_F_IP_CSUM;
6585 }
6586 if (changed & NETIF_F_IPV6_CSUM) {
6587 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6588 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6589 if (rc)
6590 changed ^= NETIF_F_IPV6_CSUM;
6591 }
6592 if (changed & NETIF_F_RXCSUM) {
6593 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6594 if (rc)
6595 changed ^= NETIF_F_RXCSUM;
6596 }
6597 if (changed & NETIF_F_TSO) {
6598 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6599 QETH_PROT_IPV4);
6600 if (rc)
6601 changed ^= NETIF_F_TSO;
6602 }
6603 if (changed & NETIF_F_TSO6) {
6604 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6605 QETH_PROT_IPV6);
6606 if (rc)
6607 changed ^= NETIF_F_TSO6;
6608 }
6609
6610 /* everything changed successfully? */
6611 if ((dev->features ^ features) == changed)
6612 return 0;
6613 /* something went wrong. save changed features and return error */
6614 dev->features ^= changed;
6615 return -EIO;
6616 }
6617 EXPORT_SYMBOL_GPL(qeth_set_features);
6618
6619 netdev_features_t qeth_fix_features(struct net_device *dev,
6620 netdev_features_t features)
6621 {
6622 struct qeth_card *card = dev->ml_priv;
6623
6624 QETH_DBF_TEXT(SETUP, 2, "fixfeat");
6625 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6626 features &= ~NETIF_F_IP_CSUM;
6627 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6628 features &= ~NETIF_F_IPV6_CSUM;
6629 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6630 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6631 features &= ~NETIF_F_RXCSUM;
6632 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6633 features &= ~NETIF_F_TSO;
6634 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6635 features &= ~NETIF_F_TSO6;
6636 /* if the card isn't up, remove features that require hw changes */
6637 if (card->state == CARD_STATE_DOWN ||
6638 card->state == CARD_STATE_RECOVER)
6639 features &= ~QETH_HW_FEATURES;
6640 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6641 return features;
6642 }
6643 EXPORT_SYMBOL_GPL(qeth_fix_features);
6644
6645 netdev_features_t qeth_features_check(struct sk_buff *skb,
6646 struct net_device *dev,
6647 netdev_features_t features)
6648 {
6649 /* GSO segmentation builds skbs with
6650 * a (small) linear part for the headers, and
6651 * page frags for the data.
6652 * Compared to a linear skb, the header-only part consumes an
6653 * additional buffer element. This reduces buffer utilization, and
6654 * hurts throughput. So compress small segments into one element.
6655 */
6656 if (netif_needs_gso(skb, features)) {
6657 /* match skb_segment(): */
6658 unsigned int doffset = skb->data - skb_mac_header(skb);
6659 unsigned int hsize = skb_shinfo(skb)->gso_size;
6660 unsigned int hroom = skb_headroom(skb);
6661
6662 /* linearize only if resulting skb allocations are order-0: */
6663 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6664 features &= ~NETIF_F_SG;
6665 }
6666
6667 return vlan_features_check(skb, features);
6668 }
6669 EXPORT_SYMBOL_GPL(qeth_features_check);
6670
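/* Module init: the error labels below unwind, in exact reverse order,
 * whatever had been set up before the failing step.
 */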
6671 static int __init qeth_core_init(void)
6672 {
6673 int rc;
6674
6675 pr_info("loading core functions\n");
6676
6677 qeth_wq = create_singlethread_workqueue("qeth_wq");
6678 if (!qeth_wq) {
6679 rc = -ENOMEM;
6680 goto out_err;
6681 }
6682
6683 rc = qeth_register_dbf_views();
6684 if (rc)
6685 goto dbf_err;
6686 qeth_core_root_dev = root_device_register("qeth");
6687 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6688 if (rc)
6689 goto register_err;
6690 qeth_core_header_cache =
6691 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6692 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6693 0, NULL);
6694 if (!qeth_core_header_cache) {
6695 rc = -ENOMEM;
6696 goto slab_err;
6697 }
6698 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6699 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6700 if (!qeth_qdio_outbuf_cache) {
6701 rc = -ENOMEM;
6702 goto cqslab_err;
6703 }
6704 rc = ccw_driver_register(&qeth_ccw_driver);
6705 if (rc)
6706 goto ccw_err;
6707 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6708 if (rc)
6709 goto ccwgroup_err;
6710
6711 return 0;
6712
6713 ccwgroup_err:
6714 ccw_driver_unregister(&qeth_ccw_driver);
6715 ccw_err:
6716 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6717 cqslab_err:
6718 kmem_cache_destroy(qeth_core_header_cache);
6719 slab_err:
6720 root_device_unregister(qeth_core_root_dev);
6721 register_err:
6722 qeth_unregister_dbf_views();
6723 dbf_err:
6724 destroy_workqueue(qeth_wq);
6725 out_err:
6726 pr_err("Initializing the qeth device driver failed\n");
6727 return rc;
6728 }
6729
6730 static void __exit qeth_core_exit(void)
6731 {
6732 qeth_clear_dbf_list();
6733 destroy_workqueue(qeth_wq);
6734 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6735 ccw_driver_unregister(&qeth_ccw_driver);
6736 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6737 kmem_cache_destroy(qeth_core_header_cache);
6738 root_device_unregister(qeth_core_root_dev);
6739 qeth_unregister_dbf_views();
6740 pr_info("core functions removed\n");
6741 }
6742
6743 module_init(qeth_core_init);
6744 module_exit(qeth_core_exit);
6745 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6746 MODULE_DESCRIPTION("qeth core functions");
6747 MODULE_LICENSE("GPL");