drivers/s390/net/qeth_core_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5 * Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/compat.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/mii.h>
23 #include <linux/kthread.h>
24 #include <linux/slab.h>
25 #include <linux/if_vlan.h>
26 #include <linux/netdevice.h>
27 #include <linux/netdev_features.h>
28 #include <linux/skbuff.h>
29 #include <linux/vmalloc.h>
30
31 #include <net/iucv/af_iucv.h>
32 #include <net/dsfield.h>
33
34 #include <asm/ebcdic.h>
35 #include <asm/chpid.h>
36 #include <asm/io.h>
37 #include <asm/sysinfo.h>
38 #include <asm/diag.h>
39 #include <asm/cio.h>
40 #include <asm/ccwdev.h>
41 #include <asm/cpcmd.h>
42
43 #include "qeth_core.h"
44
45 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
46 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
47 /* N P A M L V H */
48 [QETH_DBF_SETUP] = {"qeth_setup",
49 8, 1, 8, 5, &debug_hex_ascii_view, NULL},
50 [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
51 &debug_sprintf_view, NULL},
52 [QETH_DBF_CTRL] = {"qeth_control",
53 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
54 };
55 EXPORT_SYMBOL_GPL(qeth_dbf);
56
57 struct kmem_cache *qeth_core_header_cache;
58 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
59 static struct kmem_cache *qeth_qdio_outbuf_cache;
60
61 static struct device *qeth_core_root_dev;
62 static struct lock_class_key qdio_out_skb_queue_key;
63
64 static void qeth_send_control_data_cb(struct qeth_card *card,
65 struct qeth_channel *channel,
66 struct qeth_cmd_buffer *iob);
67 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
68 static void qeth_free_buffer_pool(struct qeth_card *);
69 static int qeth_qdio_establish(struct qeth_card *);
70 static void qeth_free_qdio_buffers(struct qeth_card *);
71 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
72 struct qeth_qdio_out_buffer *buf,
73 enum iucv_tx_notify notification);
74 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
75 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
76
77 struct workqueue_struct *qeth_wq;
78 EXPORT_SYMBOL_GPL(qeth_wq);
79
80 int qeth_card_hw_is_reachable(struct qeth_card *card)
81 {
82 return (card->state == CARD_STATE_SOFTSETUP) ||
83 (card->state == CARD_STATE_UP);
84 }
85 EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
86
87 static void qeth_close_dev_handler(struct work_struct *work)
88 {
89 struct qeth_card *card;
90
91 card = container_of(work, struct qeth_card, close_dev_work);
92 QETH_CARD_TEXT(card, 2, "cldevhdl");
93 rtnl_lock();
94 dev_close(card->dev);
95 rtnl_unlock();
96 ccwgroup_set_offline(card->gdev);
97 }
98
99 void qeth_close_dev(struct qeth_card *card)
100 {
101 QETH_CARD_TEXT(card, 2, "cldevsubm");
102 queue_work(qeth_wq, &card->close_dev_work);
103 }
104 EXPORT_SYMBOL_GPL(qeth_close_dev);
105
106 static const char *qeth_get_cardname(struct qeth_card *card)
107 {
108 if (card->info.guestlan) {
109 switch (card->info.type) {
110 case QETH_CARD_TYPE_OSD:
111 return " Virtual NIC QDIO";
112 case QETH_CARD_TYPE_IQD:
113 return " Virtual NIC Hiper";
114 case QETH_CARD_TYPE_OSM:
115 return " Virtual NIC QDIO - OSM";
116 case QETH_CARD_TYPE_OSX:
117 return " Virtual NIC QDIO - OSX";
118 default:
119 return " unknown";
120 }
121 } else {
122 switch (card->info.type) {
123 case QETH_CARD_TYPE_OSD:
124 return " OSD Express";
125 case QETH_CARD_TYPE_IQD:
126 return " HiperSockets";
127 case QETH_CARD_TYPE_OSN:
128 return " OSN QDIO";
129 case QETH_CARD_TYPE_OSM:
130 return " OSM QDIO";
131 case QETH_CARD_TYPE_OSX:
132 return " OSX QDIO";
133 default:
134 return " unknown";
135 }
136 }
137 return " n/a";
138 }
139
140 /* max length to be returned: 14 */
141 const char *qeth_get_cardname_short(struct qeth_card *card)
142 {
143 if (card->info.guestlan) {
144 switch (card->info.type) {
145 case QETH_CARD_TYPE_OSD:
146 return "Virt.NIC QDIO";
147 case QETH_CARD_TYPE_IQD:
148 return "Virt.NIC Hiper";
149 case QETH_CARD_TYPE_OSM:
150 return "Virt.NIC OSM";
151 case QETH_CARD_TYPE_OSX:
152 return "Virt.NIC OSX";
153 default:
154 return "unknown";
155 }
156 } else {
157 switch (card->info.type) {
158 case QETH_CARD_TYPE_OSD:
159 switch (card->info.link_type) {
160 case QETH_LINK_TYPE_FAST_ETH:
161 return "OSD_100";
162 case QETH_LINK_TYPE_HSTR:
163 return "HSTR";
164 case QETH_LINK_TYPE_GBIT_ETH:
165 return "OSD_1000";
166 case QETH_LINK_TYPE_10GBIT_ETH:
167 return "OSD_10GIG";
168 case QETH_LINK_TYPE_25GBIT_ETH:
169 return "OSD_25GIG";
170 case QETH_LINK_TYPE_LANE_ETH100:
171 return "OSD_FE_LANE";
172 case QETH_LINK_TYPE_LANE_TR:
173 return "OSD_TR_LANE";
174 case QETH_LINK_TYPE_LANE_ETH1000:
175 return "OSD_GbE_LANE";
176 case QETH_LINK_TYPE_LANE:
177 return "OSD_ATM_LANE";
178 default:
179 return "OSD_Express";
180 }
181 case QETH_CARD_TYPE_IQD:
182 return "HiperSockets";
183 case QETH_CARD_TYPE_OSN:
184 return "OSN";
185 case QETH_CARD_TYPE_OSM:
186 return "OSM_1000";
187 case QETH_CARD_TYPE_OSX:
188 return "OSX_10GIG";
189 default:
190 return "unknown";
191 }
192 }
193 return "n/a";
194 }
195
196 void qeth_set_recovery_task(struct qeth_card *card)
197 {
198 card->recovery_task = current;
199 }
200 EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
201
202 void qeth_clear_recovery_task(struct qeth_card *card)
203 {
204 card->recovery_task = NULL;
205 }
206 EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
207
208 static bool qeth_is_recovery_task(const struct qeth_card *card)
209 {
210 return card->recovery_task == current;
211 }
212
213 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
214 int clear_start_mask)
215 {
216 unsigned long flags;
217
218 spin_lock_irqsave(&card->thread_mask_lock, flags);
219 card->thread_allowed_mask = threads;
220 if (clear_start_mask)
221 card->thread_start_mask &= threads;
222 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
223 wake_up(&card->wait_q);
224 }
225 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
226
227 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
228 {
229 unsigned long flags;
230 int rc = 0;
231
232 spin_lock_irqsave(&card->thread_mask_lock, flags);
233 rc = (card->thread_running_mask & threads);
234 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
235 return rc;
236 }
237 EXPORT_SYMBOL_GPL(qeth_threads_running);
238
239 int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
240 {
241 if (qeth_is_recovery_task(card))
242 return 0;
243 return wait_event_interruptible(card->wait_q,
244 qeth_threads_running(card, threads) == 0);
245 }
246 EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
247
248 void qeth_clear_working_pool_list(struct qeth_card *card)
249 {
250 struct qeth_buffer_pool_entry *pool_entry, *tmp;
251
252 QETH_CARD_TEXT(card, 5, "clwrklst");
253 list_for_each_entry_safe(pool_entry, tmp,
254                                  &card->qdio.in_buf_pool.entry_list, list) {
255 list_del(&pool_entry->list);
256 }
257 }
258 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
259
260 static int qeth_alloc_buffer_pool(struct qeth_card *card)
261 {
262 struct qeth_buffer_pool_entry *pool_entry;
263 void *ptr;
264 int i, j;
265
266 QETH_CARD_TEXT(card, 5, "alocpool");
267 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
268 pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
269 if (!pool_entry) {
270 qeth_free_buffer_pool(card);
271 return -ENOMEM;
272 }
273 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
274 ptr = (void *) __get_free_page(GFP_KERNEL);
275 if (!ptr) {
276 while (j > 0)
277 free_page((unsigned long)
278 pool_entry->elements[--j]);
279 kfree(pool_entry);
280 qeth_free_buffer_pool(card);
281 return -ENOMEM;
282 }
283 pool_entry->elements[j] = ptr;
284 }
285 list_add(&pool_entry->init_list,
286 &card->qdio.init_pool.entry_list);
287 }
288 return 0;
289 }
290
291 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
292 {
293 QETH_CARD_TEXT(card, 2, "realcbp");
294
295 if ((card->state != CARD_STATE_DOWN) &&
296 (card->state != CARD_STATE_RECOVER))
297 return -EPERM;
298
299         /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
300 qeth_clear_working_pool_list(card);
301 qeth_free_buffer_pool(card);
302 card->qdio.in_buf_pool.buf_count = bufcnt;
303 card->qdio.init_pool.buf_count = bufcnt;
304 return qeth_alloc_buffer_pool(card);
305 }
306 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
307
308 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
309 {
310 if (!q)
311 return;
312
313 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
314 kfree(q);
315 }
316
317 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
318 {
319 struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
320 int i;
321
322 if (!q)
323 return NULL;
324
325 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
326 kfree(q);
327 return NULL;
328 }
329
330 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
331 q->bufs[i].buffer = q->qdio_bufs[i];
332
333 QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
334 return q;
335 }
336
337 static int qeth_cq_init(struct qeth_card *card)
338 {
339 int rc;
340
341 if (card->options.cq == QETH_CQ_ENABLED) {
342 QETH_DBF_TEXT(SETUP, 2, "cqinit");
343 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
344 QDIO_MAX_BUFFERS_PER_Q);
345 card->qdio.c_q->next_buf_to_init = 127;
346 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
347 card->qdio.no_in_queues - 1, 0,
348 127);
349 if (rc) {
350 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
351 goto out;
352 }
353 }
354 rc = 0;
355 out:
356 return rc;
357 }
358
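/* Set up the completion queue (CQ): an additional input queue on which
 * the device reports asynchronous completions for pended TX buffers.
 * This also allocates the qdio_outbuf_state array through which QDIO
 * tracks the per-buffer state of all output queues.
 */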
359 static int qeth_alloc_cq(struct qeth_card *card)
360 {
361 int rc;
362
363 if (card->options.cq == QETH_CQ_ENABLED) {
364 int i;
365 struct qdio_outbuf_state *outbuf_states;
366
367 QETH_DBF_TEXT(SETUP, 2, "cqon");
368 card->qdio.c_q = qeth_alloc_qdio_queue();
369 if (!card->qdio.c_q) {
370 rc = -1;
371 goto kmsg_out;
372 }
373 card->qdio.no_in_queues = 2;
374 card->qdio.out_bufstates =
375 kcalloc(card->qdio.no_out_queues *
376 QDIO_MAX_BUFFERS_PER_Q,
377 sizeof(struct qdio_outbuf_state),
378 GFP_KERNEL);
379 outbuf_states = card->qdio.out_bufstates;
380 if (outbuf_states == NULL) {
381 rc = -1;
382 goto free_cq_out;
383 }
384 for (i = 0; i < card->qdio.no_out_queues; ++i) {
385 card->qdio.out_qs[i]->bufstates = outbuf_states;
386 outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
387 }
388 } else {
389 QETH_DBF_TEXT(SETUP, 2, "nocq");
390 card->qdio.c_q = NULL;
391 card->qdio.no_in_queues = 1;
392 }
393 QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
394 rc = 0;
395 out:
396 return rc;
397 free_cq_out:
398 qeth_free_qdio_queue(card->qdio.c_q);
399 card->qdio.c_q = NULL;
400 kmsg_out:
401 dev_err(&card->gdev->dev, "Failed to create completion queue\n");
402 goto out;
403 }
404
405 static void qeth_free_cq(struct qeth_card *card)
406 {
407 if (card->qdio.c_q) {
408 --card->qdio.no_in_queues;
409 qeth_free_qdio_queue(card->qdio.c_q);
410 card->qdio.c_q = NULL;
411 }
412 kfree(card->qdio.out_bufstates);
413 card->qdio.out_bufstates = NULL;
414 }
415
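/* Map the SBALF 15 completion code of an asynchronously completed TX
 * buffer to an af_iucv notification: 0 means the data was delivered,
 * 4 and 16-18 mean the target is unreachable, and anything else is
 * reported as a general error. @delayed selects the TX_NOTIFY_DELAYED_*
 * variants for buffers that completed after being reported as pending.
 */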
416 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
417 int delayed)
418 {
419 enum iucv_tx_notify n;
420
421 switch (sbalf15) {
422 case 0:
423 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
424 break;
425 case 4:
426 case 16:
427 case 17:
428 case 18:
429 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
430 TX_NOTIFY_UNREACHABLE;
431 break;
432 default:
433 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
434 TX_NOTIFY_GENERALERROR;
435 break;
436 }
437
438 return n;
439 }
440
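/* Walk the chain of pending TX buffers hanging off q->bufs[bidx] and
 * free every buffer whose asynchronous completion has already been
 * handled (QETH_QDIO_BUF_HANDLED_DELAYED). With @forced_cleanup, eg.
 * during recovery, the chain is torn down unconditionally.
 */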
441 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
442 int forced_cleanup)
443 {
444 if (q->card->options.cq != QETH_CQ_ENABLED)
445 return;
446
447 if (q->bufs[bidx]->next_pending != NULL) {
448 struct qeth_qdio_out_buffer *head = q->bufs[bidx];
449 struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
450
451 while (c) {
452 if (forced_cleanup ||
453 atomic_read(&c->state) ==
454 QETH_QDIO_BUF_HANDLED_DELAYED) {
455 struct qeth_qdio_out_buffer *f = c;
456 QETH_CARD_TEXT(f->q->card, 5, "fp");
457 QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
458                                 /* release here to avoid interleaving between the
459                                    outbound and the inbound tasklet with respect
460                                    to notifications and the buffer lifecycle */
461 qeth_release_skbs(c);
462
463 c = f->next_pending;
464 WARN_ON_ONCE(head->next_pending != f);
465 head->next_pending = c;
466 kmem_cache_free(qeth_qdio_outbuf_cache, f);
467 } else {
468 head = c;
469 c = c->next_pending;
470 }
471
472 }
473 }
474 if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
475 QETH_QDIO_BUF_HANDLED_DELAYED)) {
476 /* for recovery situations */
477 qeth_init_qdio_out_buf(q, bidx);
478 QETH_CARD_TEXT(q->card, 2, "clprecov");
479 }
480 }
481
482
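/* Process the asynchronous completion (QAOB) of a pended TX buffer:
 * recover the buffer from the opaque user1 field, derive the af_iucv
 * notification from the buffer state and the AOB return code, free any
 * header-cache elements still referenced by the AOB and mark the buffer
 * as handled, so that qeth_cleanup_handled_pending() can reclaim it.
 */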
483 static void qeth_qdio_handle_aob(struct qeth_card *card,
484 unsigned long phys_aob_addr)
485 {
486 struct qaob *aob;
487 struct qeth_qdio_out_buffer *buffer;
488 enum iucv_tx_notify notification;
489 unsigned int i;
490
491 aob = (struct qaob *) phys_to_virt(phys_aob_addr);
492 QETH_CARD_TEXT(card, 5, "haob");
493 QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
494 buffer = (struct qeth_qdio_out_buffer *) aob->user1;
495 QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
496
497 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
498 QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
499 notification = TX_NOTIFY_OK;
500 } else {
501 WARN_ON_ONCE(atomic_read(&buffer->state) !=
502 QETH_QDIO_BUF_PENDING);
503 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
504 notification = TX_NOTIFY_DELAYED_OK;
505 }
506
507 if (aob->aorc != 0) {
508 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
509 notification = qeth_compute_cq_notification(aob->aorc, 1);
510 }
511 qeth_notify_skbs(buffer->q, buffer, notification);
512
513 /* Free dangling allocations. The attached skbs are handled by
514 * qeth_cleanup_handled_pending().
515 */
516 for (i = 0;
517 i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
518 i++) {
519 if (aob->sba[i] && buffer->is_header[i])
520 kmem_cache_free(qeth_core_header_cache,
521 (void *) aob->sba[i]);
522 }
523 atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
524
525 qdio_release_aob(aob);
526 }
527
528 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
529 {
530 return card->options.cq == QETH_CQ_ENABLED &&
531 card->qdio.c_q != NULL &&
532 queue != 0 &&
533 queue == card->qdio.no_in_queues - 1;
534 }
535
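/* Fill in a channel-command word for a single-CCW program. As a sketch
 * of the usual calling pattern (mirroring what __qeth_issue_next_read()
 * does below), a caller would do:
 *
 *	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE,
 *		       iob->data);
 *	rc = ccw_device_start(channel->ccwdev, channel->ccw,
 *			      (addr_t) iob, 0, 0);
 */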
536 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
537 {
538 ccw->cmd_code = cmd_code;
539 ccw->flags = CCW_FLAG_SLI;
540 ccw->count = len;
541 ccw->cda = (__u32) __pa(data);
542 }
543
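/* Start the next READ CCW on the read channel, giving the device a
 * buffer to post its next control message into. The caller must hold
 * the ccwdev lock; if the CCW cannot be started, the card is marked as
 * having a read/write problem and recovery is scheduled.
 */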
544 static int __qeth_issue_next_read(struct qeth_card *card)
545 {
546 struct qeth_channel *channel = &card->read;
547 struct qeth_cmd_buffer *iob;
548 int rc;
549
550 QETH_CARD_TEXT(card, 5, "issnxrd");
551 if (channel->state != CH_STATE_UP)
552 return -EIO;
553 iob = qeth_get_buffer(channel);
554 if (!iob) {
555 dev_warn(&card->gdev->dev, "The qeth device driver "
556 "failed to recover an error on the device\n");
557 QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
558 CARD_DEVID(card));
559 return -ENOMEM;
560 }
561 qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
562 QETH_CARD_TEXT(card, 6, "noirqpnd");
563 rc = ccw_device_start(channel->ccwdev, channel->ccw,
564 (addr_t) iob, 0, 0);
565 if (rc) {
566 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
567 rc, CARD_DEVID(card));
568 atomic_set(&channel->irq_pending, 0);
569 qeth_release_buffer(channel, iob);
570 card->read_or_write_problem = 1;
571 qeth_schedule_recovery(card);
572 wake_up(&card->wait_q);
573 }
574 return rc;
575 }
576
577 static int qeth_issue_next_read(struct qeth_card *card)
578 {
579 int ret;
580
581 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
582 ret = __qeth_issue_next_read(card);
583 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
584
585 return ret;
586 }
587
588 static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
589 {
590 struct qeth_reply *reply;
591
592 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
593 if (reply) {
594 refcount_set(&reply->refcnt, 1);
595 atomic_set(&reply->received, 0);
596 }
597 return reply;
598 }
599
600 static void qeth_get_reply(struct qeth_reply *reply)
601 {
602 refcount_inc(&reply->refcnt);
603 }
604
605 static void qeth_put_reply(struct qeth_reply *reply)
606 {
607 if (refcount_dec_and_test(&reply->refcnt))
608 kfree(reply);
609 }
610
611 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
612 struct qeth_card *card)
613 {
614 const char *ipa_name;
615 int com = cmd->hdr.command;
616 ipa_name = qeth_get_ipa_cmd_name(com);
617
618 if (rc)
619 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
620 ipa_name, com, CARD_DEVID(card), rc,
621 qeth_get_ipa_msg(rc));
622 else
623 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
624 ipa_name, com, CARD_DEVID(card));
625 }
626
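/* Pre-process an IPA message from the device. Genuine command replies
 * are passed through after their return code has been logged.
 * Unsolicited events (STOPLAN, STARTLAN, bridgeport and address-change
 * notifications) are either handled here, returning NULL, or handed
 * back to the caller for further processing.
 */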
627 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
628 struct qeth_ipa_cmd *cmd)
629 {
630 QETH_CARD_TEXT(card, 5, "chkipad");
631
632 if (IS_IPA_REPLY(cmd)) {
633 if (cmd->hdr.command != IPA_CMD_SETCCID &&
634 cmd->hdr.command != IPA_CMD_DELCCID &&
635 cmd->hdr.command != IPA_CMD_MODCCID &&
636 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
637 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
638 return cmd;
639 }
640
641 /* handle unsolicited event: */
642 switch (cmd->hdr.command) {
643 case IPA_CMD_STOPLAN:
644 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
645 dev_err(&card->gdev->dev,
646 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
647 QETH_CARD_IFNAME(card));
648 qeth_close_dev(card);
649 } else {
650 dev_warn(&card->gdev->dev,
651 "The link for interface %s on CHPID 0x%X failed\n",
652 QETH_CARD_IFNAME(card), card->info.chpid);
653 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
654 netif_carrier_off(card->dev);
655 }
656 return NULL;
657 case IPA_CMD_STARTLAN:
658 dev_info(&card->gdev->dev,
659 "The link for %s on CHPID 0x%X has been restored\n",
660 QETH_CARD_IFNAME(card), card->info.chpid);
661 if (card->info.hwtrap)
662 card->info.hwtrap = 2;
663 qeth_schedule_recovery(card);
664 return NULL;
665 case IPA_CMD_SETBRIDGEPORT_IQD:
666 case IPA_CMD_SETBRIDGEPORT_OSA:
667 case IPA_CMD_ADDRESS_CHANGE_NOTIF:
668 if (card->discipline->control_event_handler(card, cmd))
669 return cmd;
670 return NULL;
671 case IPA_CMD_MODCCID:
672 return cmd;
673 case IPA_CMD_REGISTER_LOCAL_ADDR:
674 QETH_CARD_TEXT(card, 3, "irla");
675 return NULL;
676 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
677 QETH_CARD_TEXT(card, 3, "urla");
678 return NULL;
679 default:
680 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
681 return cmd;
682 }
683 }
684
685 void qeth_clear_ipacmd_list(struct qeth_card *card)
686 {
687 struct qeth_reply *reply, *r;
688 unsigned long flags;
689
690 QETH_CARD_TEXT(card, 4, "clipalst");
691
692 spin_lock_irqsave(&card->lock, flags);
693 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
694 qeth_get_reply(reply);
695 reply->rc = -EIO;
696 atomic_inc(&reply->received);
697 list_del_init(&reply->list);
698 wake_up(&reply->wait_q);
699 qeth_put_reply(reply);
700 }
701 spin_unlock_irqrestore(&card->lock, flags);
702 }
703 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
704
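/* Check a buffer received on a control channel for an IDX TERMINATE
 * indication. Returns 0 for regular data, -EPERM for the z/VM "OSI
 * layer required" cause code 0xf6 and -EIO for any other termination
 * cause.
 */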
705 static int qeth_check_idx_response(struct qeth_card *card,
706 unsigned char *buffer)
707 {
708 if (!buffer)
709 return 0;
710
711 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
712 if ((buffer[2] & 0xc0) == 0xc0) {
713 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
714 buffer[4]);
715 QETH_CARD_TEXT(card, 2, "ckidxres");
716 QETH_CARD_TEXT(card, 2, " idxterm");
717 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
718 if (buffer[4] == 0xf6) {
719 dev_err(&card->gdev->dev,
720 "The qeth device is not configured "
721 "for the OSI layer required by z/VM\n");
722 return -EPERM;
723 }
724 return -EIO;
725 }
726 return 0;
727 }
728
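/* Scan the channel's ring of command buffers for a free iob, starting
 * at io_buf_no. Must be called with the iob_lock held; returns NULL
 * when all QETH_CMD_BUFFER_NO buffers are in use.
 */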
729 static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
730 {
731 __u8 index;
732
733 index = channel->io_buf_no;
734 do {
735 if (channel->iob[index].state == BUF_STATE_FREE) {
736 channel->iob[index].state = BUF_STATE_LOCKED;
737 channel->io_buf_no = (channel->io_buf_no + 1) %
738 QETH_CMD_BUFFER_NO;
739 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
740 return channel->iob + index;
741 }
742 index = (index + 1) % QETH_CMD_BUFFER_NO;
743 } while (index != channel->io_buf_no);
744
745 return NULL;
746 }
747
748 void qeth_release_buffer(struct qeth_channel *channel,
749 struct qeth_cmd_buffer *iob)
750 {
751 unsigned long flags;
752
753 spin_lock_irqsave(&channel->iob_lock, flags);
754 iob->state = BUF_STATE_FREE;
755 iob->callback = qeth_send_control_data_cb;
756 iob->rc = 0;
757 spin_unlock_irqrestore(&channel->iob_lock, flags);
758 wake_up(&channel->wait_q);
759 }
760 EXPORT_SYMBOL_GPL(qeth_release_buffer);
761
762 static void qeth_release_buffer_cb(struct qeth_card *card,
763 struct qeth_channel *channel,
764 struct qeth_cmd_buffer *iob)
765 {
766 qeth_release_buffer(channel, iob);
767 }
768
769 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
770 {
771 struct qeth_cmd_buffer *buffer = NULL;
772 unsigned long flags;
773
774 spin_lock_irqsave(&channel->iob_lock, flags);
775 buffer = __qeth_get_buffer(channel);
776 spin_unlock_irqrestore(&channel->iob_lock, flags);
777 return buffer;
778 }
779
780 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
781 {
782 struct qeth_cmd_buffer *buffer;
783 wait_event(channel->wait_q,
784 ((buffer = qeth_get_buffer(channel)) != NULL));
785 return buffer;
786 }
787 EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
788
789 void qeth_clear_cmd_buffers(struct qeth_channel *channel)
790 {
791 int cnt;
792
793 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
794 qeth_release_buffer(channel, &channel->iob[cnt]);
795 channel->io_buf_no = 0;
796 }
797 EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
798
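/* Default completion callback for control-channel buffers: validate the
 * IDX response, let qeth_check_ipa_data() filter out unsolicited
 * events, and then match the message against the waiters on
 * card->cmd_waiter_list by sequence number. The waiter's callback
 * decides, via its return value, whether the reply stays queued for
 * additional response parts.
 */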
799 static void qeth_send_control_data_cb(struct qeth_card *card,
800 struct qeth_channel *channel,
801 struct qeth_cmd_buffer *iob)
802 {
803 struct qeth_ipa_cmd *cmd = NULL;
804 struct qeth_reply *reply, *r;
805 unsigned long flags;
806 int keep_reply;
807 int rc = 0;
808
809 QETH_CARD_TEXT(card, 4, "sndctlcb");
810 rc = qeth_check_idx_response(card, iob->data);
811 switch (rc) {
812 case 0:
813 break;
814 case -EIO:
815 qeth_clear_ipacmd_list(card);
816 qeth_schedule_recovery(card);
817 /* fall through */
818 default:
819 goto out;
820 }
821
822 if (IS_IPA(iob->data)) {
823 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
824 cmd = qeth_check_ipa_data(card, cmd);
825 if (!cmd)
826 goto out;
827 if (IS_OSN(card) && card->osn_info.assist_cb &&
828 cmd->hdr.command != IPA_CMD_STARTLAN) {
829 card->osn_info.assist_cb(card->dev, cmd);
830 goto out;
831 }
832 } else {
833 /* non-IPA commands should only flow during initialization */
834 if (card->state != CARD_STATE_DOWN)
835 goto out;
836 }
837
838 spin_lock_irqsave(&card->lock, flags);
839 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
840 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
841 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
842 qeth_get_reply(reply);
843 list_del_init(&reply->list);
844 spin_unlock_irqrestore(&card->lock, flags);
845 keep_reply = 0;
846 if (reply->callback != NULL) {
847 if (cmd) {
848 reply->offset = (__u16)((char *)cmd -
849 (char *)iob->data);
850 keep_reply = reply->callback(card,
851 reply,
852 (unsigned long)cmd);
853 } else
854 keep_reply = reply->callback(card,
855 reply,
856 (unsigned long)iob);
857 }
858 if (cmd)
859 reply->rc = (u16) cmd->hdr.return_code;
860 else if (iob->rc)
861 reply->rc = iob->rc;
862 if (keep_reply) {
863 spin_lock_irqsave(&card->lock, flags);
864 list_add_tail(&reply->list,
865 &card->cmd_waiter_list);
866 spin_unlock_irqrestore(&card->lock, flags);
867 } else {
868 atomic_inc(&reply->received);
869 wake_up(&reply->wait_q);
870 }
871 qeth_put_reply(reply);
872 goto out;
873 }
874 }
875 spin_unlock_irqrestore(&card->lock, flags);
876 out:
877 memcpy(&card->seqno.pdu_hdr_ack,
878 QETH_PDU_HEADER_SEQ_NO(iob->data),
879 QETH_SEQ_NO_LENGTH);
880 qeth_release_buffer(channel, iob);
881 }
882
883 static int qeth_set_thread_start_bit(struct qeth_card *card,
884 unsigned long thread)
885 {
886 unsigned long flags;
887
888 spin_lock_irqsave(&card->thread_mask_lock, flags);
889 if (!(card->thread_allowed_mask & thread) ||
890 (card->thread_start_mask & thread)) {
891 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
892 return -EPERM;
893 }
894 card->thread_start_mask |= thread;
895 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
896 return 0;
897 }
898
899 void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
900 {
901 unsigned long flags;
902
903 spin_lock_irqsave(&card->thread_mask_lock, flags);
904 card->thread_start_mask &= ~thread;
905 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
906 wake_up(&card->wait_q);
907 }
908 EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
909
910 void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
911 {
912 unsigned long flags;
913
914 spin_lock_irqsave(&card->thread_mask_lock, flags);
915 card->thread_running_mask &= ~thread;
916 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
917 wake_up_all(&card->wait_q);
918 }
919 EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
920
921 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
922 {
923 unsigned long flags;
924 int rc = 0;
925
926 spin_lock_irqsave(&card->thread_mask_lock, flags);
927 if (card->thread_start_mask & thread) {
928 if ((card->thread_allowed_mask & thread) &&
929 !(card->thread_running_mask & thread)) {
930 rc = 1;
931 card->thread_start_mask &= ~thread;
932 card->thread_running_mask |= thread;
933 } else
934 rc = -EPERM;
935 }
936 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
937 return rc;
938 }
939
940 int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
941 {
942 int rc = 0;
943
944 wait_event(card->wait_q,
945 (rc = __qeth_do_run_thread(card, thread)) >= 0);
946 return rc;
947 }
948 EXPORT_SYMBOL_GPL(qeth_do_run_thread);
949
950 void qeth_schedule_recovery(struct qeth_card *card)
951 {
952 QETH_CARD_TEXT(card, 2, "startrec");
953 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
954 schedule_work(&card->kernel_thread_starter);
955 }
956 EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
957
958 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
959 struct irb *irb)
960 {
961 int dstat, cstat;
962 char *sense;
963
964 sense = (char *) irb->ecw;
965 cstat = irb->scsw.cmd.cstat;
966 dstat = irb->scsw.cmd.dstat;
967
968 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
969 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
970 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
971 QETH_CARD_TEXT(card, 2, "CGENCHK");
972 dev_warn(&cdev->dev, "The qeth device driver "
973 "failed to recover an error on the device\n");
974 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
975 CCW_DEVID(cdev), dstat, cstat);
976 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
977 16, 1, irb, 64, 1);
978 return 1;
979 }
980
981 if (dstat & DEV_STAT_UNIT_CHECK) {
982 if (sense[SENSE_RESETTING_EVENT_BYTE] &
983 SENSE_RESETTING_EVENT_FLAG) {
984 QETH_CARD_TEXT(card, 2, "REVIND");
985 return 1;
986 }
987 if (sense[SENSE_COMMAND_REJECT_BYTE] &
988 SENSE_COMMAND_REJECT_FLAG) {
989 QETH_CARD_TEXT(card, 2, "CMDREJi");
990 return 1;
991 }
992 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
993 QETH_CARD_TEXT(card, 2, "AFFE");
994 return 1;
995 }
996 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
997 QETH_CARD_TEXT(card, 2, "ZEROSEN");
998 return 0;
999 }
1000 QETH_CARD_TEXT(card, 2, "DGENCHK");
1001 return 1;
1002 }
1003 return 0;
1004 }
1005
1006 static long qeth_check_irb_error(struct qeth_card *card,
1007 struct ccw_device *cdev, unsigned long intparm,
1008 struct irb *irb)
1009 {
1010 if (!IS_ERR(irb))
1011 return 0;
1012
1013 switch (PTR_ERR(irb)) {
1014 case -EIO:
1015 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
1016 CCW_DEVID(cdev));
1017 QETH_CARD_TEXT(card, 2, "ckirberr");
1018 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
1019 break;
1020 case -ETIMEDOUT:
1021 dev_warn(&cdev->dev, "A hardware operation timed out"
1022 " on the device\n");
1023 QETH_CARD_TEXT(card, 2, "ckirberr");
1024 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
1025 if (intparm == QETH_RCD_PARM) {
1026 if (card->data.ccwdev == cdev) {
1027 card->data.state = CH_STATE_DOWN;
1028 wake_up(&card->wait_q);
1029 }
1030 }
1031 break;
1032 default:
1033 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1034 PTR_ERR(irb), CCW_DEVID(cdev));
1035 QETH_CARD_TEXT(card, 2, "ckirberr");
1036 QETH_CARD_TEXT(card, 2, " rc???");
1037 }
1038 return PTR_ERR(irb);
1039 }
1040
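/* Interrupt handler for all three CCW devices of a card: map the ccw
 * device to its qeth channel, screen the irb for error conditions,
 * update the channel state and finally invoke the completion callback
 * of the command buffer, if any, that was passed as the interruption
 * parameter.
 */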
1041 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1042 struct irb *irb)
1043 {
1044 int rc;
1045 int cstat, dstat;
1046 struct qeth_cmd_buffer *iob = NULL;
1047 struct ccwgroup_device *gdev;
1048 struct qeth_channel *channel;
1049 struct qeth_card *card;
1050
1051 /* while we hold the ccwdev lock, this stays valid: */
1052 gdev = dev_get_drvdata(&cdev->dev);
1053 card = dev_get_drvdata(&gdev->dev);
1054 if (!card)
1055 return;
1056
1057 QETH_CARD_TEXT(card, 5, "irq");
1058
1059 if (card->read.ccwdev == cdev) {
1060 channel = &card->read;
1061 QETH_CARD_TEXT(card, 5, "read");
1062 } else if (card->write.ccwdev == cdev) {
1063 channel = &card->write;
1064 QETH_CARD_TEXT(card, 5, "write");
1065 } else {
1066 channel = &card->data;
1067 QETH_CARD_TEXT(card, 5, "data");
1068 }
1069
1070 if (qeth_intparm_is_iob(intparm))
1071 iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1072
1073 if (qeth_check_irb_error(card, cdev, intparm, irb)) {
1074 /* IO was terminated, free its resources. */
1075 if (iob)
1076 qeth_release_buffer(iob->channel, iob);
1077 atomic_set(&channel->irq_pending, 0);
1078 wake_up(&card->wait_q);
1079 return;
1080 }
1081
1082 atomic_set(&channel->irq_pending, 0);
1083
1084 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
1085 channel->state = CH_STATE_STOPPED;
1086
1087 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
1088 channel->state = CH_STATE_HALTED;
1089
1090         /* let's wake up immediately on the data channel */
1091 if ((channel == &card->data) && (intparm != 0) &&
1092 (intparm != QETH_RCD_PARM))
1093 goto out;
1094
1095 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
1096 QETH_CARD_TEXT(card, 6, "clrchpar");
1097 /* we don't have to handle this further */
1098 intparm = 0;
1099 }
1100 if (intparm == QETH_HALT_CHANNEL_PARM) {
1101 QETH_CARD_TEXT(card, 6, "hltchpar");
1102 /* we don't have to handle this further */
1103 intparm = 0;
1104 }
1105
1106 cstat = irb->scsw.cmd.cstat;
1107 dstat = irb->scsw.cmd.dstat;
1108
1109 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1110 (dstat & DEV_STAT_UNIT_CHECK) ||
1111 (cstat)) {
1112 if (irb->esw.esw0.erw.cons) {
1113 dev_warn(&channel->ccwdev->dev,
1114 "The qeth device driver failed to recover "
1115 "an error on the device\n");
1116 QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1117 CCW_DEVID(channel->ccwdev), cstat,
1118 dstat);
1119 print_hex_dump(KERN_WARNING, "qeth: irb ",
1120 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1121 print_hex_dump(KERN_WARNING, "qeth: sense data ",
1122 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1123 }
1124 if (intparm == QETH_RCD_PARM) {
1125 channel->state = CH_STATE_DOWN;
1126 goto out;
1127 }
1128 rc = qeth_get_problem(card, cdev, irb);
1129 if (rc) {
1130 card->read_or_write_problem = 1;
1131 if (iob)
1132 qeth_release_buffer(iob->channel, iob);
1133 qeth_clear_ipacmd_list(card);
1134 qeth_schedule_recovery(card);
1135 goto out;
1136 }
1137 }
1138
1139 if (intparm == QETH_RCD_PARM) {
1140 channel->state = CH_STATE_RCD_DONE;
1141 goto out;
1142 }
1143 if (channel == &card->data)
1144 return;
1145 if (channel == &card->read &&
1146 channel->state == CH_STATE_UP)
1147 __qeth_issue_next_read(card);
1148
1149 if (iob && iob->callback)
1150 iob->callback(card, iob->channel, iob);
1151
1152 out:
1153 wake_up(&card->wait_q);
1154 return;
1155 }
1156
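/* Push a TX notification to every af_iucv socket that still has an skb
 * in this output buffer.
 */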
1157 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1158 struct qeth_qdio_out_buffer *buf,
1159 enum iucv_tx_notify notification)
1160 {
1161 struct sk_buff *skb;
1162
1163 skb_queue_walk(&buf->skb_list, skb) {
1164 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1165 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1166 if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
1167 iucv_sk(skb->sk)->sk_txnotify(skb, notification);
1168 }
1169 }
1170
1171 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
1172 {
1173 /* release may never happen from within CQ tasklet scope */
1174 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1175
1176 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1177 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
1178
1179 __skb_queue_purge(&buf->skb_list);
1180 }
1181
1182 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1183 struct qeth_qdio_out_buffer *buf)
1184 {
1185 int i;
1186
1187 /* is PCI flag set on buffer? */
1188 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
1189 atomic_dec(&queue->set_pci_flags_count);
1190
1191 qeth_release_skbs(buf);
1192
1193 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
1194 if (buf->buffer->element[i].addr && buf->is_header[i])
1195 kmem_cache_free(qeth_core_header_cache,
1196 buf->buffer->element[i].addr);
1197 buf->is_header[i] = 0;
1198 }
1199
1200 qeth_scrub_qdio_buffer(buf->buffer,
1201 QETH_MAX_BUFFER_ELEMENTS(queue->card));
1202 buf->next_element_to_fill = 0;
1203 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1204 }
1205
1206 static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
1207 {
1208 int j;
1209
1210 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1211 if (!q->bufs[j])
1212 continue;
1213 qeth_cleanup_handled_pending(q, j, 1);
1214 qeth_clear_output_buffer(q, q->bufs[j]);
1215 if (free) {
1216 kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
1217 q->bufs[j] = NULL;
1218 }
1219 }
1220 }
1221
1222 void qeth_clear_qdio_buffers(struct qeth_card *card)
1223 {
1224 int i;
1225
1226 QETH_CARD_TEXT(card, 2, "clearqdbf");
1227 /* clear outbound buffers to free skbs */
1228 for (i = 0; i < card->qdio.no_out_queues; ++i) {
1229 if (card->qdio.out_qs[i]) {
1230 qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
1231 }
1232 }
1233 }
1234 EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
1235
1236 static void qeth_free_buffer_pool(struct qeth_card *card)
1237 {
1238 struct qeth_buffer_pool_entry *pool_entry, *tmp;
1239 int i = 0;
1240 list_for_each_entry_safe(pool_entry, tmp,
1241                                  &card->qdio.init_pool.entry_list, init_list) {
1242 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
1243 free_page((unsigned long)pool_entry->elements[i]);
1244 list_del(&pool_entry->init_list);
1245 kfree(pool_entry);
1246 }
1247 }
1248
1249 static void qeth_clean_channel(struct qeth_channel *channel)
1250 {
1251 struct ccw_device *cdev = channel->ccwdev;
1252 int cnt;
1253
1254 QETH_DBF_TEXT(SETUP, 2, "freech");
1255
1256 spin_lock_irq(get_ccwdev_lock(cdev));
1257 cdev->handler = NULL;
1258 spin_unlock_irq(get_ccwdev_lock(cdev));
1259
1260 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1261 kfree(channel->iob[cnt].data);
1262 kfree(channel->ccw);
1263 }
1264
1265 static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
1266 {
1267 struct ccw_device *cdev = channel->ccwdev;
1268 int cnt;
1269
1270 QETH_DBF_TEXT(SETUP, 2, "setupch");
1271
1272 channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1273 if (!channel->ccw)
1274 return -ENOMEM;
1275 channel->state = CH_STATE_DOWN;
1276 atomic_set(&channel->irq_pending, 0);
1277 init_waitqueue_head(&channel->wait_q);
1278
1279 spin_lock_irq(get_ccwdev_lock(cdev));
1280 cdev->handler = qeth_irq;
1281 spin_unlock_irq(get_ccwdev_lock(cdev));
1282
1283 if (!alloc_buffers)
1284 return 0;
1285
1286 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
1287 channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
1288 GFP_KERNEL | GFP_DMA);
1289 if (channel->iob[cnt].data == NULL)
1290 break;
1291 channel->iob[cnt].state = BUF_STATE_FREE;
1292 channel->iob[cnt].channel = channel;
1293 channel->iob[cnt].callback = qeth_send_control_data_cb;
1294 channel->iob[cnt].rc = 0;
1295 }
1296 if (cnt < QETH_CMD_BUFFER_NO) {
1297 qeth_clean_channel(channel);
1298 return -ENOMEM;
1299 }
1300 channel->io_buf_no = 0;
1301 spin_lock_init(&channel->iob_lock);
1302
1303 return 0;
1304 }
1305
1306 static void qeth_set_single_write_queues(struct qeth_card *card)
1307 {
1308 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1309 (card->qdio.no_out_queues == 4))
1310 qeth_free_qdio_buffers(card);
1311
1312 card->qdio.no_out_queues = 1;
1313 if (card->qdio.default_out_queue != 0)
1314 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1315
1316 card->qdio.default_out_queue = 0;
1317 }
1318
1319 static void qeth_set_multiple_write_queues(struct qeth_card *card)
1320 {
1321 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1322 (card->qdio.no_out_queues == 1)) {
1323 qeth_free_qdio_buffers(card);
1324 card->qdio.default_out_queue = 2;
1325 }
1326 card->qdio.no_out_queues = 4;
1327 }
1328
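/* Read the format-0 channel-path descriptor of the data device and
 * derive from it the function level and, for OSA devices, whether the
 * CHPID supports one or four outbound queues.
 */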
1329 static void qeth_update_from_chp_desc(struct qeth_card *card)
1330 {
1331 struct ccw_device *ccwdev;
1332 struct channel_path_desc_fmt0 *chp_dsc;
1333
1334 QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1335
1336 ccwdev = card->data.ccwdev;
1337 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1338 if (!chp_dsc)
1339 goto out;
1340
1341 card->info.func_level = 0x4100 + chp_dsc->desc;
1342 if (card->info.type == QETH_CARD_TYPE_IQD)
1343 goto out;
1344
1345 /* CHPP field bit 6 == 1 -> single queue */
1346 if ((chp_dsc->chpp & 0x02) == 0x02)
1347 qeth_set_single_write_queues(card);
1348 else
1349 qeth_set_multiple_write_queues(card);
1350 out:
1351 kfree(chp_dsc);
1352 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1353 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1354 }
1355
1356 static void qeth_init_qdio_info(struct qeth_card *card)
1357 {
1358 QETH_DBF_TEXT(SETUP, 4, "intqdinf");
1359 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1360 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1361 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1362 card->qdio.no_out_queues = QETH_MAX_QUEUES;
1363
1364 /* inbound */
1365 card->qdio.no_in_queues = 1;
1366 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1367 if (card->info.type == QETH_CARD_TYPE_IQD)
1368 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1369 else
1370 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1371 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1372 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1373 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1374 }
1375
1376 static void qeth_set_initial_options(struct qeth_card *card)
1377 {
1378 card->options.route4.type = NO_ROUTER;
1379 card->options.route6.type = NO_ROUTER;
1380 card->options.rx_sg_cb = QETH_RX_SG_CB;
1381 card->options.isolation = ISOLATION_MODE_NONE;
1382 card->options.cq = QETH_CQ_DISABLED;
1383 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1384 }
1385
1386 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1387 {
1388 unsigned long flags;
1389 int rc = 0;
1390
1391 spin_lock_irqsave(&card->thread_mask_lock, flags);
1392 QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
1393 (u8) card->thread_start_mask,
1394 (u8) card->thread_allowed_mask,
1395 (u8) card->thread_running_mask);
1396 rc = (card->thread_start_mask & thread);
1397 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1398 return rc;
1399 }
1400
1401 static void qeth_start_kernel_thread(struct work_struct *work)
1402 {
1403 struct task_struct *ts;
1404 struct qeth_card *card = container_of(work, struct qeth_card,
1405 kernel_thread_starter);
1406         QETH_CARD_TEXT(card, 2, "strthrd");
1407
1408 if (card->read.state != CH_STATE_UP &&
1409 card->write.state != CH_STATE_UP)
1410 return;
1411 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1412 ts = kthread_run(card->discipline->recover, (void *)card,
1413 "qeth_recover");
1414 if (IS_ERR(ts)) {
1415 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1416 qeth_clear_thread_running_bit(card,
1417 QETH_RECOVER_THREAD);
1418 }
1419 }
1420 }
1421
1422 static void qeth_buffer_reclaim_work(struct work_struct *);
1423 static void qeth_setup_card(struct qeth_card *card)
1424 {
1425 QETH_DBF_TEXT(SETUP, 2, "setupcrd");
1426 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1427
1428 card->info.type = CARD_RDEV(card)->id.driver_info;
1429 card->state = CARD_STATE_DOWN;
1430 spin_lock_init(&card->mclock);
1431 spin_lock_init(&card->lock);
1432 spin_lock_init(&card->ip_lock);
1433 spin_lock_init(&card->thread_mask_lock);
1434 mutex_init(&card->conf_mutex);
1435 mutex_init(&card->discipline_mutex);
1436 mutex_init(&card->vid_list_mutex);
1437 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1438 INIT_LIST_HEAD(&card->cmd_waiter_list);
1439 init_waitqueue_head(&card->wait_q);
1440 qeth_set_initial_options(card);
1441 /* IP address takeover */
1442 INIT_LIST_HEAD(&card->ipato.entries);
1443 qeth_init_qdio_info(card);
1444 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1445 INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1446 }
1447
1448 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1449 {
1450 struct qeth_card *card = container_of(slr, struct qeth_card,
1451 qeth_service_level);
1452 if (card->info.mcl_level[0])
1453 seq_printf(m, "qeth: %s firmware level %s\n",
1454 CARD_BUS_ID(card), card->info.mcl_level);
1455 }
1456
1457 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1458 {
1459 struct qeth_card *card;
1460
1461 QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1462 card = kzalloc(sizeof(*card), GFP_KERNEL);
1463 if (!card)
1464 goto out;
1465 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1466
1467 card->gdev = gdev;
1468 dev_set_drvdata(&gdev->dev, card);
1469 CARD_RDEV(card) = gdev->cdev[0];
1470 CARD_WDEV(card) = gdev->cdev[1];
1471 CARD_DDEV(card) = gdev->cdev[2];
1472 if (qeth_setup_channel(&card->read, true))
1473 goto out_ip;
1474 if (qeth_setup_channel(&card->write, true))
1475 goto out_channel;
1476 if (qeth_setup_channel(&card->data, false))
1477 goto out_data;
1478 card->qeth_service_level.seq_print = qeth_core_sl_print;
1479 register_service_level(&card->qeth_service_level);
1480 return card;
1481
1482 out_data:
1483 qeth_clean_channel(&card->write);
1484 out_channel:
1485 qeth_clean_channel(&card->read);
1486 out_ip:
1487 dev_set_drvdata(&gdev->dev, NULL);
1488 kfree(card);
1489 out:
1490 return NULL;
1491 }
1492
1493 static int qeth_clear_channel(struct qeth_card *card,
1494 struct qeth_channel *channel)
1495 {
1496 int rc;
1497
1498 QETH_CARD_TEXT(card, 3, "clearch");
1499 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1500 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1501 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1502
1503 if (rc)
1504 return rc;
1505 rc = wait_event_interruptible_timeout(card->wait_q,
1506 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1507 if (rc == -ERESTARTSYS)
1508 return rc;
1509 if (channel->state != CH_STATE_STOPPED)
1510 return -ETIME;
1511 channel->state = CH_STATE_DOWN;
1512 return 0;
1513 }
1514
1515 static int qeth_halt_channel(struct qeth_card *card,
1516 struct qeth_channel *channel)
1517 {
1518 int rc;
1519
1520 QETH_CARD_TEXT(card, 3, "haltch");
1521 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1522 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1523 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1524
1525 if (rc)
1526 return rc;
1527 rc = wait_event_interruptible_timeout(card->wait_q,
1528 channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1529 if (rc == -ERESTARTSYS)
1530 return rc;
1531 if (channel->state != CH_STATE_HALTED)
1532 return -ETIME;
1533 return 0;
1534 }
1535
1536 static int qeth_halt_channels(struct qeth_card *card)
1537 {
1538 int rc1 = 0, rc2 = 0, rc3 = 0;
1539
1540 QETH_CARD_TEXT(card, 3, "haltchs");
1541 rc1 = qeth_halt_channel(card, &card->read);
1542 rc2 = qeth_halt_channel(card, &card->write);
1543 rc3 = qeth_halt_channel(card, &card->data);
1544 if (rc1)
1545 return rc1;
1546 if (rc2)
1547 return rc2;
1548 return rc3;
1549 }
1550
1551 static int qeth_clear_channels(struct qeth_card *card)
1552 {
1553 int rc1 = 0, rc2 = 0, rc3 = 0;
1554
1555 QETH_CARD_TEXT(card, 3, "clearchs");
1556 rc1 = qeth_clear_channel(card, &card->read);
1557 rc2 = qeth_clear_channel(card, &card->write);
1558 rc3 = qeth_clear_channel(card, &card->data);
1559 if (rc1)
1560 return rc1;
1561 if (rc2)
1562 return rc2;
1563 return rc3;
1564 }
1565
1566 static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1567 {
1568 int rc = 0;
1569
1570 QETH_CARD_TEXT(card, 3, "clhacrd");
1571
1572 if (halt)
1573 rc = qeth_halt_channels(card);
1574 if (rc)
1575 return rc;
1576 return qeth_clear_channels(card);
1577 }
1578
1579 int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1580 {
1581 int rc = 0;
1582
1583 QETH_CARD_TEXT(card, 3, "qdioclr");
1584 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1585 QETH_QDIO_CLEANING)) {
1586 case QETH_QDIO_ESTABLISHED:
1587 if (card->info.type == QETH_CARD_TYPE_IQD)
1588 rc = qdio_shutdown(CARD_DDEV(card),
1589 QDIO_FLAG_CLEANUP_USING_HALT);
1590 else
1591 rc = qdio_shutdown(CARD_DDEV(card),
1592 QDIO_FLAG_CLEANUP_USING_CLEAR);
1593 if (rc)
1594 QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1595 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1596 break;
1597 case QETH_QDIO_CLEANING:
1598 return rc;
1599 default:
1600 break;
1601 }
1602 rc = qeth_clear_halt_card(card, use_halt);
1603 if (rc)
1604 QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1605 card->state = CARD_STATE_DOWN;
1606 return rc;
1607 }
1608 EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1609
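/* Read the device's configuration data via the RCD command advertised
 * in its extended SenseID data. On success, *buffer and *length
 * describe a DMA-capable buffer that the caller must kfree().
 */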
1610 static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1611 int *length)
1612 {
1613 struct ciw *ciw;
1614 char *rcd_buf;
1615 int ret;
1616 struct qeth_channel *channel = &card->data;
1617
1618 /*
1619 * scan for RCD command in extended SenseID data
1620 */
1621 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1622 if (!ciw || ciw->cmd == 0)
1623 return -EOPNOTSUPP;
1624 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
1625 if (!rcd_buf)
1626 return -ENOMEM;
1627
1628 qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
1629 channel->state = CH_STATE_RCD;
1630 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1631 ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1632 QETH_RCD_PARM, LPM_ANYPATH, 0,
1633 QETH_RCD_TIMEOUT);
1634 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1635 if (!ret)
1636 wait_event(card->wait_q,
1637 (channel->state == CH_STATE_RCD_DONE ||
1638 channel->state == CH_STATE_DOWN));
1639 if (channel->state == CH_STATE_DOWN)
1640 ret = -EIO;
1641 else
1642 channel->state = CH_STATE_DOWN;
1643 if (ret) {
1644 kfree(rcd_buf);
1645 *buffer = NULL;
1646 *length = 0;
1647 } else {
1648 *length = ciw->count;
1649 *buffer = rcd_buf;
1650 }
1651 return ret;
1652 }
1653
1654 static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
1655 {
1656 QETH_DBF_TEXT(SETUP, 2, "cfgunit");
1657 card->info.chpid = prcd[30];
1658 card->info.unit_addr2 = prcd[31];
1659 card->info.cula = prcd[63];
1660 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1661 (prcd[0x11] == _ascebc['M']));
1662 }
1663
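/* Under z/VM, query CP via DIAG X'26C' for the VNIC attributes of this
 * device and translate the reported protocol into a layer-2 or layer-3
 * discipline. Returns QETH_DISCIPLINE_UNDETERMINED if the query fails
 * or reports an unknown protocol.
 */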
1664 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1665 {
1666 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1667 struct diag26c_vnic_resp *response = NULL;
1668 struct diag26c_vnic_req *request = NULL;
1669 struct ccw_dev_id id;
1670 char userid[80];
1671 int rc = 0;
1672
1673 QETH_DBF_TEXT(SETUP, 2, "vmlayer");
1674
1675 cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1676 if (rc)
1677 goto out;
1678
1679 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1680 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1681 if (!request || !response) {
1682 rc = -ENOMEM;
1683 goto out;
1684 }
1685
1686 ccw_device_get_id(CARD_RDEV(card), &id);
1687 request->resp_buf_len = sizeof(*response);
1688 request->resp_version = DIAG26C_VERSION6_VM65918;
1689 request->req_format = DIAG26C_VNIC_INFO;
1690 ASCEBC(userid, 8);
1691 memcpy(&request->sys_name, userid, 8);
1692 request->devno = id.devno;
1693
1694 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1695 rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1696 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1697 if (rc)
1698 goto out;
1699 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1700
1701 if (request->resp_buf_len < sizeof(*response) ||
1702 response->version != request->resp_version) {
1703 rc = -EIO;
1704 goto out;
1705 }
1706
1707 if (response->protocol == VNIC_INFO_PROT_L2)
1708 disc = QETH_DISCIPLINE_LAYER2;
1709 else if (response->protocol == VNIC_INFO_PROT_L3)
1710 disc = QETH_DISCIPLINE_LAYER3;
1711
1712 out:
1713 kfree(response);
1714 kfree(request);
1715 if (rc)
1716 QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
1717 return disc;
1718 }
1719
1720 /* Determine whether the device requires a specific layer discipline */
1721 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1722 {
1723 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1724
1725 if (card->info.type == QETH_CARD_TYPE_OSM ||
1726 card->info.type == QETH_CARD_TYPE_OSN)
1727 disc = QETH_DISCIPLINE_LAYER2;
1728 else if (card->info.guestlan)
1729 disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
1730 QETH_DISCIPLINE_LAYER3 :
1731 qeth_vm_detect_layer(card);
1732
1733 switch (disc) {
1734 case QETH_DISCIPLINE_LAYER2:
1735 QETH_DBF_TEXT(SETUP, 3, "force l2");
1736 break;
1737 case QETH_DISCIPLINE_LAYER3:
1738 QETH_DBF_TEXT(SETUP, 3, "force l3");
1739 break;
1740 default:
1741 QETH_DBF_TEXT(SETUP, 3, "force no");
1742 }
1743
1744 return disc;
1745 }
1746
1747 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1748 {
1749 QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1750
1751 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
1752 prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
1753 card->info.blkt.time_total = 0;
1754 card->info.blkt.inter_packet = 0;
1755 card->info.blkt.inter_packet_jumbo = 0;
1756 } else {
1757 card->info.blkt.time_total = 250;
1758 card->info.blkt.inter_packet = 5;
1759 card->info.blkt.inter_packet_jumbo = 15;
1760 }
1761 }
1762
1763 static void qeth_init_tokens(struct qeth_card *card)
1764 {
1765 card->token.issuer_rm_w = 0x00010103UL;
1766 card->token.cm_filter_w = 0x00010108UL;
1767 card->token.cm_connection_w = 0x0001010aUL;
1768 card->token.ulp_filter_w = 0x0001010bUL;
1769 card->token.ulp_connection_w = 0x0001010dUL;
1770 }
1771
1772 static void qeth_init_func_level(struct qeth_card *card)
1773 {
1774 switch (card->info.type) {
1775 case QETH_CARD_TYPE_IQD:
1776 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1777 break;
1778 case QETH_CARD_TYPE_OSD:
1779 case QETH_CARD_TYPE_OSN:
1780 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1781 break;
1782 default:
1783 break;
1784 }
1785 }
1786
1787 static int qeth_idx_activate_get_answer(struct qeth_card *card,
1788 struct qeth_channel *channel,
1789 void (*reply_cb)(struct qeth_card *,
1790 struct qeth_channel *,
1791 struct qeth_cmd_buffer *))
1792 {
1793 struct qeth_cmd_buffer *iob;
1794 int rc;
1795
1796 QETH_DBF_TEXT(SETUP, 2, "idxanswr");
1797 iob = qeth_get_buffer(channel);
1798 if (!iob)
1799 return -ENOMEM;
1800 iob->callback = reply_cb;
1801 qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
1802
1803 wait_event(card->wait_q,
1804 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1805 QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1806 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1807 rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1808 (addr_t) iob, 0, 0, QETH_TIMEOUT);
1809 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1810
1811 if (rc) {
1812 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
1813 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
1814 atomic_set(&channel->irq_pending, 0);
1815 qeth_release_buffer(channel, iob);
1816 wake_up(&card->wait_q);
1817 return rc;
1818 }
1819 rc = wait_event_interruptible_timeout(card->wait_q,
1820 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1821 if (rc == -ERESTARTSYS)
1822 return rc;
1823 if (channel->state != CH_STATE_UP) {
1824 rc = -ETIME;
1825 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
1826 } else
1827 rc = 0;
1828 return rc;
1829 }
1830
1831 static int qeth_idx_activate_channel(struct qeth_card *card,
1832 struct qeth_channel *channel,
1833 void (*reply_cb)(struct qeth_card *,
1834 struct qeth_channel *,
1835 struct qeth_cmd_buffer *))
1836 {
1837 struct qeth_cmd_buffer *iob;
1838 __u16 temp;
1839 __u8 tmp;
1840 int rc;
1841 struct ccw_dev_id temp_devid;
1842
1843 QETH_DBF_TEXT(SETUP, 2, "idxactch");
1844
1845 iob = qeth_get_buffer(channel);
1846 if (!iob)
1847 return -ENOMEM;
1848 iob->callback = reply_cb;
1849 qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
1850 iob->data);
1851 if (channel == &card->write) {
1852 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1853 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1854 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1855 card->seqno.trans_hdr++;
1856 } else {
1857 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1858 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1859 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1860 }
1861 tmp = ((u8)card->dev->dev_port) | 0x80;
1862 memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
1863 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1864 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
1865 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1866 &card->info.func_level, sizeof(__u16));
1867 ccw_device_get_id(CARD_DDEV(card), &temp_devid);
1868 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
1869 temp = (card->info.cula << 8) + card->info.unit_addr2;
1870 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1871
1872 wait_event(card->wait_q,
1873 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1874 QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1875 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1876 rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1877 (addr_t) iob, 0, 0, QETH_TIMEOUT);
1878 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1879
1880 if (rc) {
1881 QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
1882 rc);
1883 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
1884 atomic_set(&channel->irq_pending, 0);
1885 qeth_release_buffer(channel, iob);
1886 wake_up(&card->wait_q);
1887 return rc;
1888 }
1889 rc = wait_event_interruptible_timeout(card->wait_q,
1890 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1891 if (rc == -ERESTARTSYS)
1892 return rc;
1893 if (channel->state != CH_STATE_ACTIVATING) {
1894 dev_warn(&channel->ccwdev->dev, "The qeth device driver"
1895 " failed to recover an error on the device\n");
1896 QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
1897 CCW_DEVID(channel->ccwdev));
1898 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
1899 return -ETIME;
1900 }
1901 return qeth_idx_activate_get_answer(card, channel, reply_cb);
1902 }
1903
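/* Predict the function level that the peer will report for a given
 * local function level, so that qeth_idx_read_cb() and
 * qeth_idx_write_cb() can compare the IDX reply against it.
 */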
1904 static int qeth_peer_func_level(int level)
1905 {
1906 if ((level & 0xff) == 8)
1907 return (level & 0xff) + 0x400;
1908 if (((level >> 8) & 3) == 1)
1909 return (level & 0xff) + 0x200;
1910 return level;
1911 }
1912
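/* IDX ACTIVATE completion callback for the write channel. The first
 * interrupt merely advances the channel state to ACTIVATING; the actual
 * reply is then checked for a negative cause code and a function-level
 * mismatch before the channel is marked UP.
 */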
1913 static void qeth_idx_write_cb(struct qeth_card *card,
1914 struct qeth_channel *channel,
1915 struct qeth_cmd_buffer *iob)
1916 {
1917 __u16 temp;
1918
1919 QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
1920
1921 if (channel->state == CH_STATE_DOWN) {
1922 channel->state = CH_STATE_ACTIVATING;
1923 goto out;
1924 }
1925
1926 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1927 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
1928 dev_err(&channel->ccwdev->dev,
1929 "The adapter is used exclusively by another "
1930 "host\n");
1931 else
1932 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1933 CCW_DEVID(channel->ccwdev));
1934 goto out;
1935 }
1936 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1937 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1938 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1939 CCW_DEVID(channel->ccwdev),
1940 card->info.func_level, temp);
1941 goto out;
1942 }
1943 channel->state = CH_STATE_UP;
1944 out:
1945 qeth_release_buffer(channel, iob);
1946 }
1947
1948 static void qeth_idx_read_cb(struct qeth_card *card,
1949 struct qeth_channel *channel,
1950 struct qeth_cmd_buffer *iob)
1951 {
1952 __u16 temp;
1953
1954 QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
1955 if (channel->state == CH_STATE_DOWN) {
1956 channel->state = CH_STATE_ACTIVATING;
1957 goto out;
1958 }
1959
1960 if (qeth_check_idx_response(card, iob->data))
1961 goto out;
1962
1963 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1964 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
1965 case QETH_IDX_ACT_ERR_EXCL:
1966 dev_err(&channel->ccwdev->dev,
1967 "The adapter is used exclusively by another "
1968 "host\n");
1969 break;
1970 case QETH_IDX_ACT_ERR_AUTH:
1971 case QETH_IDX_ACT_ERR_AUTH_USER:
1972 dev_err(&channel->ccwdev->dev,
1973 "Setting the device online failed because of "
1974 "insufficient authorization\n");
1975 break;
1976 default:
1977 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1978 CCW_DEVID(channel->ccwdev));
1979 }
1980 QETH_CARD_TEXT_(card, 2, "idxread%c",
1981 QETH_IDX_ACT_CAUSE_CODE(iob->data));
1982 goto out;
1983 }
1984
1985 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1986 if (temp != qeth_peer_func_level(card->info.func_level)) {
1987 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1988 CCW_DEVID(channel->ccwdev),
1989 card->info.func_level, temp);
1990 goto out;
1991 }
1992 memcpy(&card->token.issuer_rm_r,
1993 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1994 QETH_MPC_TOKEN_LENGTH);
1995 memcpy(&card->info.mcl_level[0],
1996 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1997 channel->state = CH_STATE_UP;
1998 out:
1999 qeth_release_buffer(channel, iob);
2000 }
2001
2002 void qeth_prepare_control_data(struct qeth_card *card, int len,
2003 struct qeth_cmd_buffer *iob)
2004 {
2005 qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
2006 iob->callback = qeth_release_buffer_cb;
2007
2008 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
2009 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
2010 card->seqno.trans_hdr++;
2011 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
2012 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
2013 card->seqno.pdu_hdr++;
2014 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
2015 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
2016 QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
2017 }
2018 EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
2019
2020 /**
2021 * qeth_send_control_data() - send control command to the card
2022 * @card: qeth_card structure pointer
2023 * @len: size of the command buffer
2024 * @iob: qeth_cmd_buffer pointer
2025 * @reply_cb: callback function pointer
2026 * @cb_card: pointer to the qeth_card structure
2027 * @cb_reply: pointer to the qeth_reply structure
2028 * @cb_cmd: pointer to the original iob for non-IPA
2029 * commands, or to the qeth_ipa_cmd structure
2030 * for the IPA commands.
2031 * @reply_param: private pointer passed to the callback
2032 *
2033 * Returns the value of the `return_code' field of the response
2034 * block returned from the hardware, or other error indication.
2035 * Value of zero indicates successful execution of the command.
2036 *
2037 * Callback function gets called one or more times, with cb_cmd
2038 * pointing to the response returned by the hardware. Callback
2039 * function must return non-zero if more reply blocks are expected,
2040 * and zero if the last or only reply block is received. Callback
2041 * function can get the value of the reply_param pointer from the
2042 * field 'param' of the structure qeth_reply.
2043 */
2044
2045 int qeth_send_control_data(struct qeth_card *card, int len,
2046 struct qeth_cmd_buffer *iob,
2047 int (*reply_cb)(struct qeth_card *cb_card,
2048 struct qeth_reply *cb_reply,
2049 unsigned long cb_cmd),
2050 void *reply_param)
2051 {
2052 struct qeth_channel *channel = iob->channel;
2053 int rc;
2054 struct qeth_reply *reply = NULL;
2055 unsigned long timeout, event_timeout;
2056 struct qeth_ipa_cmd *cmd = NULL;
2057
2058 QETH_CARD_TEXT(card, 2, "sendctl");
2059
2060 if (card->read_or_write_problem) {
2061 qeth_release_buffer(channel, iob);
2062 return -EIO;
2063 }
2064 reply = qeth_alloc_reply(card);
2065 if (!reply) {
2066 qeth_release_buffer(channel, iob);
2067 return -ENOMEM;
2068 }
2069 reply->callback = reply_cb;
2070 reply->param = reply_param;
2071
2072 init_waitqueue_head(&reply->wait_q);
2073
2074 while (atomic_cmpxchg(&channel->irq_pending, 0, 1));
2075
2076 if (IS_IPA(iob->data)) {
2077 cmd = __ipa_cmd(iob);
2078 cmd->hdr.seqno = card->seqno.ipa++;
2079 reply->seqno = cmd->hdr.seqno;
2080 event_timeout = QETH_IPA_TIMEOUT;
2081 } else {
2082 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2083 event_timeout = QETH_TIMEOUT;
2084 }
2085 qeth_prepare_control_data(card, len, iob);
2086
2087 spin_lock_irq(&card->lock);
2088 list_add_tail(&reply->list, &card->cmd_waiter_list);
2089 spin_unlock_irq(&card->lock);
2090
2091 timeout = jiffies + event_timeout;
2092
2093 QETH_CARD_TEXT(card, 6, "noirqpnd");
2094 spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2095 rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
2096 (addr_t) iob, 0, 0, event_timeout);
2097 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2098 if (rc) {
2099 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2100 CARD_DEVID(card), rc);
2101 QETH_CARD_TEXT_(card, 2, " err%d", rc);
2102 spin_lock_irq(&card->lock);
2103 list_del_init(&reply->list);
2104 qeth_put_reply(reply);
2105 spin_unlock_irq(&card->lock);
2106 qeth_release_buffer(channel, iob);
2107 atomic_set(&channel->irq_pending, 0);
2108 wake_up(&card->wait_q);
2109 return rc;
2110 }
2111
2112 /* we have only one long-running ipassist; since we can ensure
2113 * process context for this command, we can sleep */
2114 if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
2115 cmd->hdr.prot_version == QETH_PROT_IPV4) {
2116 if (!wait_event_timeout(reply->wait_q,
2117 atomic_read(&reply->received), event_timeout))
2118 goto time_err;
2119 } else {
2120 while (!atomic_read(&reply->received)) {
2121 if (time_after(jiffies, timeout))
2122 goto time_err;
2123 cpu_relax();
2124 }
2125 }
2126
2127 rc = reply->rc;
2128 qeth_put_reply(reply);
2129 return rc;
2130
2131 time_err:
2132 reply->rc = -ETIME;
2133 spin_lock_irq(&card->lock);
2134 list_del_init(&reply->list);
2135 spin_unlock_irq(&card->lock);
2136 atomic_inc(&reply->received);
2137 rc = reply->rc;
2138 qeth_put_reply(reply);
2139 return rc;
2140 }
2141 EXPORT_SYMBOL_GPL(qeth_send_control_data);
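/*
 * Illustrative sketch (not part of the driver) of the reply_cb
 * contract documented above. The callback name and its notion of a
 * "final" reply are assumptions made up for this example:
 *
 *	static int example_reply_cb(struct qeth_card *card,
 *				    struct qeth_reply *reply,
 *				    unsigned long data)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 *
 *		if (cmd->hdr.return_code)
 *			return 0;	// failed: treat as last reply block
 *		// consume the block, e.g. via reply->param, then
 *		// return non-zero if more reply blocks are expected:
 *		return 0;
 *	}
 */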
2142
2143 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2144 unsigned long data)
2145 {
2146 struct qeth_cmd_buffer *iob;
2147
2148 QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
2149
2150 iob = (struct qeth_cmd_buffer *) data;
2151 memcpy(&card->token.cm_filter_r,
2152 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2153 QETH_MPC_TOKEN_LENGTH);
2154 return 0;
2155 }
2156
2157 static int qeth_cm_enable(struct qeth_card *card)
2158 {
2159 int rc;
2160 struct qeth_cmd_buffer *iob;
2161
2162 QETH_DBF_TEXT(SETUP, 2, "cmenable");
2163
2164 iob = qeth_wait_for_buffer(&card->write);
2165 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2166 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2167 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2168 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2169 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2170
2171 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2172 qeth_cm_enable_cb, NULL);
2173 return rc;
2174 }
2175
2176 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2177 unsigned long data)
2178 {
2179 struct qeth_cmd_buffer *iob;
2180
2181 QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
2182
2183 iob = (struct qeth_cmd_buffer *) data;
2184 memcpy(&card->token.cm_connection_r,
2185 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2186 QETH_MPC_TOKEN_LENGTH);
2187 return 0;
2188 }
2189
2190 static int qeth_cm_setup(struct qeth_card *card)
2191 {
2192 int rc;
2193 struct qeth_cmd_buffer *iob;
2194
2195 QETH_DBF_TEXT(SETUP, 2, "cmsetup");
2196
2197 iob = qeth_wait_for_buffer(&card->write);
2198 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2199 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2200 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2201 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2202 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2203 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2204 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2205 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2206 qeth_cm_setup_cb, NULL);
2207 return rc;
2208 }
2209
2210 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2211 {
2212 struct net_device *dev = card->dev;
2213 unsigned int new_mtu;
2214
2215 if (!max_mtu) {
2216 /* IQD needs accurate max MTU to set up its RX buffers: */
2217 if (IS_IQD(card))
2218 return -EINVAL;
2219 /* tolerate quirky HW: */
2220 max_mtu = ETH_MAX_MTU;
2221 }
2222
2223 rtnl_lock();
2224 if (IS_IQD(card)) {
2225 /* move any device with default MTU to new max MTU: */
2226 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2227
2228 /* adjust RX buffer size to new max MTU: */
2229 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2230 if (dev->max_mtu && dev->max_mtu != max_mtu)
2231 qeth_free_qdio_buffers(card);
2232 } else {
2233 if (dev->mtu)
2234 new_mtu = dev->mtu;
2235 /* default MTUs for first setup: */
2236 else if (IS_LAYER2(card))
2237 new_mtu = ETH_DATA_LEN;
2238 else
2239 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2240 }
2241
2242 dev->max_mtu = max_mtu;
2243 dev->mtu = min(new_mtu, max_mtu);
2244 rtnl_unlock();
2245 return 0;
2246 }
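/*
 * Worked example (illustrative, assuming 4K pages): an IQD device
 * reporting max_mtu = 57344 gets an RX buffer size of
 * 57344 + 2 * 4096 = 65536 bytes.
 */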
2247
2248 static int qeth_get_mtu_outof_framesize(int framesize)
2249 {
2250 switch (framesize) {
2251 case 0x4000:
2252 return 8192;
2253 case 0x6000:
2254 return 16384;
2255 case 0xa000:
2256 return 32768;
2257 case 0xffff:
2258 return 57344;
2259 default:
2260 return 0;
2261 }
2262 }
2263
2264 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2265 unsigned long data)
2266 {
2267 __u16 mtu, framesize;
2268 __u16 len;
2269 __u8 link_type;
2270 struct qeth_cmd_buffer *iob;
2271
2272 QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
2273
2274 iob = (struct qeth_cmd_buffer *) data;
2275 memcpy(&card->token.ulp_filter_r,
2276 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2277 QETH_MPC_TOKEN_LENGTH);
2278 if (card->info.type == QETH_CARD_TYPE_IQD) {
2279 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2280 mtu = qeth_get_mtu_outof_framesize(framesize);
2281 } else {
2282 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2283 }
2284 *(u16 *)reply->param = mtu;
2285
2286 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2287 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2288 memcpy(&link_type,
2289 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2290 card->info.link_type = link_type;
2291 } else
2292 card->info.link_type = 0;
2293 QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
2294 return 0;
2295 }
2296
2297 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2298 {
2299 if (IS_OSN(card))
2300 return QETH_PROT_OSN2;
2301 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2302 }
2303
2304 static int qeth_ulp_enable(struct qeth_card *card)
2305 {
2306 u8 prot_type = qeth_mpc_select_prot_type(card);
2307 struct qeth_cmd_buffer *iob;
2308 u16 max_mtu;
2309 int rc;
2310
2311 /* FIXME: trace view callbacks */
2312 QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
2313
2314 iob = qeth_wait_for_buffer(&card->write);
2315 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2316
2317 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2318 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2319 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2320 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2321 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2322 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2323 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2324 qeth_ulp_enable_cb, &max_mtu);
2325 if (rc)
2326 return rc;
2327 return qeth_update_max_mtu(card, max_mtu);
2328 }
2329
2330 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2331 unsigned long data)
2332 {
2333 struct qeth_cmd_buffer *iob;
2334
2335 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
2336
2337 iob = (struct qeth_cmd_buffer *) data;
2338 memcpy(&card->token.ulp_connection_r,
2339 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2340 QETH_MPC_TOKEN_LENGTH);
2341 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2342 3)) {
2343 QETH_DBF_TEXT(SETUP, 2, "olmlimit");
2344 dev_err(&card->gdev->dev, "A connection could not be "
2345 "established because of an OLM limit\n");
2346 iob->rc = -EMLINK;
2347 }
2348 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
2349 return 0;
2350 }
2351
2352 static int qeth_ulp_setup(struct qeth_card *card)
2353 {
2354 int rc;
2355 __u16 temp;
2356 struct qeth_cmd_buffer *iob;
2357 struct ccw_dev_id dev_id;
2358
2359 QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
2360
2361 iob = qeth_wait_for_buffer(&card->write);
2362 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2363
2364 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2365 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2366 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2367 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2368 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2369 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2370
2371 ccw_device_get_id(CARD_DDEV(card), &dev_id);
2372 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2373 temp = (card->info.cula << 8) + card->info.unit_addr2;
2374 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2375 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2376 qeth_ulp_setup_cb, NULL);
2377 return rc;
2378 }
2379
2380 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2381 {
2382 struct qeth_qdio_out_buffer *newbuf;
2383
2384 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2385 if (!newbuf)
2386 return -ENOMEM;
2387
2388 newbuf->buffer = q->qdio_bufs[bidx];
2389 skb_queue_head_init(&newbuf->skb_list);
2390 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2391 newbuf->q = q;
2392 newbuf->next_pending = q->bufs[bidx];
2393 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2394 q->bufs[bidx] = newbuf;
2395 return 0;
2396 }
2397
2398 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2399 {
2400 if (!q)
2401 return;
2402
2403 qeth_clear_outq_buffers(q, 1);
2404 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2405 kfree(q);
2406 }
2407
2408 static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
2409 {
2410 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2411
2412 if (!q)
2413 return NULL;
2414
2415 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2416 kfree(q);
2417 return NULL;
2418 }
2419 return q;
2420 }
2421
2422 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2423 {
2424 int i, j;
2425
2426 QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
2427
2428 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2429 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2430 return 0;
2431
2432 QETH_DBF_TEXT(SETUP, 2, "inq");
2433 card->qdio.in_q = qeth_alloc_qdio_queue();
2434 if (!card->qdio.in_q)
2435 goto out_nomem;
2436
2437 /* inbound buffer pool */
2438 if (qeth_alloc_buffer_pool(card))
2439 goto out_freeinq;
2440
2441 /* outbound */
2442 card->qdio.out_qs =
2443 kcalloc(card->qdio.no_out_queues,
2444 sizeof(struct qeth_qdio_out_q *),
2445 GFP_KERNEL);
2446 if (!card->qdio.out_qs)
2447 goto out_freepool;
2448 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2449 card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
2450 if (!card->qdio.out_qs[i])
2451 goto out_freeoutq;
2452 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2453 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
2454 card->qdio.out_qs[i]->queue_no = i;
2455 /* give outbound qeth_qdio_buffers their qdio_buffers */
2456 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2457 WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
2458 if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
2459 goto out_freeoutqbufs;
2460 }
2461 }
2462
2463 /* completion */
2464 if (qeth_alloc_cq(card))
2465 goto out_freeoutq;
2466
2467 return 0;
2468
2469 out_freeoutqbufs:
2470 while (j > 0) {
2471 --j;
2472 kmem_cache_free(qeth_qdio_outbuf_cache,
2473 card->qdio.out_qs[i]->bufs[j]);
2474 card->qdio.out_qs[i]->bufs[j] = NULL;
2475 }
2476 out_freeoutq:
2477 while (i > 0)
2478 qeth_free_output_queue(card->qdio.out_qs[--i]);
2479 kfree(card->qdio.out_qs);
2480 card->qdio.out_qs = NULL;
2481 out_freepool:
2482 qeth_free_buffer_pool(card);
2483 out_freeinq:
2484 qeth_free_qdio_queue(card->qdio.in_q);
2485 card->qdio.in_q = NULL;
2486 out_nomem:
2487 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2488 return -ENOMEM;
2489 }
2490
2491 static void qeth_free_qdio_buffers(struct qeth_card *card)
2492 {
2493 int i, j;
2494
2495 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2496 QETH_QDIO_UNINITIALIZED)
2497 return;
2498
2499 qeth_free_cq(card);
2500 cancel_delayed_work_sync(&card->buffer_reclaim_work);
2501 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2502 if (card->qdio.in_q->bufs[j].rx_skb)
2503 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2504 }
2505 qeth_free_qdio_queue(card->qdio.in_q);
2506 card->qdio.in_q = NULL;
2507 /* inbound buffer pool */
2508 qeth_free_buffer_pool(card);
2509 /* free outbound qdio_qs */
2510 if (card->qdio.out_qs) {
2511 for (i = 0; i < card->qdio.no_out_queues; i++)
2512 qeth_free_output_queue(card->qdio.out_qs[i]);
2513 kfree(card->qdio.out_qs);
2514 card->qdio.out_qs = NULL;
2515 }
2516 }
2517
2518 static void qeth_create_qib_param_field(struct qeth_card *card,
2519 char *param_field)
2520 {
2521
2522 param_field[0] = _ascebc['P'];
2523 param_field[1] = _ascebc['C'];
2524 param_field[2] = _ascebc['I'];
2525 param_field[3] = _ascebc['T'];
2526 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2527 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2528 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2529 }
2530
2531 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2532 char *param_field)
2533 {
2534 param_field[16] = _ascebc['B'];
2535 param_field[17] = _ascebc['L'];
2536 param_field[18] = _ascebc['K'];
2537 param_field[19] = _ascebc['T'];
2538 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2539 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2540 *((unsigned int *) (&param_field[28])) =
2541 card->info.blkt.inter_packet_jumbo;
2542 }
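/*
 * Resulting QIB parameter field layout (summary of the two helpers
 * above, byte offsets):
 *
 *	 0.. 3	EBCDIC "PCIT"
 *	 4.. 7	QETH_PCI_THRESHOLD_A
 *	 8..11	QETH_PCI_THRESHOLD_B
 *	12..15	QETH_PCI_TIMER_VALUE
 *	16..19	EBCDIC "BLKT"
 *	20..23	blkt.time_total
 *	24..27	blkt.inter_packet
 *	28..31	blkt.inter_packet_jumbo
 */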
2543
2544 static int qeth_qdio_activate(struct qeth_card *card)
2545 {
2546 QETH_DBF_TEXT(SETUP, 3, "qdioact");
2547 return qdio_activate(CARD_DDEV(card));
2548 }
2549
2550 static int qeth_dm_act(struct qeth_card *card)
2551 {
2552 int rc;
2553 struct qeth_cmd_buffer *iob;
2554
2555 QETH_DBF_TEXT(SETUP, 2, "dmact");
2556
2557 iob = qeth_wait_for_buffer(&card->write);
2558 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2559
2560 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2561 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2562 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2563 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2564 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2565 return rc;
2566 }
2567
2568 static int qeth_mpc_initialize(struct qeth_card *card)
2569 {
2570 int rc;
2571
2572 QETH_DBF_TEXT(SETUP, 2, "mpcinit");
2573
2574 rc = qeth_issue_next_read(card);
2575 if (rc) {
2576 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2577 return rc;
2578 }
2579 rc = qeth_cm_enable(card);
2580 if (rc) {
2581 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2582 goto out_qdio;
2583 }
2584 rc = qeth_cm_setup(card);
2585 if (rc) {
2586 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2587 goto out_qdio;
2588 }
2589 rc = qeth_ulp_enable(card);
2590 if (rc) {
2591 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
2592 goto out_qdio;
2593 }
2594 rc = qeth_ulp_setup(card);
2595 if (rc) {
2596 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2597 goto out_qdio;
2598 }
2599 rc = qeth_alloc_qdio_buffers(card);
2600 if (rc) {
2601 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2602 goto out_qdio;
2603 }
2604 rc = qeth_qdio_establish(card);
2605 if (rc) {
2606 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2607 qeth_free_qdio_buffers(card);
2608 goto out_qdio;
2609 }
2610 rc = qeth_qdio_activate(card);
2611 if (rc) {
2612 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
2613 goto out_qdio;
2614 }
2615 rc = qeth_dm_act(card);
2616 if (rc) {
2617 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
2618 goto out_qdio;
2619 }
2620
2621 return 0;
2622 out_qdio:
2623 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2624 qdio_free(CARD_DDEV(card));
2625 return rc;
2626 }
2627
2628 void qeth_print_status_message(struct qeth_card *card)
2629 {
2630 switch (card->info.type) {
2631 case QETH_CARD_TYPE_OSD:
2632 case QETH_CARD_TYPE_OSM:
2633 case QETH_CARD_TYPE_OSX:
2634 /* VM uses a non-zero first character to indicate
2635 * a HiperSockets-like reporting of the level;
2636 * OSA sets the first character to zero.
2637 */
2638 if (!card->info.mcl_level[0]) {
2639 sprintf(card->info.mcl_level, "%02x%02x",
2640 card->info.mcl_level[2],
2641 card->info.mcl_level[3]);
2642 break;
2643 }
2644 /* fallthrough */
2645 case QETH_CARD_TYPE_IQD:
2646 if ((card->info.guestlan) ||
2647 (card->info.mcl_level[0] & 0x80)) {
2648 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2649 card->info.mcl_level[0]];
2650 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2651 card->info.mcl_level[1]];
2652 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2653 card->info.mcl_level[2]];
2654 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2655 card->info.mcl_level[3]];
2656 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2657 }
2658 break;
2659 default:
2660 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2661 }
2662 dev_info(&card->gdev->dev,
2663 "Device is a%s card%s%s%s\nwith link type %s.\n",
2664 qeth_get_cardname(card),
2665 (card->info.mcl_level[0]) ? " (level: " : "",
2666 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2667 (card->info.mcl_level[0]) ? ")" : "",
2668 qeth_get_cardname_short(card));
2669 }
2670 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2671
2672 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2673 {
2674 struct qeth_buffer_pool_entry *entry;
2675
2676 QETH_CARD_TEXT(card, 5, "inwrklst");
2677
2678 list_for_each_entry(entry,
2679 &card->qdio.init_pool.entry_list, init_list) {
2680 qeth_put_buffer_pool_entry(card, entry);
2681 }
2682 }
2683
2684 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2685 struct qeth_card *card)
2686 {
2687 struct list_head *plh;
2688 struct qeth_buffer_pool_entry *entry;
2689 int i, free;
2690 struct page *page;
2691
2692 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2693 return NULL;
2694
2695 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2696 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2697 free = 1;
2698 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2699 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2700 free = 0;
2701 break;
2702 }
2703 }
2704 if (free) {
2705 list_del_init(&entry->list);
2706 return entry;
2707 }
2708 }
2709
2710 /* no free buffer in pool so take first one and swap pages */
2711 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2712 struct qeth_buffer_pool_entry, list);
2713 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2714 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2715 page = alloc_page(GFP_ATOMIC);
2716 if (!page) {
2717 return NULL;
2718 } else {
2719 free_page((unsigned long)entry->elements[i]);
2720 entry->elements[i] = page_address(page);
2721 if (card->options.performance_stats)
2722 card->perf_stats.sg_alloc_page_rx++;
2723 }
2724 }
2725 }
2726 list_del_init(&entry->list);
2727 return entry;
2728 }
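/*
 * Note (illustrative): a page_count() > 1 above means the page is
 * still referenced elsewhere, typically by an skb that was handed up
 * the stack, so the entry cannot be reused as-is; the fallback path
 * swaps such pages for freshly allocated ones instead of waiting.
 */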
2729
2730 static int qeth_init_input_buffer(struct qeth_card *card,
2731 struct qeth_qdio_buffer *buf)
2732 {
2733 struct qeth_buffer_pool_entry *pool_entry;
2734 int i;
2735
2736 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2737 buf->rx_skb = netdev_alloc_skb(card->dev,
2738 QETH_RX_PULL_LEN + ETH_HLEN);
2739 if (!buf->rx_skb)
2740 return 1;
2741 }
2742
2743 pool_entry = qeth_find_free_buffer_pool_entry(card);
2744 if (!pool_entry)
2745 return 1;
2746
2747 /*
2748 * since the buffer is accessed only from the input_tasklet
2749 * there shouldn't be a need to synchronize; also, since we use
2750 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2751 * buffers
2752 */
2753
2754 buf->pool_entry = pool_entry;
2755 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2756 buf->buffer->element[i].length = PAGE_SIZE;
2757 buf->buffer->element[i].addr = pool_entry->elements[i];
2758 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2759 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2760 else
2761 buf->buffer->element[i].eflags = 0;
2762 buf->buffer->element[i].sflags = 0;
2763 }
2764 return 0;
2765 }
2766
2767 int qeth_init_qdio_queues(struct qeth_card *card)
2768 {
2769 int i, j;
2770 int rc;
2771
2772 QETH_DBF_TEXT(SETUP, 2, "initqdqs");
2773
2774 /* inbound queue */
2775 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2776 memset(&card->rx, 0, sizeof(struct qeth_rx));
2777 qeth_initialize_working_pool_list(card);
2778 /* give only as many buffers to hardware as we have buffer pool entries */
2779 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2780 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2781 card->qdio.in_q->next_buf_to_init =
2782 card->qdio.in_buf_pool.buf_count - 1;
2783 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2784 card->qdio.in_buf_pool.buf_count - 1);
2785 if (rc) {
2786 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2787 return rc;
2788 }
2789
2790 /* completion */
2791 rc = qeth_cq_init(card);
2792 if (rc)
2793 return rc;
2795
2796 /* outbound queue */
2797 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2798 qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
2799 QDIO_MAX_BUFFERS_PER_Q);
2800 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2801 qeth_clear_output_buffer(card->qdio.out_qs[i],
2802 card->qdio.out_qs[i]->bufs[j]);
2803 }
2804 card->qdio.out_qs[i]->card = card;
2805 card->qdio.out_qs[i]->next_buf_to_fill = 0;
2806 card->qdio.out_qs[i]->do_pack = 0;
2807 atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
2808 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2809 atomic_set(&card->qdio.out_qs[i]->state,
2810 QETH_OUT_Q_UNLOCKED);
2811 }
2812 return 0;
2813 }
2814 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2815
2816 static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2817 {
2818 switch (link_type) {
2819 case QETH_LINK_TYPE_HSTR:
2820 return 2;
2821 default:
2822 return 1;
2823 }
2824 }
2825
2826 static void qeth_fill_ipacmd_header(struct qeth_card *card,
2827 struct qeth_ipa_cmd *cmd,
2828 enum qeth_ipa_cmds command,
2829 enum qeth_prot_versions prot)
2830 {
2831 cmd->hdr.command = command;
2832 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2833 /* cmd->hdr.seqno is set by qeth_send_control_data() */
2834 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2835 cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
2836 cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1;
2837 cmd->hdr.param_count = 1;
2838 cmd->hdr.prot_version = prot;
2839 }
2840
2841 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
2842 {
2843 u8 prot_type = qeth_mpc_select_prot_type(card);
2844
2845 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2846 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2847 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2848 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2849 }
2850 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2851
2852 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2853 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2854 {
2855 struct qeth_cmd_buffer *iob;
2856
2857 iob = qeth_get_buffer(&card->write);
2858 if (iob) {
2859 qeth_prepare_ipa_cmd(card, iob);
2860 qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
2861 } else {
2862 dev_warn(&card->gdev->dev,
2863 "The qeth driver ran out of channel command buffers\n");
2864 QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
2865 CARD_DEVID(card));
2866 }
2867
2868 return iob;
2869 }
2870 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2871
2872 /**
2873 * qeth_send_ipa_cmd() - send an IPA command
2874 *
2875 * See qeth_send_control_data() for explanation of the arguments.
2876 */
2877
2878 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2879 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2880 unsigned long),
2881 void *reply_param)
2882 {
2883 int rc;
2884
2885 QETH_CARD_TEXT(card, 4, "sendipa");
2886 rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
2887 iob, reply_cb, reply_param);
2888 if (rc == -ETIME) {
2889 qeth_clear_ipacmd_list(card);
2890 qeth_schedule_recovery(card);
2891 }
2892 return rc;
2893 }
2894 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2895
2896 static int qeth_send_startlan(struct qeth_card *card)
2897 {
2898 int rc;
2899 struct qeth_cmd_buffer *iob;
2900
2901 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2902
2903 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
2904 if (!iob)
2905 return -ENOMEM;
2906 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2907 return rc;
2908 }
2909
2910 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2911 {
2912 if (!cmd->hdr.return_code)
2913 cmd->hdr.return_code =
2914 cmd->data.setadapterparms.hdr.return_code;
2915 return cmd->hdr.return_code;
2916 }
2917
2918 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2919 struct qeth_reply *reply, unsigned long data)
2920 {
2921 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2922
2923 QETH_CARD_TEXT(card, 3, "quyadpcb");
2924 if (qeth_setadpparms_inspect_rc(cmd))
2925 return 0;
2926
2927 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2928 card->info.link_type =
2929 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2930 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
2931 }
2932 card->options.adp.supported_funcs =
2933 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2934 return 0;
2935 }
2936
2937 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2938 __u32 command, __u32 cmdlen)
2939 {
2940 struct qeth_cmd_buffer *iob;
2941 struct qeth_ipa_cmd *cmd;
2942
2943 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2944 QETH_PROT_IPV4);
2945 if (iob) {
2946 cmd = __ipa_cmd(iob);
2947 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2948 cmd->data.setadapterparms.hdr.command_code = command;
2949 cmd->data.setadapterparms.hdr.used_total = 1;
2950 cmd->data.setadapterparms.hdr.seq_no = 1;
2951 }
2952
2953 return iob;
2954 }
2955
2956 static int qeth_query_setadapterparms(struct qeth_card *card)
2957 {
2958 int rc;
2959 struct qeth_cmd_buffer *iob;
2960
2961 QETH_CARD_TEXT(card, 3, "queryadp");
2962 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2963 sizeof(struct qeth_ipacmd_setadpparms));
2964 if (!iob)
2965 return -ENOMEM;
2966 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2967 return rc;
2968 }
2969
2970 static int qeth_query_ipassists_cb(struct qeth_card *card,
2971 struct qeth_reply *reply, unsigned long data)
2972 {
2973 struct qeth_ipa_cmd *cmd;
2974
2975 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
2976
2977 cmd = (struct qeth_ipa_cmd *) data;
2978
2979 switch (cmd->hdr.return_code) {
2980 case IPA_RC_NOTSUPP:
2981 case IPA_RC_L2_UNSUPPORTED_CMD:
2982 QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
2983 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
2984 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
2985 return 0;
2986 default:
2987 if (cmd->hdr.return_code) {
2988 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2989 CARD_DEVID(card),
2990 cmd->hdr.return_code);
2991 return 0;
2992 }
2993 }
2994
2995 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2996 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2997 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2998 } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
2999 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
3000 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
3001 } else
3002 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3003 CARD_DEVID(card));
3004 return 0;
3005 }
3006
3007 static int qeth_query_ipassists(struct qeth_card *card,
3008 enum qeth_prot_versions prot)
3009 {
3010 int rc;
3011 struct qeth_cmd_buffer *iob;
3012
3013 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
3014 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
3015 if (!iob)
3016 return -ENOMEM;
3017 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3018 return rc;
3019 }
3020
3021 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3022 struct qeth_reply *reply, unsigned long data)
3023 {
3024 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3025 struct qeth_query_switch_attributes *attrs;
3026 struct qeth_switch_info *sw_info;
3027
3028 QETH_CARD_TEXT(card, 2, "qswiatcb");
3029 if (qeth_setadpparms_inspect_rc(cmd))
3030 return 0;
3031
3032 sw_info = (struct qeth_switch_info *)reply->param;
3033 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3034 sw_info->capabilities = attrs->capabilities;
3035 sw_info->settings = attrs->settings;
3036 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3037 sw_info->settings);
3038 return 0;
3039 }
3040
3041 int qeth_query_switch_attributes(struct qeth_card *card,
3042 struct qeth_switch_info *sw_info)
3043 {
3044 struct qeth_cmd_buffer *iob;
3045
3046 QETH_CARD_TEXT(card, 2, "qswiattr");
3047 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3048 return -EOPNOTSUPP;
3049 if (!netif_carrier_ok(card->dev))
3050 return -ENOMEDIUM;
3051 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
3052 sizeof(struct qeth_ipacmd_setadpparms_hdr));
3053 if (!iob)
3054 return -ENOMEM;
3055 return qeth_send_ipa_cmd(card, iob,
3056 qeth_query_switch_attributes_cb, sw_info);
3057 }
3058
3059 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3060 struct qeth_reply *reply, unsigned long data)
3061 {
3062 struct qeth_ipa_cmd *cmd;
3063 __u16 rc;
3064
3065 cmd = (struct qeth_ipa_cmd *)data;
3066 rc = cmd->hdr.return_code;
3067 if (rc)
3068 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3069 else
3070 card->info.diagass_support = cmd->data.diagass.ext;
3071 return 0;
3072 }
3073
3074 static int qeth_query_setdiagass(struct qeth_card *card)
3075 {
3076 struct qeth_cmd_buffer *iob;
3077 struct qeth_ipa_cmd *cmd;
3078
3079 QETH_DBF_TEXT(SETUP, 2, "qdiagass");
3080 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3081 if (!iob)
3082 return -ENOMEM;
3083 cmd = __ipa_cmd(iob);
3084 cmd->data.diagass.subcmd_len = 16;
3085 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
3086 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3087 }
3088
3089 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3090 {
3091 unsigned long info = get_zeroed_page(GFP_KERNEL);
3092 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3093 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3094 struct ccw_dev_id ccwid;
3095 int level;
3096
3097 tid->chpid = card->info.chpid;
3098 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3099 tid->ssid = ccwid.ssid;
3100 tid->devno = ccwid.devno;
3101 if (!info)
3102 return;
3103 level = stsi(NULL, 0, 0, 0);
3104 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3105 tid->lparnr = info222->lpar_number;
3106 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3107 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3108 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3109 }
3110 free_page(info);
3111 return;
3112 }
3113
3114 static int qeth_hw_trap_cb(struct qeth_card *card,
3115 struct qeth_reply *reply, unsigned long data)
3116 {
3117 struct qeth_ipa_cmd *cmd;
3118 __u16 rc;
3119
3120 cmd = (struct qeth_ipa_cmd *)data;
3121 rc = cmd->hdr.return_code;
3122 if (rc)
3123 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3124 return 0;
3125 }
3126
3127 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3128 {
3129 struct qeth_cmd_buffer *iob;
3130 struct qeth_ipa_cmd *cmd;
3131
3132 QETH_DBF_TEXT(SETUP, 2, "diagtrap");
3133 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3134 if (!iob)
3135 return -ENOMEM;
3136 cmd = __ipa_cmd(iob);
3137 cmd->data.diagass.subcmd_len = 80;
3138 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
3139 cmd->data.diagass.type = 1;
3140 cmd->data.diagass.action = action;
3141 switch (action) {
3142 case QETH_DIAGS_TRAP_ARM:
3143 cmd->data.diagass.options = 0x0003;
3144 cmd->data.diagass.ext = 0x00010000 +
3145 sizeof(struct qeth_trap_id);
3146 qeth_get_trap_id(card,
3147 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3148 break;
3149 case QETH_DIAGS_TRAP_DISARM:
3150 cmd->data.diagass.options = 0x0001;
3151 break;
3152 case QETH_DIAGS_TRAP_CAPTURE:
3153 break;
3154 }
3155 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3156 }
3157 EXPORT_SYMBOL_GPL(qeth_hw_trap);
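/*
 * Illustrative usage sketch (error handling elided): arm the hardware
 * trap while the device is set up, disarm it again on teardown:
 *
 *	rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
 *	...
 *	rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 */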
3158
3159 static int qeth_check_qdio_errors(struct qeth_card *card,
3160 struct qdio_buffer *buf,
3161 unsigned int qdio_error,
3162 const char *dbftext)
3163 {
3164 if (qdio_error) {
3165 QETH_CARD_TEXT(card, 2, dbftext);
3166 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3167 buf->element[15].sflags);
3168 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3169 buf->element[14].sflags);
3170 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3171 if ((buf->element[15].sflags) == 0x12) {
3172 card->stats.rx_dropped++;
3173 return 0;
3174 } else
3175 return 1;
3176 }
3177 return 0;
3178 }
3179
3180 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3181 {
3182 struct qeth_qdio_q *queue = card->qdio.in_q;
3183 struct list_head *lh;
3184 int count;
3185 int i;
3186 int rc;
3187 int newcount = 0;
3188
3189 count = (index < queue->next_buf_to_init)?
3190 card->qdio.in_buf_pool.buf_count -
3191 (queue->next_buf_to_init - index) :
3192 card->qdio.in_buf_pool.buf_count -
3193 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
3194 /* only requeue at a certain threshold to avoid SIGAs */
3195 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3196 for (i = queue->next_buf_to_init;
3197 i < queue->next_buf_to_init + count; ++i) {
3198 if (qeth_init_input_buffer(card,
3199 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
3200 break;
3201 } else {
3202 newcount++;
3203 }
3204 }
3205
3206 if (newcount < count) {
3207 /* we are short on memory, so we switch back to
3208 * traditional skb allocation and drop packets */
3209 atomic_set(&card->force_alloc_skb, 3);
3210 count = newcount;
3211 } else {
3212 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3213 }
3214
3215 if (!count) {
3216 i = 0;
3217 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3218 i++;
3219 if (i == card->qdio.in_buf_pool.buf_count) {
3220 QETH_CARD_TEXT(card, 2, "qsarbw");
3221 card->reclaim_index = index;
3222 schedule_delayed_work(
3223 &card->buffer_reclaim_work,
3224 QETH_RECLAIM_WORK_TIME);
3225 }
3226 return;
3227 }
3228
3229 /*
3230 * According to the old code, requeueing all 128 buffers should be
3231 * avoided in order to benefit from PCI avoidance.
3232 * This function keeps at least one buffer (the buffer at
3233 * 'index') un-requeued -> this buffer is the first buffer that
3234 * will be requeued the next time.
3235 */
3236 if (card->options.performance_stats) {
3237 card->perf_stats.inbound_do_qdio_cnt++;
3238 card->perf_stats.inbound_do_qdio_start_time =
3239 qeth_get_micros();
3240 }
3241 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3242 queue->next_buf_to_init, count);
3243 if (card->options.performance_stats)
3244 card->perf_stats.inbound_do_qdio_time +=
3245 qeth_get_micros() -
3246 card->perf_stats.inbound_do_qdio_start_time;
3247 if (rc) {
3248 QETH_CARD_TEXT(card, 2, "qinberr");
3249 }
3250 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
3251 QDIO_MAX_BUFFERS_PER_Q;
3252 }
3253 }
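/*
 * Worked example (illustrative) for the count calculation above:
 * with in_buf_pool.buf_count = 64, next_buf_to_init = 60 and
 * index = 10, count = 64 - (60 - 10) = 14; in the wrap-around case
 * next_buf_to_init = 10 and index = 120,
 * count = 64 - (10 + 128 - 120) = 46 buffers eligible for requeueing.
 */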
3254
3255 static void qeth_buffer_reclaim_work(struct work_struct *work)
3256 {
3257 struct qeth_card *card = container_of(work, struct qeth_card,
3258 buffer_reclaim_work.work);
3259
3260 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3261 qeth_queue_input_buffer(card, card->reclaim_index);
3262 }
3263
3264 static void qeth_handle_send_error(struct qeth_card *card,
3265 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3266 {
3267 int sbalf15 = buffer->buffer->element[15].sflags;
3268
3269 QETH_CARD_TEXT(card, 6, "hdsnderr");
3270 if (card->info.type == QETH_CARD_TYPE_IQD) {
3271 if (sbalf15 == 0) {
3272 qdio_err = 0;
3273 } else {
3274 qdio_err = 1;
3275 }
3276 }
3277 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3278
3279 if (!qdio_err)
3280 return;
3281
3282 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3283 return;
3284
3285 QETH_CARD_TEXT(card, 1, "lnkfail");
3286 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3287 (u16)qdio_err, (u8)sbalf15);
3288 }
3289
3290 /**
3291 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3292 * @queue: queue to check for packing buffer
3293 *
3294 * Returns number of buffers that were prepared for flush.
3295 */
3296 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3297 {
3298 struct qeth_qdio_out_buffer *buffer;
3299
3300 buffer = queue->bufs[queue->next_buf_to_fill];
3301 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3302 (buffer->next_element_to_fill > 0)) {
3303 /* it's a packing buffer */
3304 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3305 queue->next_buf_to_fill =
3306 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3307 return 1;
3308 }
3309 return 0;
3310 }
3311
3312 /*
3313 * Switch to packing state if the number of used buffers on a queue
3314 * reaches a certain limit.
3315 */
3316 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3317 {
3318 if (!queue->do_pack) {
3319 if (atomic_read(&queue->used_buffers)
3320 >= QETH_HIGH_WATERMARK_PACK){
3321 /* switch non-PACKING -> PACKING */
3322 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3323 if (queue->card->options.performance_stats)
3324 queue->card->perf_stats.sc_dp_p++;
3325 queue->do_pack = 1;
3326 }
3327 }
3328 }
3329
3330 /*
3331 * Switches from packing to non-packing mode. If there is a packing
3332 * buffer on the queue this buffer will be prepared to be flushed.
3333 * In that case 1 is returned to inform the caller. If no buffer
3334 * has to be flushed, zero is returned.
3335 */
3336 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3337 {
3338 if (queue->do_pack) {
3339 if (atomic_read(&queue->used_buffers)
3340 <= QETH_LOW_WATERMARK_PACK) {
3341 /* switch PACKING -> non-PACKING */
3342 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3343 if (queue->card->options.performance_stats)
3344 queue->card->perf_stats.sc_p_dp++;
3345 queue->do_pack = 0;
3346 return qeth_prep_flush_pack_buffer(queue);
3347 }
3348 }
3349 return 0;
3350 }
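/*
 * Note (illustrative): keeping separate high and low watermarks gives
 * the packing logic hysteresis, so a queue hovering around a single
 * threshold does not flap between packing and non-packing mode with
 * every buffer completion.
 */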
3351
3352 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3353 int count)
3354 {
3355 struct qeth_qdio_out_buffer *buf;
3356 int rc;
3357 int i;
3358 unsigned int qdio_flags;
3359
3360 for (i = index; i < index + count; ++i) {
3361 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3362 buf = queue->bufs[bidx];
3363 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3364 SBAL_EFLAGS_LAST_ENTRY;
3365
3366 if (queue->bufstates)
3367 queue->bufstates[bidx].user = buf;
3368
3369 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
3370 continue;
3371
3372 if (!queue->do_pack) {
3373 if ((atomic_read(&queue->used_buffers) >=
3374 (QETH_HIGH_WATERMARK_PACK -
3375 QETH_WATERMARK_PACK_FUZZ)) &&
3376 !atomic_read(&queue->set_pci_flags_count)) {
3377 /* it's likely that we'll go to packing
3378 * mode soon */
3379 atomic_inc(&queue->set_pci_flags_count);
3380 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3381 }
3382 } else {
3383 if (!atomic_read(&queue->set_pci_flags_count)) {
3384 /*
3385 * there's no outstanding PCI any more, so we
3386 * have to request a PCI to be sure that the PCI
3387 * will wake at some time in the future; then we
3388 * can flush packed buffers that might still be
3389 * hanging around, which can happen if no
3390 * further send was requested by the stack
3391 */
3392 atomic_inc(&queue->set_pci_flags_count);
3393 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3394 }
3395 }
3396 }
3397
3398 netif_trans_update(queue->card->dev);
3399 if (queue->card->options.performance_stats) {
3400 queue->card->perf_stats.outbound_do_qdio_cnt++;
3401 queue->card->perf_stats.outbound_do_qdio_start_time =
3402 qeth_get_micros();
3403 }
3404 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3405 if (atomic_read(&queue->set_pci_flags_count))
3406 qdio_flags |= QDIO_FLAG_PCI_OUT;
3407 atomic_add(count, &queue->used_buffers);
3408
3409 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3410 queue->queue_no, index, count);
3411 if (queue->card->options.performance_stats)
3412 queue->card->perf_stats.outbound_do_qdio_time +=
3413 qeth_get_micros() -
3414 queue->card->perf_stats.outbound_do_qdio_start_time;
3415 if (rc) {
3416 queue->card->stats.tx_errors += count;
3417 /* ignore temporary SIGA errors without busy condition */
3418 if (rc == -ENOBUFS)
3419 return;
3420 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3421 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3422 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3423 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3424 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3425
3426 /* this must not happen under normal circumstances. if it
3427 * happens, something is really wrong -> recover */
3428 qeth_schedule_recovery(queue->card);
3429 return;
3430 }
3431 if (queue->card->options.performance_stats)
3432 queue->card->perf_stats.bufs_sent += count;
3433 }
3434
3435 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3436 {
3437 int index;
3438 int flush_cnt = 0;
3439 int q_was_packing = 0;
3440
3441 /*
3442 * check if we have to switch to non-packing mode or if
3443 * we have to get a PCI flag out on the queue
3444 */
3445 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3446 !atomic_read(&queue->set_pci_flags_count)) {
3447 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3448 QETH_OUT_Q_UNLOCKED) {
3449 /*
3450 * If we get in here, there was no action in
3451 * do_send_packet. So, we check if there is a
3452 * packing buffer to be flushed here.
3453 */
3454 netif_stop_queue(queue->card->dev);
3455 index = queue->next_buf_to_fill;
3456 q_was_packing = queue->do_pack;
3457 /* queue->do_pack may change */
3458 barrier();
3459 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3460 if (!flush_cnt &&
3461 !atomic_read(&queue->set_pci_flags_count))
3462 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3463 if (queue->card->options.performance_stats &&
3464 q_was_packing)
3465 queue->card->perf_stats.bufs_sent_pack +=
3466 flush_cnt;
3467 if (flush_cnt)
3468 qeth_flush_buffers(queue, index, flush_cnt);
3469 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3470 }
3471 }
3472 }
3473
3474 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3475 unsigned long card_ptr)
3476 {
3477 struct qeth_card *card = (struct qeth_card *)card_ptr;
3478
3479 if (card->dev->flags & IFF_UP)
3480 napi_schedule(&card->napi);
3481 }
3482
3483 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3484 {
3485 int rc;
3486
3487 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3488 rc = -1;
3489 goto out;
3490 } else {
3491 if (card->options.cq == cq) {
3492 rc = 0;
3493 goto out;
3494 }
3495
3496 if (card->state != CARD_STATE_DOWN &&
3497 card->state != CARD_STATE_RECOVER) {
3498 rc = -1;
3499 goto out;
3500 }
3501
3502 qeth_free_qdio_buffers(card);
3503 card->options.cq = cq;
3504 rc = 0;
3505 }
3506 out:
3507 return rc;
3508
3509 }
3510 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3511
3512 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3513 unsigned int queue, int first_element,
3514 int count)
3515 {
3516 struct qeth_qdio_q *cq = card->qdio.c_q;
3517 int i;
3518 int rc;
3519
3520 if (!qeth_is_cq(card, queue))
3521 goto out;
3522
3523 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3524 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3525 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3526
3527 if (qdio_err) {
3528 netif_stop_queue(card->dev);
3529 qeth_schedule_recovery(card);
3530 goto out;
3531 }
3532
3533 if (card->options.performance_stats) {
3534 card->perf_stats.cq_cnt++;
3535 card->perf_stats.cq_start_time = qeth_get_micros();
3536 }
3537
3538 for (i = first_element; i < first_element + count; ++i) {
3539 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3540 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3541 int e = 0;
3542
3543 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3544 buffer->element[e].addr) {
3545 unsigned long phys_aob_addr;
3546
3547 phys_aob_addr = (unsigned long) buffer->element[e].addr;
3548 qeth_qdio_handle_aob(card, phys_aob_addr);
3549 ++e;
3550 }
3551 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3552 }
3553 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3554 card->qdio.c_q->next_buf_to_init,
3555 count);
3556 if (rc) {
3557 dev_warn(&card->gdev->dev,
3558 "QDIO reported an error, rc=%i\n", rc);
3559 QETH_CARD_TEXT(card, 2, "qcqherr");
3560 }
3561 card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3562 + count) % QDIO_MAX_BUFFERS_PER_Q;
3563
3564 netif_wake_queue(card->dev);
3565
3566 if (card->options.performance_stats) {
3567 int delta_t = qeth_get_micros();
3568 delta_t -= card->perf_stats.cq_start_time;
3569 card->perf_stats.cq_time += delta_t;
3570 }
3571 out:
3572 return;
3573 }
3574
3575 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3576 unsigned int qdio_err, int queue,
3577 int first_elem, int count,
3578 unsigned long card_ptr)
3579 {
3580 struct qeth_card *card = (struct qeth_card *)card_ptr;
3581
3582 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3583 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3584
3585 if (qeth_is_cq(card, queue))
3586 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3587 else if (qdio_err)
3588 qeth_schedule_recovery(card);
3589 }
3590
3591 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3592 unsigned int qdio_error, int __queue,
3593 int first_element, int count,
3594 unsigned long card_ptr)
3595 {
3596 struct qeth_card *card = (struct qeth_card *) card_ptr;
3597 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3598 struct qeth_qdio_out_buffer *buffer;
3599 int i;
3600
3601 QETH_CARD_TEXT(card, 6, "qdouhdl");
3602 if (qdio_error & QDIO_ERROR_FATAL) {
3603 QETH_CARD_TEXT(card, 2, "achkcond");
3604 netif_stop_queue(card->dev);
3605 qeth_schedule_recovery(card);
3606 return;
3607 }
3608 if (card->options.performance_stats) {
3609 card->perf_stats.outbound_handler_cnt++;
3610 card->perf_stats.outbound_handler_start_time =
3611 qeth_get_micros();
3612 }
3613 for (i = first_element; i < (first_element + count); ++i) {
3614 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3615 buffer = queue->bufs[bidx];
3616 qeth_handle_send_error(card, buffer, qdio_error);
3617
3618 if (queue->bufstates &&
3619 (queue->bufstates[bidx].flags &
3620 QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
3621 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
3622
3623 if (atomic_cmpxchg(&buffer->state,
3624 QETH_QDIO_BUF_PRIMED,
3625 QETH_QDIO_BUF_PENDING) ==
3626 QETH_QDIO_BUF_PRIMED) {
3627 qeth_notify_skbs(queue, buffer,
3628 TX_NOTIFY_PENDING);
3629 }
3630 QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
3631
3632 /* prepare the queue slot for re-use: */
3633 qeth_scrub_qdio_buffer(buffer->buffer,
3634 QETH_MAX_BUFFER_ELEMENTS(card));
3635 if (qeth_init_qdio_out_buf(queue, bidx)) {
3636 QETH_CARD_TEXT(card, 2, "outofbuf");
3637 qeth_schedule_recovery(card);
3638 }
3639 } else {
3640 if (card->options.cq == QETH_CQ_ENABLED) {
3641 enum iucv_tx_notify n;
3642
3643 n = qeth_compute_cq_notification(
3644 buffer->buffer->element[15].sflags, 0);
3645 qeth_notify_skbs(queue, buffer, n);
3646 }
3647
3648 qeth_clear_output_buffer(queue, buffer);
3649 }
3650 qeth_cleanup_handled_pending(queue, bidx, 0);
3651 }
3652 atomic_sub(count, &queue->used_buffers);
3653 /* check if we need to do something on this outbound queue */
3654 if (card->info.type != QETH_CARD_TYPE_IQD)
3655 qeth_check_outbound_queue(queue);
3656
3657 netif_wake_queue(queue->card->dev);
3658 if (card->options.performance_stats)
3659 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3660 card->perf_stats.outbound_handler_start_time;
3661 }
3662
3663 /* We cannot use outbound queue 3 for unicast packets on HiperSockets */
3664 static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
3665 {
3666 if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
3667 return 2;
3668 return queue_num;
3669 }
3670
3671 /*
3672 * Note: Function assumes that we have 4 outbound queues.
3673 */
3674 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3675 int ipv)
3676 {
3677 __be16 *tci;
3678 u8 tos;
3679
3680 switch (card->qdio.do_prio_queueing) {
3681 case QETH_PRIO_Q_ING_TOS:
3682 case QETH_PRIO_Q_ING_PREC:
3683 switch (ipv) {
3684 case 4:
3685 tos = ipv4_get_dsfield(ip_hdr(skb));
3686 break;
3687 case 6:
3688 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3689 break;
3690 default:
3691 return card->qdio.default_out_queue;
3692 }
3693 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3694 return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
3695 if (tos & IPTOS_MINCOST)
3696 return qeth_cut_iqd_prio(card, 3);
3697 if (tos & IPTOS_RELIABILITY)
3698 return 2;
3699 if (tos & IPTOS_THROUGHPUT)
3700 return 1;
3701 if (tos & IPTOS_LOWDELAY)
3702 return 0;
3703 break;
3704 case QETH_PRIO_Q_ING_SKB:
3705 if (skb->priority > 5)
3706 return 0;
3707 return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
3708 case QETH_PRIO_Q_ING_VLAN:
3709 tci = &((struct ethhdr *)skb->data)->h_proto;
3710 if (be16_to_cpu(*tci) == ETH_P_8021Q)
3711 return qeth_cut_iqd_prio(card,
3712 ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
3713 break;
3714 default:
3715 break;
3716 }
3717 return card->qdio.default_out_queue;
3718 }
3719 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
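/*
 * Worked example (illustrative) for QETH_PRIO_Q_ING_PREC above:
 * "~tos >> 6 & 3" inverts the two top-most precedence bits, so
 * tos = 0x00 (precedence 0) selects queue 3 and tos = 0xe0
 * (precedence 7) selects queue 0, i.e. higher precedence maps to a
 * lower (higher-priority) queue; on IQD, queue 3 is then remapped to
 * queue 2 by qeth_cut_iqd_prio().
 */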
3720
3721 /**
3722 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3723 * @skb: SKB address
3724 *
3725 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3726 * the fragmented part of the SKB. Returns zero for a linear SKB.
3727 */
3728 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3729 {
3730 int cnt, elements = 0;
3731
3732 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3733 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
3734
3735 elements += qeth_get_elements_for_range(
3736 (addr_t)skb_frag_address(frag),
3737 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3738 }
3739 return elements;
3740 }
3741
3742 /**
3743 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3744 * to transmit an skb.
3745 * @skb: the skb to operate on.
3746 * @data_offset: skip this part of the skb's linear data
3747 *
3748 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3749 * skb's data (both its linear part and paged fragments).
3750 */
3751 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3752 {
3753 unsigned int elements = qeth_get_elements_for_frags(skb);
3754 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3755 addr_t start = (addr_t)skb->data + data_offset;
3756
3757 if (start != end)
3758 elements += qeth_get_elements_for_range(start, end);
3759 return elements;
3760 }
3761 EXPORT_SYMBOL_GPL(qeth_count_elements);
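/*
 * Worked example (a sketch, assuming 4KB pages): a 5000-byte linear skb
 * whose data starts 256 bytes before a page boundary spans three pages
 * (256 + 4096 + 648 bytes) and therefore needs three buffer elements,
 * since a single element may not cross a page boundary.
 * qeth_get_elements_for_range() computes exactly this page count for the
 * [start, end) range.
 */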
3762
3763 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3764 MAX_TCP_HEADER)
3765
3766 /**
3767 * qeth_add_hw_header() - add a HW header to an skb.
3768 * @skb: skb that the HW header should be added to.
3769 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3770 * it contains a valid pointer to a qeth_hdr.
3771 * @hdr_len: length of the HW header.
3773 * @proto_len: length of protocol headers that need to be in the same page as
3774 * the HW header.
3774 *
3776 * Returns the pushed length. If the header can't be pushed on
3777 * (e.g. because it would cross a page boundary), it is allocated from
3778 * the cache instead and 0 is returned.
3779 * The number of needed buffer elements is returned in @elements.
3780 * An error while creating the header is indicated by a negative return value.
3780 */
3781 static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
3782 struct qeth_hdr **hdr, unsigned int hdr_len,
3783 unsigned int proto_len, unsigned int *elements)
3784 {
3785 const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3786 const unsigned int contiguous = proto_len ? proto_len : 1;
3787 unsigned int __elements;
3788 addr_t start, end;
3789 bool push_ok;
3790 int rc;
3791
3792 check_layout:
3793 start = (addr_t)skb->data - hdr_len;
3794 end = (addr_t)skb->data;
3795
3796 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3797 /* Push HW header into same page as first protocol header. */
3798 push_ok = true;
3799 /* ... but TSO always needs a separate element for headers: */
3800 if (skb_is_gso(skb))
3801 __elements = 1 + qeth_count_elements(skb, proto_len);
3802 else
3803 __elements = qeth_count_elements(skb, 0);
3804 } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
3805 /* Push HW header into a new page. */
3806 push_ok = true;
3807 __elements = 1 + qeth_count_elements(skb, 0);
3808 } else {
3809 /* Use header cache, copy protocol headers up. */
3810 push_ok = false;
3811 __elements = 1 + qeth_count_elements(skb, proto_len);
3812 }
3813
3814 /* Compress skb to fit into one IO buffer: */
3815 if (__elements > max_elements) {
3816 if (!skb_is_nonlinear(skb)) {
3817 /* Drop it, no easy way of shrinking it further. */
3818 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3819 max_elements, __elements, skb->len);
3820 return -E2BIG;
3821 }
3822
3823 rc = skb_linearize(skb);
3824 if (card->options.performance_stats) {
3825 if (rc)
3826 card->perf_stats.tx_linfail++;
3827 else
3828 card->perf_stats.tx_lin++;
3829 }
3830 if (rc)
3831 return rc;
3832
3833 /* Linearization changed the layout, re-evaluate: */
3834 goto check_layout;
3835 }
3836
3837 *elements = __elements;
3838 /* Add the header: */
3839 if (push_ok) {
3840 *hdr = skb_push(skb, hdr_len);
3841 return hdr_len;
3842 }
3843 /* fall back */
3844 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3845 return -E2BIG;
3846 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3847 if (!*hdr)
3848 return -ENOMEM;
3849 /* Copy protocol headers behind HW header: */
3850 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3851 return 0;
3852 }
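/*
 * Caller pattern sketch (illustrative; qeth_xmit() below follows it):
 *
 *	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len,
 *				      proto_len, &elements);
 *	if (push_len < 0)
 *		return push_len;
 *
 * A return of 0 means *hdr was allocated from qeth_core_header_cache, so
 * the caller must kmem_cache_free() it on any later error; a positive
 * return means the header lives inside the skb's headroom.
 */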
3853
3854 static void __qeth_fill_buffer(struct sk_buff *skb,
3855 struct qeth_qdio_out_buffer *buf,
3856 bool is_first_elem, unsigned int offset)
3857 {
3858 struct qdio_buffer *buffer = buf->buffer;
3859 int element = buf->next_element_to_fill;
3860 int length = skb_headlen(skb) - offset;
3861 char *data = skb->data + offset;
3862 int length_here, cnt;
3863
3864 /* map linear part into buffer element(s) */
3865 while (length > 0) {
3866 /* length_here is the remaining amount of data in this page */
3867 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3868 if (length < length_here)
3869 length_here = length;
3870
3871 buffer->element[element].addr = data;
3872 buffer->element[element].length = length_here;
3873 length -= length_here;
3874 if (is_first_elem) {
3875 is_first_elem = false;
3876 if (length || skb_is_nonlinear(skb))
3877 /* skb needs additional elements */
3878 buffer->element[element].eflags =
3879 SBAL_EFLAGS_FIRST_FRAG;
3880 else
3881 buffer->element[element].eflags = 0;
3882 } else {
3883 buffer->element[element].eflags =
3884 SBAL_EFLAGS_MIDDLE_FRAG;
3885 }
3886 data += length_here;
3887 element++;
3888 }
3889
3890 /* map page frags into buffer element(s) */
3891 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3892 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3893
3894 data = skb_frag_address(frag);
3895 length = skb_frag_size(frag);
3896 while (length > 0) {
3897 length_here = PAGE_SIZE -
3898 ((unsigned long) data % PAGE_SIZE);
3899 if (length < length_here)
3900 length_here = length;
3901
3902 buffer->element[element].addr = data;
3903 buffer->element[element].length = length_here;
3904 buffer->element[element].eflags =
3905 SBAL_EFLAGS_MIDDLE_FRAG;
3906 length -= length_here;
3907 data += length_here;
3908 element++;
3909 }
3910 }
3911
3912 if (buffer->element[element - 1].eflags)
3913 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3914 buf->next_element_to_fill = element;
3915 }
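/*
 * Example element layout (illustrative only): an skb with a one-page
 * linear part and a single page frag, filled at offset 0, ends up as
 *
 *	element[n].eflags   = SBAL_EFLAGS_FIRST_FRAG	(linear data)
 *	element[n+1].eflags = SBAL_EFLAGS_LAST_FRAG	(frag data)
 *
 * because the final fixup above rewrites the last element's MIDDLE_FRAG
 * marking. A fully linear skb that fits one element keeps eflags == 0 for
 * that element.
 */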
3916
3917 /**
3918 * qeth_fill_buffer() - map skb into an output buffer
3919 * @queue: QDIO queue to submit the buffer on
3920 * @buf: buffer to transport the skb
3921 * @skb: skb to map into the buffer
3922 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
3923 * from qeth_core_header_cache.
3924 * @offset: when mapping the skb, start at skb->data + offset
3925 * @hd_len: if > 0, build a dedicated header element of this size
3926 */
3927 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3928 struct qeth_qdio_out_buffer *buf,
3929 struct sk_buff *skb, struct qeth_hdr *hdr,
3930 unsigned int offset, unsigned int hd_len)
3931 {
3932 struct qdio_buffer *buffer = buf->buffer;
3933 bool is_first_elem = true;
3934 int flush_cnt = 0;
3935
3936 __skb_queue_tail(&buf->skb_list, skb);
3937
3938 /* build dedicated header element */
3939 if (hd_len) {
3940 int element = buf->next_element_to_fill;
3941 is_first_elem = false;
3942
3943 buffer->element[element].addr = hdr;
3944 buffer->element[element].length = hd_len;
3945 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3946 /* remember to free cache-allocated qeth_hdr: */
3947 buf->is_header[element] = ((void *)hdr != skb->data);
3948 buf->next_element_to_fill++;
3949 }
3950
3951 __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3952
3953 if (!queue->do_pack) {
3954 QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
3955 /* set state to PRIMED -> will be flushed */
3956 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3957 flush_cnt = 1;
3958 } else {
3959 QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
3960 if (queue->card->options.performance_stats)
3961 queue->card->perf_stats.skbs_sent_pack++;
3962 if (buf->next_element_to_fill >=
3963 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
3964 /*
3965 * packed buffer is full -> set state PRIMED
3966 * -> will be flushed
3967 */
3968 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3969 flush_cnt = 1;
3970 }
3971 }
3972 return flush_cnt;
3973 }
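/*
 * Packing note (illustrative): in packing mode several small skbs are
 * mapped into the same output buffer; the function only reports a flush
 * (return 1) once next_element_to_fill reaches
 * QETH_MAX_BUFFER_ELEMENTS(), so callers advance next_buf_to_fill by the
 * returned count and small frames get batched into one SBAL.
 */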
3974
3975 static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
3976 struct sk_buff *skb, struct qeth_hdr *hdr,
3977 unsigned int offset, unsigned int hd_len)
3978 {
3979 int index = queue->next_buf_to_fill;
3980 struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
3981
3982 /*
3983 * check if buffer is empty to make sure that we do not 'overtake'
3984 * ourselves and try to fill a buffer that is already primed
3985 */
3986 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3987 return -EBUSY;
3988 queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
3989 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3990 qeth_flush_buffers(queue, index, 1);
3991 return 0;
3992 }
3993
3994 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3995 struct sk_buff *skb, struct qeth_hdr *hdr,
3996 unsigned int offset, unsigned int hd_len,
3997 int elements_needed)
3998 {
3999 struct qeth_qdio_out_buffer *buffer;
4000 int start_index;
4001 int flush_count = 0;
4002 int do_pack = 0;
4003 int tmp;
4004 int rc = 0;
4005
4006 /* spin until we get the queue ... */
4007 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4008 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4009 start_index = queue->next_buf_to_fill;
4010 buffer = queue->bufs[queue->next_buf_to_fill];
4011 /*
4012 * check if buffer is empty to make sure that we do not 'overtake'
4013 * ourselves and try to fill a buffer that is already primed
4014 */
4015 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4016 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4017 return -EBUSY;
4018 }
4019 /* check if we need to switch packing state of this queue */
4020 qeth_switch_to_packing_if_needed(queue);
4021 if (queue->do_pack) {
4022 do_pack = 1;
4023 /* does packet fit in current buffer? */
4024 if ((QETH_MAX_BUFFER_ELEMENTS(card) -
4025 buffer->next_element_to_fill) < elements_needed) {
4026 /* ... no -> set state PRIMED */
4027 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4028 flush_count++;
4029 queue->next_buf_to_fill =
4030 (queue->next_buf_to_fill + 1) %
4031 QDIO_MAX_BUFFERS_PER_Q;
4032 buffer = queue->bufs[queue->next_buf_to_fill];
4033 /* we did a step forward, so check buffer state
4034 * again */
4035 if (atomic_read(&buffer->state) !=
4036 QETH_QDIO_BUF_EMPTY) {
4037 qeth_flush_buffers(queue, start_index,
4038 flush_count);
4039 atomic_set(&queue->state,
4040 QETH_OUT_Q_UNLOCKED);
4041 rc = -EBUSY;
4042 goto out;
4043 }
4044 }
4045 }
4046 tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
4047 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4048 QDIO_MAX_BUFFERS_PER_Q;
4049 flush_count += tmp;
4050 if (flush_count)
4051 qeth_flush_buffers(queue, start_index, flush_count);
4052 else if (!atomic_read(&queue->set_pci_flags_count))
4053 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4054 /*
4055 * queue->state will go from LOCKED -> UNLOCKED or from
4056 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4057 * (switch packing state or flush buffer to get another pci flag out).
4058 * In that case we will enter this loop
4059 */
4060 while (atomic_dec_return(&queue->state)) {
4061 start_index = queue->next_buf_to_fill;
4062 /* check if we can go back to non-packing state */
4063 tmp = qeth_switch_to_nonpacking_if_needed(queue);
4064 /*
4065 * check if we need to flush a packing buffer to get a pci
4066 * flag out on the queue
4067 */
4068 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4069 tmp = qeth_prep_flush_pack_buffer(queue);
4070 if (tmp) {
4071 qeth_flush_buffers(queue, start_index, tmp);
4072 flush_count += tmp;
4073 }
4074 }
4075 out:
4076 /* at this point the queue is UNLOCKED again */
4077 if (queue->card->options.performance_stats && do_pack)
4078 queue->card->perf_stats.bufs_sent_pack += flush_count;
4079
4080 return rc;
4081 }
4082 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
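/*
 * Queue state sketch (an interpretation of the code above, not
 * authoritative): queue->state doubles as lock and notification counter.
 * UNLOCKED -> LOCKED via cmpxchg while buffers are filled; bumping it to
 * LOCKED_FLUSH signals that another flush pass is wanted. The
 * atomic_dec_return() drain loop therefore keeps running while such
 * notifications arrive and releases the queue exactly when the counter
 * drops back to UNLOCKED (0).
 */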
4083
4084 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4085 unsigned int payload_len, struct sk_buff *skb,
4086 unsigned int proto_len)
4087 {
4088 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4089
4090 ext->hdr_tot_len = sizeof(*ext);
4091 ext->imb_hdr_no = 1;
4092 ext->hdr_type = 1;
4093 ext->hdr_version = 1;
4094 ext->hdr_len = 28;
4095 ext->payload_len = payload_len;
4096 ext->mss = skb_shinfo(skb)->gso_size;
4097 ext->dg_hdr_len = proto_len;
4098 }
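/*
 * Worked example (illustrative only): for a layer-2 TSO skb with
 * ETH + IPv4 + TCP headers, proto_len is typically 14 + 20 + 20 = 54.
 * With gso_size 1460 this yields ext->mss = 1460, ext->dg_hdr_len = 54
 * and ext->payload_len = frame length - 54, i.e. only the TCP payload
 * that the adapter segments into MSS-sized frames.
 */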
4099
4100 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4101 struct qeth_qdio_out_q *queue, int ipv, int cast_type,
4102 void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
4103 struct sk_buff *skb, int ipv, int cast_type,
4104 unsigned int data_len))
4105 {
4106 unsigned int proto_len, hw_hdr_len;
4107 unsigned int frame_len = skb->len;
4108 bool is_tso = skb_is_gso(skb);
4109 unsigned int data_offset = 0;
4110 struct qeth_hdr *hdr = NULL;
4111 unsigned int hd_len = 0;
4112 unsigned int elements;
4113 int push_len, rc;
4114 bool is_sg;
4115
4116 if (is_tso) {
4117 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4118 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4119 } else {
4120 hw_hdr_len = sizeof(struct qeth_hdr);
4121 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4122 }
4123
4124 rc = skb_cow_head(skb, hw_hdr_len);
4125 if (rc)
4126 return rc;
4127
4128 push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
4129 &elements);
4130 if (push_len < 0)
4131 return push_len;
4132 if (is_tso || !push_len) {
4133 /* HW header needs its own buffer element. */
4134 hd_len = hw_hdr_len + proto_len;
4135 data_offset = push_len + proto_len;
4136 }
4137 memset(hdr, 0, hw_hdr_len);
4138 fill_header(card, hdr, skb, ipv, cast_type, frame_len);
4139 if (is_tso)
4140 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4141 frame_len - proto_len, skb, proto_len);
4142
4143 is_sg = skb_is_nonlinear(skb);
4144 if (IS_IQD(card)) {
4145 rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
4146 hd_len);
4147 } else {
4148 /* TODO: drop skb_orphan() once TX completion is fast enough */
4149 skb_orphan(skb);
4150 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4151 hd_len, elements);
4152 }
4153
4154 if (!rc) {
4155 if (card->options.performance_stats) {
4156 card->perf_stats.buf_elements_sent += elements;
4157 if (is_sg)
4158 card->perf_stats.sg_skbs_sent++;
4159 if (is_tso) {
4160 card->perf_stats.large_send_bytes += frame_len;
4161 card->perf_stats.large_send_cnt++;
4162 }
4163 }
4164 } else {
4165 if (!push_len)
4166 kmem_cache_free(qeth_core_header_cache, hdr);
4167 if (rc == -EBUSY)
4168 /* roll back to ETH header */
4169 skb_pull(skb, push_len);
4170 }
4171 return rc;
4172 }
4173 EXPORT_SYMBOL_GPL(qeth_xmit);
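/*
 * Usage sketch (hypothetical, modelled on what a discipline driver does;
 * my_fill_header is not a real function): the discipline only provides
 * the descriptor fill, everything else is handled here.
 *
 *	static void my_fill_header(struct qeth_card *card,
 *				   struct qeth_hdr *hdr, struct sk_buff *skb,
 *				   int ipv, int cast_type,
 *				   unsigned int data_len)
 *	{
 *		hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
 *		hdr->hdr.l2.pkt_length = data_len;
 *	}
 *
 *	rc = qeth_xmit(card, skb, queue, ipv, cast_type, my_fill_header);
 */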
4174
4175 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4176 struct qeth_reply *reply, unsigned long data)
4177 {
4178 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4179 struct qeth_ipacmd_setadpparms *setparms;
4180
4181 QETH_CARD_TEXT(card, 4, "prmadpcb");
4182
4183 setparms = &(cmd->data.setadapterparms);
4184 if (qeth_setadpparms_inspect_rc(cmd)) {
4185 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4186 setparms->data.mode = SET_PROMISC_MODE_OFF;
4187 }
4188 card->info.promisc_mode = setparms->data.mode;
4189 return 0;
4190 }
4191
4192 void qeth_setadp_promisc_mode(struct qeth_card *card)
4193 {
4194 enum qeth_ipa_promisc_modes mode;
4195 struct net_device *dev = card->dev;
4196 struct qeth_cmd_buffer *iob;
4197 struct qeth_ipa_cmd *cmd;
4198
4199 QETH_CARD_TEXT(card, 4, "setprom");
4200
4201 if (((dev->flags & IFF_PROMISC) &&
4202 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
4203 (!(dev->flags & IFF_PROMISC) &&
4204 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
4205 return;
4206 mode = SET_PROMISC_MODE_OFF;
4207 if (dev->flags & IFF_PROMISC)
4208 mode = SET_PROMISC_MODE_ON;
4209 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4210
4211 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4212 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
4213 if (!iob)
4214 return;
4215 cmd = __ipa_cmd(iob);
4216 cmd->data.setadapterparms.data.mode = mode;
4217 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4218 }
4219 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4220
4221 struct net_device_stats *qeth_get_stats(struct net_device *dev)
4222 {
4223 struct qeth_card *card;
4224
4225 card = dev->ml_priv;
4226
4227 QETH_CARD_TEXT(card, 5, "getstat");
4228
4229 return &card->stats;
4230 }
4231 EXPORT_SYMBOL_GPL(qeth_get_stats);
4232
4233 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4234 struct qeth_reply *reply, unsigned long data)
4235 {
4236 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4237 struct qeth_ipacmd_setadpparms *adp_cmd;
4238
4239 QETH_CARD_TEXT(card, 4, "chgmaccb");
4240 if (qeth_setadpparms_inspect_rc(cmd))
4241 return 0;
4242
4243 adp_cmd = &cmd->data.setadapterparms;
4244 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4245 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4246 return 0;
4247
4248 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4249 return 0;
4250 }
4251
4252 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4253 {
4254 int rc;
4255 struct qeth_cmd_buffer *iob;
4256 struct qeth_ipa_cmd *cmd;
4257
4258 QETH_CARD_TEXT(card, 4, "chgmac");
4259
4260 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4261 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4262 sizeof(struct qeth_change_addr));
4263 if (!iob)
4264 return -ENOMEM;
4265 cmd = __ipa_cmd(iob);
4266 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4267 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4268 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4269 card->dev->dev_addr);
4270 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4271 NULL);
4272 return rc;
4273 }
4274 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4275
4276 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4277 struct qeth_reply *reply, unsigned long data)
4278 {
4279 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4280 struct qeth_set_access_ctrl *access_ctrl_req;
4281 int fallback = *(int *)reply->param;
4282
4283 QETH_CARD_TEXT(card, 4, "setaccb");
4284 if (cmd->hdr.return_code)
4285 return 0;
4286 qeth_setadpparms_inspect_rc(cmd);
4287
4288 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4289 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4290 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4291 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
4292 cmd->data.setadapterparms.hdr.return_code);
4293 if (cmd->data.setadapterparms.hdr.return_code !=
4294 SET_ACCESS_CTRL_RC_SUCCESS)
4295 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4296 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4297 cmd->data.setadapterparms.hdr.return_code);
4298 switch (cmd->data.setadapterparms.hdr.return_code) {
4299 case SET_ACCESS_CTRL_RC_SUCCESS:
4300 if (card->options.isolation == ISOLATION_MODE_NONE) {
4301 dev_info(&card->gdev->dev,
4302 "QDIO data connection isolation is deactivated\n");
4303 } else {
4304 dev_info(&card->gdev->dev,
4305 "QDIO data connection isolation is activated\n");
4306 }
4307 break;
4308 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4309 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4310 CARD_DEVID(card));
4311 if (fallback)
4312 card->options.isolation = card->options.prev_isolation;
4313 break;
4314 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4315 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4316 CARD_DEVID(card));
4317 if (fallback)
4318 card->options.isolation = card->options.prev_isolation;
4319 break;
4320 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4321 dev_err(&card->gdev->dev, "Adapter does not "
4322 "support QDIO data connection isolation\n");
4323 break;
4324 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4325 dev_err(&card->gdev->dev,
4326 "Adapter is dedicated. "
4327 "QDIO data connection isolation not supported\n");
4328 if (fallback)
4329 card->options.isolation = card->options.prev_isolation;
4330 break;
4331 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4332 dev_err(&card->gdev->dev,
4333 "TSO does not permit QDIO data connection isolation\n");
4334 if (fallback)
4335 card->options.isolation = card->options.prev_isolation;
4336 break;
4337 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4338 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4339 "support reflective relay mode\n");
4340 if (fallback)
4341 card->options.isolation = card->options.prev_isolation;
4342 break;
4343 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4344 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4345 "enabled at the adjacent switch port");
4346 if (fallback)
4347 card->options.isolation = card->options.prev_isolation;
4348 break;
4349 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4350 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4351 "at the adjacent switch failed\n");
4352 break;
4353 default:
4354 /* this should never happen */
4355 if (fallback)
4356 card->options.isolation = card->options.prev_isolation;
4357 break;
4358 }
4359 return 0;
4360 }
4361
4362 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4363 enum qeth_ipa_isolation_modes isolation, int fallback)
4364 {
4365 int rc;
4366 struct qeth_cmd_buffer *iob;
4367 struct qeth_ipa_cmd *cmd;
4368 struct qeth_set_access_ctrl *access_ctrl_req;
4369
4370 QETH_CARD_TEXT(card, 4, "setacctl");
4371
4372 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
4373 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4374
4375 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4376 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4377 sizeof(struct qeth_set_access_ctrl));
4378 if (!iob)
4379 return -ENOMEM;
4380 cmd = __ipa_cmd(iob);
4381 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4382 access_ctrl_req->subcmd_code = isolation;
4383
4384 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4385 &fallback);
4386 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
4387 return rc;
4388 }
4389
4390 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4391 {
4392 int rc = 0;
4393
4394 QETH_CARD_TEXT(card, 4, "setactlo");
4395
4396 if ((card->info.type == QETH_CARD_TYPE_OSD ||
4397 card->info.type == QETH_CARD_TYPE_OSX) &&
4398 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4399 rc = qeth_setadpparms_set_access_ctrl(card,
4400 card->options.isolation, fallback);
4401 if (rc) {
4402 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sending failed\n",
4403 rc, CARD_DEVID(card));
4404 rc = -EOPNOTSUPP;
4405 }
4406 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4407 card->options.isolation = ISOLATION_MODE_NONE;
4408
4409 dev_err(&card->gdev->dev, "Adapter does not "
4410 "support QDIO data connection isolation\n");
4411 rc = -EOPNOTSUPP;
4412 }
4413 return rc;
4414 }
4415 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4416
4417 void qeth_tx_timeout(struct net_device *dev)
4418 {
4419 struct qeth_card *card;
4420
4421 card = dev->ml_priv;
4422 QETH_CARD_TEXT(card, 4, "txtimeo");
4423 card->stats.tx_errors++;
4424 qeth_schedule_recovery(card);
4425 }
4426 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4427
4428 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4429 {
4430 struct qeth_card *card = dev->ml_priv;
4431 int rc = 0;
4432
4433 switch (regnum) {
4434 case MII_BMCR: /* Basic mode control register */
4435 rc = BMCR_FULLDPLX;
4436 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4437 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4438 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4439 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4440 rc |= BMCR_SPEED100;
4441 break;
4442 case MII_BMSR: /* Basic mode status register */
4443 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4444 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4445 BMSR_100BASE4;
4446 break;
4447 case MII_PHYSID1: /* PHYS ID 1 */
4448 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4449 dev->dev_addr[2];
4450 rc = (rc >> 5) & 0xFFFF;
4451 break;
4452 case MII_PHYSID2: /* PHYS ID 2 */
4453 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4454 break;
4455 case MII_ADVERTISE: /* Advertisement control reg */
4456 rc = ADVERTISE_ALL;
4457 break;
4458 case MII_LPA: /* Link partner ability reg */
4459 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4460 LPA_100BASE4 | LPA_LPACK;
4461 break;
4462 case MII_EXPANSION: /* Expansion register */
4463 break;
4464 case MII_DCOUNTER: /* disconnect counter */
4465 break;
4466 case MII_FCSCOUNTER: /* false carrier counter */
4467 break;
4468 case MII_NWAYTEST: /* N-way auto-neg test register */
4469 break;
4470 case MII_RERRCOUNTER: /* rx error counter */
4471 rc = card->stats.rx_errors;
4472 break;
4473 case MII_SREVISION: /* silicon revision */
4474 break;
4475 case MII_RESV1: /* reserved 1 */
4476 break;
4477 case MII_LBRERROR: /* loopback, rx, bypass error */
4478 break;
4479 case MII_PHYADDR: /* physical address */
4480 break;
4481 case MII_RESV2: /* reserved 2 */
4482 break;
4483 case MII_TPISTATUS: /* TPI status for 10mbps */
4484 break;
4485 case MII_NCONFIG: /* network interface config */
4486 break;
4487 default:
4488 break;
4489 }
4490 return rc;
4491 }
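/*
 * Worked example (illustrative only): the emulated PHY ID is derived from
 * the MAC address. For dev_addr 02:11:22:xx:xx:xx, MII_PHYSID1 reads
 * ((0x02 << 16) | (0x11 << 8) | 0x22) >> 5 & 0xFFFF = 0x1089, and
 * MII_PHYSID2 reads (0x22 << 10) & 0xFFFF = 0x8800.
 */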
4492
4493 static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
4494 struct qeth_cmd_buffer *iob, int len,
4495 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
4496 unsigned long),
4497 void *reply_param)
4498 {
4499 u16 s1, s2;
4500
4501 QETH_CARD_TEXT(card, 4, "sendsnmp");
4502
4503 /* adjust PDU length fields in IPA_PDU_HEADER */
4504 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4505 s2 = (u32) len;
4506 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4507 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4508 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4509 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4510 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4511 reply_cb, reply_param);
4512 }
4513
4514 static int qeth_snmp_command_cb(struct qeth_card *card,
4515 struct qeth_reply *reply, unsigned long sdata)
4516 {
4517 struct qeth_ipa_cmd *cmd;
4518 struct qeth_arp_query_info *qinfo;
4519 unsigned char *data;
4520 void *snmp_data;
4521 __u16 data_len;
4522
4523 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4524
4525 cmd = (struct qeth_ipa_cmd *) sdata;
4526 data = (unsigned char *)((char *)cmd - reply->offset);
4527 qinfo = (struct qeth_arp_query_info *) reply->param;
4528
4529 if (cmd->hdr.return_code) {
4530 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4531 return 0;
4532 }
4533 if (cmd->data.setadapterparms.hdr.return_code) {
4534 cmd->hdr.return_code =
4535 cmd->data.setadapterparms.hdr.return_code;
4536 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4537 return 0;
4538 }
4539 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4540 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4541 snmp_data = &cmd->data.setadapterparms.data.snmp;
4542 data_len -= offsetof(struct qeth_ipa_cmd,
4543 data.setadapterparms.data.snmp);
4544 } else {
4545 snmp_data = &cmd->data.setadapterparms.data.snmp.request;
4546 data_len -= offsetof(struct qeth_ipa_cmd,
4547 data.setadapterparms.data.snmp.request);
4548 }
4549
4550 /* check if there is enough room in userspace */
4551 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4552 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
4553 cmd->hdr.return_code = IPA_RC_ENOMEM;
4554 return 0;
4555 }
4556 QETH_CARD_TEXT_(card, 4, "snore%i",
4557 cmd->data.setadapterparms.hdr.used_total);
4558 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4559 cmd->data.setadapterparms.hdr.seq_no);
4560 /* copy entries to user buffer */
4561 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4562 qinfo->udata_offset += data_len;
4563
4564 /* check if all replies received ... */
4565 QETH_CARD_TEXT_(card, 4, "srtot%i",
4566 cmd->data.setadapterparms.hdr.used_total);
4567 QETH_CARD_TEXT_(card, 4, "srseq%i",
4568 cmd->data.setadapterparms.hdr.seq_no);
4569 if (cmd->data.setadapterparms.hdr.seq_no <
4570 cmd->data.setadapterparms.hdr.used_total)
4571 return 1;
4572 return 0;
4573 }
4574
4575 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4576 {
4577 struct qeth_cmd_buffer *iob;
4578 struct qeth_ipa_cmd *cmd;
4579 struct qeth_snmp_ureq *ureq;
4580 unsigned int req_len;
4581 struct qeth_arp_query_info qinfo = {0, };
4582 int rc = 0;
4583
4584 QETH_CARD_TEXT(card, 3, "snmpcmd");
4585
4586 if (card->info.guestlan)
4587 return -EOPNOTSUPP;
4588
4589 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4590 IS_LAYER3(card))
4591 return -EOPNOTSUPP;
4592
4593 /* skip 4 bytes (data_len struct member) to get req_len */
4594 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4595 return -EFAULT;
4596 if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
4597 sizeof(struct qeth_ipacmd_hdr) -
4598 sizeof(struct qeth_ipacmd_setadpparms_hdr)))
4599 return -EINVAL;
4600 ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
4601 if (IS_ERR(ureq)) {
4602 QETH_CARD_TEXT(card, 2, "snmpnome");
4603 return PTR_ERR(ureq);
4604 }
4605 qinfo.udata_len = ureq->hdr.data_len;
4606 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4607 if (!qinfo.udata) {
4608 kfree(ureq);
4609 return -ENOMEM;
4610 }
4611 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4612
4613 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4614 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4615 if (!iob) {
4616 rc = -ENOMEM;
4617 goto out;
4618 }
4619 cmd = __ipa_cmd(iob);
4620 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4621 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4622 qeth_snmp_command_cb, (void *)&qinfo);
4623 if (rc) {
4624 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4625 CARD_DEVID(card), rc);
4626 } else {
4627 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4628 rc = -EFAULT;
4629 }
4630 out:
4631 kfree(ureq);
4632 kfree(qinfo.udata);
4633 return rc;
4634 }
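/*
 * Request layout sketch (as implied by the parsing above): userspace
 * passes a struct qeth_snmp_ureq, i.e. a qeth_snmp_ureq_hdr
 * (data_len = size of the user reply buffer, req_len = size of the SNMP
 * command) immediately followed by req_len bytes of SNMP payload. The
 * replies of all sequenced IPA fragments are concatenated into
 * qinfo.udata and copied back to userspace in one piece.
 */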
4635
4636 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4637 struct qeth_reply *reply, unsigned long data)
4638 {
4639 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4640 struct qeth_qoat_priv *priv;
4641 char *resdata;
4642 int resdatalen;
4643
4644 QETH_CARD_TEXT(card, 3, "qoatcb");
4645 if (qeth_setadpparms_inspect_rc(cmd))
4646 return 0;
4647
4648 priv = (struct qeth_qoat_priv *)reply->param;
4649 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4650 resdata = (char *)data + 28;
4651
4652 if (resdatalen > (priv->buffer_len - priv->response_len)) {
4653 cmd->hdr.return_code = IPA_RC_FFFF;
4654 return 0;
4655 }
4656
4657 memcpy((priv->buffer + priv->response_len), resdata,
4658 resdatalen);
4659 priv->response_len += resdatalen;
4660
4661 if (cmd->data.setadapterparms.hdr.seq_no <
4662 cmd->data.setadapterparms.hdr.used_total)
4663 return 1;
4664 return 0;
4665 }
4666
4667 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4668 {
4669 int rc = 0;
4670 struct qeth_cmd_buffer *iob;
4671 struct qeth_ipa_cmd *cmd;
4672 struct qeth_query_oat *oat_req;
4673 struct qeth_query_oat_data oat_data;
4674 struct qeth_qoat_priv priv;
4675 void __user *tmp;
4676
4677 QETH_CARD_TEXT(card, 3, "qoatcmd");
4678
4679 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4680 rc = -EOPNOTSUPP;
4681 goto out;
4682 }
4683
4684 if (copy_from_user(&oat_data, udata,
4685 sizeof(struct qeth_query_oat_data))) {
4686 rc = -EFAULT;
4687 goto out;
4688 }
4689
4690 priv.buffer_len = oat_data.buffer_len;
4691 priv.response_len = 0;
4692 priv.buffer = vzalloc(oat_data.buffer_len);
4693 if (!priv.buffer) {
4694 rc = -ENOMEM;
4695 goto out;
4696 }
4697
4698 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4699 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4700 sizeof(struct qeth_query_oat));
4701 if (!iob) {
4702 rc = -ENOMEM;
4703 goto out_free;
4704 }
4705 cmd = __ipa_cmd(iob);
4706 oat_req = &cmd->data.setadapterparms.data.query_oat;
4707 oat_req->subcmd_code = oat_data.command;
4708
4709 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4710 &priv);
4711 if (!rc) {
4712 if (is_compat_task())
4713 tmp = compat_ptr(oat_data.ptr);
4714 else
4715 tmp = (void __user *)(unsigned long)oat_data.ptr;
4716
4717 if (copy_to_user(tmp, priv.buffer,
4718 priv.response_len)) {
4719 rc = -EFAULT;
4720 goto out_free;
4721 }
4722
4723 oat_data.response_len = priv.response_len;
4724
4725 if (copy_to_user(udata, &oat_data,
4726 sizeof(struct qeth_query_oat_data)))
4727 rc = -EFAULT;
4728 } else if (rc == IPA_RC_FFFF) {
4729 rc = -EFAULT;
4730 }
4731
4732 out_free:
4733 vfree(priv.buffer);
4734 out:
4735 return rc;
4736 }
4737
4738 static int qeth_query_card_info_cb(struct qeth_card *card,
4739 struct qeth_reply *reply, unsigned long data)
4740 {
4741 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4742 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4743 struct qeth_query_card_info *card_info;
4744
4745 QETH_CARD_TEXT(card, 2, "qcrdincb");
4746 if (qeth_setadpparms_inspect_rc(cmd))
4747 return 0;
4748
4749 card_info = &cmd->data.setadapterparms.data.card_info;
4750 carrier_info->card_type = card_info->card_type;
4751 carrier_info->port_mode = card_info->port_mode;
4752 carrier_info->port_speed = card_info->port_speed;
4753 return 0;
4754 }
4755
4756 static int qeth_query_card_info(struct qeth_card *card,
4757 struct carrier_info *carrier_info)
4758 {
4759 struct qeth_cmd_buffer *iob;
4760
4761 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4762 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4763 return -EOPNOTSUPP;
4764 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
4765 sizeof(struct qeth_ipacmd_setadpparms_hdr));
4766 if (!iob)
4767 return -ENOMEM;
4768 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4769 (void *)carrier_info);
4770 }
4771
4772 /**
4773 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4774 * @card: pointer to a qeth_card
4775 *
4776 * Returns:
4777 * 0, if a MAC address has been set for the card's netdevice,
4778 * a negative error code, for various error conditions.
4779 */
4780 int qeth_vm_request_mac(struct qeth_card *card)
4781 {
4782 struct diag26c_mac_resp *response;
4783 struct diag26c_mac_req *request;
4784 struct ccw_dev_id id;
4785 int rc;
4786
4787 QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
4788
4789 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4790 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4791 if (!request || !response) {
4792 rc = -ENOMEM;
4793 goto out;
4794 }
4795
4796 ccw_device_get_id(CARD_DDEV(card), &id);
4797 request->resp_buf_len = sizeof(*response);
4798 request->resp_version = DIAG26C_VERSION2;
4799 request->op_code = DIAG26C_GET_MAC;
4800 request->devno = id.devno;
4801
4802 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4803 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4804 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4805 if (rc)
4806 goto out;
4807 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4808
4809 if (request->resp_buf_len < sizeof(*response) ||
4810 response->version != request->resp_version) {
4811 rc = -EIO;
4812 QETH_DBF_TEXT(SETUP, 2, "badresp");
4813 QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
4814 sizeof(request->resp_buf_len));
4815 } else if (!is_valid_ether_addr(response->mac)) {
4816 rc = -EINVAL;
4817 QETH_DBF_TEXT(SETUP, 2, "badmac");
4818 QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
4819 } else {
4820 ether_addr_copy(card->dev->dev_addr, response->mac);
4821 }
4822
4823 out:
4824 kfree(response);
4825 kfree(request);
4826 return rc;
4827 }
4828 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4829
4830 static int qeth_get_qdio_q_format(struct qeth_card *card)
4831 {
4832 if (card->info.type == QETH_CARD_TYPE_IQD)
4833 return QDIO_IQDIO_QFMT;
4834 else
4835 return QDIO_QETH_QFMT;
4836 }
4837
4838 static void qeth_determine_capabilities(struct qeth_card *card)
4839 {
4840 int rc;
4841 int length;
4842 char *prcd;
4843 struct ccw_device *ddev;
4844 int ddev_offline = 0;
4845
4846 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4847 ddev = CARD_DDEV(card);
4848 if (!ddev->online) {
4849 ddev_offline = 1;
4850 rc = ccw_device_set_online(ddev);
4851 if (rc) {
4852 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4853 goto out;
4854 }
4855 }
4856
4857 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4858 if (rc) {
4859 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4860 CARD_DEVID(card), rc);
4861 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4862 goto out_offline;
4863 }
4864 qeth_configure_unitaddr(card, prcd);
4865 if (ddev_offline)
4866 qeth_configure_blkt_default(card, prcd);
4867 kfree(prcd);
4868
4869 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4870 if (rc)
4871 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4872
4873 QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
4874 QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
4875 QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
4876 QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
4877 QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
4878 if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
4879 (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
4880 (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
4881 dev_info(&card->gdev->dev,
4882 "Completion Queueing supported\n");
4883 } else {
4884 card->options.cq = QETH_CQ_NOTAVAILABLE;
4885 }
4886
4888 out_offline:
4889 if (ddev_offline == 1)
4890 ccw_device_set_offline(ddev);
4891 out:
4892 return;
4893 }
4894
4895 static void qeth_qdio_establish_cq(struct qeth_card *card,
4896 struct qdio_buffer **in_sbal_ptrs,
4897 void (**queue_start_poll)
4898 (struct ccw_device *, int,
4899 unsigned long))
4900 {
4901 int i;
4902
4903 if (card->options.cq == QETH_CQ_ENABLED) {
4904 int offset = QDIO_MAX_BUFFERS_PER_Q *
4905 (card->qdio.no_in_queues - 1);
4906 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4907 in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4908 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4909 }
4910
4911 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4912 }
4913 }
4914
4915 static int qeth_qdio_establish(struct qeth_card *card)
4916 {
4917 struct qdio_initialize init_data;
4918 char *qib_param_field;
4919 struct qdio_buffer **in_sbal_ptrs;
4920 void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4921 struct qdio_buffer **out_sbal_ptrs;
4922 int i, j, k;
4923 int rc = 0;
4924
4925 QETH_DBF_TEXT(SETUP, 2, "qdioest");
4926
4927 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
4928 GFP_KERNEL);
4929 if (!qib_param_field) {
4930 rc = -ENOMEM;
4931 goto out_free_nothing;
4932 }
4933
4934 qeth_create_qib_param_field(card, qib_param_field);
4935 qeth_create_qib_param_field_blkt(card, qib_param_field);
4936
4937 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4938 sizeof(void *),
4939 GFP_KERNEL);
4940 if (!in_sbal_ptrs) {
4941 rc = -ENOMEM;
4942 goto out_free_qib_param;
4943 }
4944 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4945 in_sbal_ptrs[i] = (struct qdio_buffer *)
4946 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4947 }
4948
4949 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4950 GFP_KERNEL);
4951 if (!queue_start_poll) {
4952 rc = -ENOMEM;
4953 goto out_free_in_sbals;
4954 }
4955 for (i = 0; i < card->qdio.no_in_queues; ++i)
4956 queue_start_poll[i] = qeth_qdio_start_poll;
4957
4958 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4959
4960 out_sbal_ptrs =
4961 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4962 sizeof(void *),
4963 GFP_KERNEL);
4964 if (!out_sbal_ptrs) {
4965 rc = -ENOMEM;
4966 goto out_free_queue_start_poll;
4967 }
4968 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4969 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4970 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4971 card->qdio.out_qs[i]->bufs[j]->buffer);
4972 }
4973
4974 memset(&init_data, 0, sizeof(struct qdio_initialize));
4975 init_data.cdev = CARD_DDEV(card);
4976 init_data.q_format = qeth_get_qdio_q_format(card);
4977 init_data.qib_param_field_format = 0;
4978 init_data.qib_param_field = qib_param_field;
4979 init_data.no_input_qs = card->qdio.no_in_queues;
4980 init_data.no_output_qs = card->qdio.no_out_queues;
4981 init_data.input_handler = qeth_qdio_input_handler;
4982 init_data.output_handler = qeth_qdio_output_handler;
4983 init_data.queue_start_poll_array = queue_start_poll;
4984 init_data.int_parm = (unsigned long) card;
4985 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
4986 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
4987 init_data.output_sbal_state_array = card->qdio.out_bufstates;
4988 init_data.scan_threshold =
4989 (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
4990
4991 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4992 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4993 rc = qdio_allocate(&init_data);
4994 if (rc) {
4995 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4996 goto out;
4997 }
4998 rc = qdio_establish(&init_data);
4999 if (rc) {
5000 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5001 qdio_free(CARD_DDEV(card));
5002 }
5003 }
5004
5005 switch (card->options.cq) {
5006 case QETH_CQ_ENABLED:
5007 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5008 break;
5009 case QETH_CQ_DISABLED:
5010 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5011 break;
5012 default:
5013 break;
5014 }
5015 out:
5016 kfree(out_sbal_ptrs);
5017 out_free_queue_start_poll:
5018 kfree(queue_start_poll);
5019 out_free_in_sbals:
5020 kfree(in_sbal_ptrs);
5021 out_free_qib_param:
5022 kfree(qib_param_field);
5023 out_free_nothing:
5024 return rc;
5025 }
5026
5027 static void qeth_core_free_card(struct qeth_card *card)
5028 {
5029 QETH_DBF_TEXT(SETUP, 2, "freecrd");
5030 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
5031 qeth_clean_channel(&card->read);
5032 qeth_clean_channel(&card->write);
5033 qeth_clean_channel(&card->data);
5034 qeth_free_qdio_buffers(card);
5035 unregister_service_level(&card->qeth_service_level);
5036 dev_set_drvdata(&card->gdev->dev, NULL);
5037 kfree(card);
5038 }
5039
5040 void qeth_trace_features(struct qeth_card *card)
5041 {
5042 QETH_CARD_TEXT(card, 2, "features");
5043 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5044 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5045 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5046 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5047 sizeof(card->info.diagass_support));
5048 }
5049 EXPORT_SYMBOL_GPL(qeth_trace_features);
5050
5051 static struct ccw_device_id qeth_ids[] = {
5052 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5053 .driver_info = QETH_CARD_TYPE_OSD},
5054 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5055 .driver_info = QETH_CARD_TYPE_IQD},
5056 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5057 .driver_info = QETH_CARD_TYPE_OSN},
5058 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5059 .driver_info = QETH_CARD_TYPE_OSM},
5060 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5061 .driver_info = QETH_CARD_TYPE_OSX},
5062 {},
5063 };
5064 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5065
5066 static struct ccw_driver qeth_ccw_driver = {
5067 .driver = {
5068 .owner = THIS_MODULE,
5069 .name = "qeth",
5070 },
5071 .ids = qeth_ids,
5072 .probe = ccwgroup_probe_ccwdev,
5073 .remove = ccwgroup_remove_ccwdev,
5074 };
5075
5076 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5077 {
5078 int retries = 3;
5079 int rc;
5080
5081 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
5082 atomic_set(&card->force_alloc_skb, 0);
5083 qeth_update_from_chp_desc(card);
5084 retry:
5085 if (retries < 3)
5086 QETH_DBF_MESSAGE(2, "Retrying IDX activation on device %x.\n",
5087 CARD_DEVID(card));
5088 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
5089 ccw_device_set_offline(CARD_DDEV(card));
5090 ccw_device_set_offline(CARD_WDEV(card));
5091 ccw_device_set_offline(CARD_RDEV(card));
5092 qdio_free(CARD_DDEV(card));
5093 rc = ccw_device_set_online(CARD_RDEV(card));
5094 if (rc)
5095 goto retriable;
5096 rc = ccw_device_set_online(CARD_WDEV(card));
5097 if (rc)
5098 goto retriable;
5099 rc = ccw_device_set_online(CARD_DDEV(card));
5100 if (rc)
5101 goto retriable;
5102 retriable:
5103 if (rc == -ERESTARTSYS) {
5104 QETH_DBF_TEXT(SETUP, 2, "break1");
5105 return rc;
5106 } else if (rc) {
5107 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
5108 if (--retries < 0)
5109 goto out;
5110 else
5111 goto retry;
5112 }
5113 qeth_determine_capabilities(card);
5114 qeth_init_tokens(card);
5115 qeth_init_func_level(card);
5116 rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
5117 if (rc == -ERESTARTSYS) {
5118 QETH_DBF_TEXT(SETUP, 2, "break2");
5119 return rc;
5120 } else if (rc) {
5121 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
5122 if (--retries < 0)
5123 goto out;
5124 else
5125 goto retry;
5126 }
5127 rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
5128 if (rc == -ERESTARTSYS) {
5129 QETH_DBF_TEXT(SETUP, 2, "break3");
5130 return rc;
5131 } else if (rc) {
5132 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
5133 if (--retries < 0)
5134 goto out;
5135 else
5136 goto retry;
5137 }
5138 card->read_or_write_problem = 0;
5139 rc = qeth_mpc_initialize(card);
5140 if (rc) {
5141 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
5142 goto out;
5143 }
5144
5145 rc = qeth_send_startlan(card);
5146 if (rc) {
5147 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
5148 if (rc == IPA_RC_LAN_OFFLINE) {
5149 dev_warn(&card->gdev->dev,
5150 "The LAN is offline\n");
5151 *carrier_ok = false;
5152 } else {
5153 rc = -ENODEV;
5154 goto out;
5155 }
5156 } else {
5157 *carrier_ok = true;
5158 }
5159
5160 if (qeth_netdev_is_registered(card->dev)) {
5161 if (*carrier_ok)
5162 netif_carrier_on(card->dev);
5163 else
5164 netif_carrier_off(card->dev);
5165 }
5166
5167 card->options.ipa4.supported_funcs = 0;
5168 card->options.ipa6.supported_funcs = 0;
5169 card->options.adp.supported_funcs = 0;
5170 card->options.sbp.supported_funcs = 0;
5171 card->info.diagass_support = 0;
5172 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5173 if (rc == -ENOMEM)
5174 goto out;
5175 if (qeth_is_supported(card, IPA_IPV6)) {
5176 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5177 if (rc == -ENOMEM)
5178 goto out;
5179 }
5180 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5181 rc = qeth_query_setadapterparms(card);
5182 if (rc < 0) {
5183 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
5184 goto out;
5185 }
5186 }
5187 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5188 rc = qeth_query_setdiagass(card);
5189 if (rc < 0) {
5190 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
5191 goto out;
5192 }
5193 }
5194 return 0;
5195 out:
5196 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5197 "an error on the device\n");
5198 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5199 CARD_DEVID(card), rc);
5200 return rc;
5201 }
5202 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5203
5204 static void qeth_create_skb_frag(struct qdio_buffer_element *element,
5205 struct sk_buff *skb, int offset, int data_len)
5206 {
5207 struct page *page = virt_to_page(element->addr);
5208 unsigned int next_frag;
5209
5210 /* first fill the linear space */
5211 if (!skb->len) {
5212 unsigned int linear = min(data_len, skb_tailroom(skb));
5213
5214 skb_put_data(skb, element->addr + offset, linear);
5215 data_len -= linear;
5216 if (!data_len)
5217 return;
5218 offset += linear;
5219 /* fall through to add page frag for remaining data */
5220 }
5221
5222 next_frag = skb_shinfo(skb)->nr_frags;
5223 get_page(page);
5224 skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
5225 }
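/*
 * Example (a sketch; assumes the skb was allocated with QETH_RX_PULL_LEN
 * bytes of linear space): for a 1000-byte packet in SG mode the first
 * QETH_RX_PULL_LEN bytes are copied into the linear area and the rest is
 * attached as a page fragment referencing the QDIO buffer page itself
 * (hence the get_page()), avoiding a second copy of the bulk data.
 */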
5226
5227 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5228 {
5229 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5230 }
5231
5232 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
5233 struct qeth_qdio_buffer *qethbuffer,
5234 struct qdio_buffer_element **__element, int *__offset,
5235 struct qeth_hdr **hdr)
5236 {
5237 struct qdio_buffer_element *element = *__element;
5238 struct qdio_buffer *buffer = qethbuffer->buffer;
5239 int offset = *__offset;
5240 struct sk_buff *skb;
5241 int skb_len = 0;
5242 void *data_ptr;
5243 int data_len;
5244 int headroom = 0;
5245 int use_rx_sg = 0;
5246
5247 /* qeth_hdr must not cross element boundaries */
5248 while (element->length < offset + sizeof(struct qeth_hdr)) {
5249 if (qeth_is_last_sbale(element))
5250 return NULL;
5251 element++;
5252 offset = 0;
5253 }
5254 *hdr = element->addr + offset;
5255
5256 offset += sizeof(struct qeth_hdr);
5257 switch ((*hdr)->hdr.l2.id) {
5258 case QETH_HEADER_TYPE_LAYER2:
5259 skb_len = (*hdr)->hdr.l2.pkt_length;
5260 break;
5261 case QETH_HEADER_TYPE_LAYER3:
5262 skb_len = (*hdr)->hdr.l3.length;
5263 headroom = ETH_HLEN;
5264 break;
5265 case QETH_HEADER_TYPE_OSN:
5266 skb_len = (*hdr)->hdr.osn.pdu_length;
5267 headroom = sizeof(struct qeth_hdr);
5268 break;
5269 default:
5270 break;
5271 }
5272
5273 if (!skb_len)
5274 return NULL;
5275
5276 if (((skb_len >= card->options.rx_sg_cb) &&
5277 (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
5278 (!atomic_read(&card->force_alloc_skb))) ||
5279 (card->options.cq == QETH_CQ_ENABLED))
5280 use_rx_sg = 1;
5281
5282 if (use_rx_sg && qethbuffer->rx_skb) {
5283 /* QETH_CQ_ENABLED only: */
5284 skb = qethbuffer->rx_skb;
5285 qethbuffer->rx_skb = NULL;
5286 } else {
5287 unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
5288
5289 skb = napi_alloc_skb(&card->napi, linear + headroom);
5290 }
5291 if (!skb)
5292 goto no_mem;
5293 if (headroom)
5294 skb_reserve(skb, headroom);
5295
5296 data_ptr = element->addr + offset;
5297 while (skb_len) {
5298 data_len = min(skb_len, (int)(element->length - offset));
5299 if (data_len) {
5300 if (use_rx_sg)
5301 qeth_create_skb_frag(element, skb, offset,
5302 data_len);
5303 else
5304 skb_put_data(skb, data_ptr, data_len);
5305 }
5306 skb_len -= data_len;
5307 if (skb_len) {
5308 if (qeth_is_last_sbale(element)) {
5309 QETH_CARD_TEXT(card, 4, "unexeob");
5310 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5311 dev_kfree_skb_any(skb);
5312 card->stats.rx_errors++;
5313 return NULL;
5314 }
5315 element++;
5316 offset = 0;
5317 data_ptr = element->addr;
5318 } else {
5319 offset += data_len;
5320 }
5321 }
5322 *__element = element;
5323 *__offset = offset;
5324 if (use_rx_sg && card->options.performance_stats) {
5325 card->perf_stats.sg_skbs_rx++;
5326 card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
5327 }
5328 return skb;
5329 no_mem:
5330 if (net_ratelimit())
5331 QETH_CARD_TEXT(card, 2, "noskbmem");
5333 card->stats.rx_dropped++;
5334 return NULL;
5335 }
5336 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
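/*
 * RX-SG decision note (illustrative): fragments are used either for large
 * packets (skb_len >= card->options.rx_sg_cb, unless the card forces
 * allocated skbs or is an OSN device) or unconditionally when the
 * completion queue is enabled (note the preallocated qethbuffer->rx_skb
 * path above).
 */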
5337
5338 int qeth_poll(struct napi_struct *napi, int budget)
5339 {
5340 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5341 int work_done = 0;
5342 struct qeth_qdio_buffer *buffer;
5343 int done;
5344 int new_budget = budget;
5345
5346 if (card->options.performance_stats) {
5347 card->perf_stats.inbound_cnt++;
5348 card->perf_stats.inbound_start_time = qeth_get_micros();
5349 }
5350
5351 while (1) {
5352 if (!card->rx.b_count) {
5353 card->rx.qdio_err = 0;
5354 card->rx.b_count = qdio_get_next_buffers(
5355 card->data.ccwdev, 0, &card->rx.b_index,
5356 &card->rx.qdio_err);
5357 if (card->rx.b_count <= 0) {
5358 card->rx.b_count = 0;
5359 break;
5360 }
5361 card->rx.b_element =
5362 &card->qdio.in_q->bufs[card->rx.b_index]
5363 .buffer->element[0];
5364 card->rx.e_offset = 0;
5365 }
5366
5367 while (card->rx.b_count) {
5368 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5369 if (!(card->rx.qdio_err &&
5370 qeth_check_qdio_errors(card, buffer->buffer,
5371 card->rx.qdio_err, "qinerr")))
5372 work_done +=
5373 card->discipline->process_rx_buffer(
5374 card, new_budget, &done);
5375 else
5376 done = 1;
5377
5378 if (done) {
5379 if (card->options.performance_stats)
5380 card->perf_stats.bufs_rec++;
5381 qeth_put_buffer_pool_entry(card,
5382 buffer->pool_entry);
5383 qeth_queue_input_buffer(card, card->rx.b_index);
5384 card->rx.b_count--;
5385 if (card->rx.b_count) {
5386 card->rx.b_index =
5387 (card->rx.b_index + 1) %
5388 QDIO_MAX_BUFFERS_PER_Q;
5389 card->rx.b_element =
5390 &card->qdio.in_q
5391 ->bufs[card->rx.b_index]
5392 .buffer->element[0];
5393 card->rx.e_offset = 0;
5394 }
5395 }
5396
5397 if (work_done >= budget)
5398 goto out;
5399 else
5400 new_budget = budget - work_done;
5401 }
5402 }
5403
5404 napi_complete_done(napi, work_done);
5405 if (qdio_start_irq(card->data.ccwdev, 0))
5406 napi_schedule(&card->napi);
5407 out:
5408 if (card->options.performance_stats)
5409 card->perf_stats.inbound_time += qeth_get_micros() -
5410 card->perf_stats.inbound_start_time;
5411 return work_done;
5412 }
5413 EXPORT_SYMBOL_GPL(qeth_poll);
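/*
 * Re-arm note (an interpretation, not authoritative): after
 * napi_complete_done() the data device's QDIO interrupt is re-enabled
 * through qdio_start_irq(); a non-zero return indicates that buffers
 * arrived in the race window, so the NAPI instance is rescheduled
 * immediately instead of waiting for the next interrupt.
 */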
5414
5415 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5416 {
5417 if (!cmd->hdr.return_code)
5418 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5419 return cmd->hdr.return_code;
5420 }
5421
5422 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5423 struct qeth_reply *reply,
5424 unsigned long data)
5425 {
5426 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5427 struct qeth_ipa_caps *caps = reply->param;
5428
5429 if (qeth_setassparms_inspect_rc(cmd))
5430 return 0;
5431
5432 caps->supported = cmd->data.setassparms.data.caps.supported;
5433 caps->enabled = cmd->data.setassparms.data.caps.enabled;
5434 return 0;
5435 }
5436
5437 int qeth_setassparms_cb(struct qeth_card *card,
5438 struct qeth_reply *reply, unsigned long data)
5439 {
5440 struct qeth_ipa_cmd *cmd;
5441
5442 QETH_CARD_TEXT(card, 4, "defadpcb");
5443
5444 cmd = (struct qeth_ipa_cmd *) data;
5445 if (cmd->hdr.return_code == 0) {
5446 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5447 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5448 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5449 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5450 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5451 }
5452 return 0;
5453 }
5454 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5455
5456 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5457 enum qeth_ipa_funcs ipa_func,
5458 __u16 cmd_code, __u16 len,
5459 enum qeth_prot_versions prot)
5460 {
5461 struct qeth_cmd_buffer *iob;
5462 struct qeth_ipa_cmd *cmd;
5463
5464 QETH_CARD_TEXT(card, 4, "getasscm");
5465 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
5466
5467 if (iob) {
5468 cmd = __ipa_cmd(iob);
5469 cmd->data.setassparms.hdr.assist_no = ipa_func;
5470 cmd->data.setassparms.hdr.length = 8 + len;
5471 cmd->data.setassparms.hdr.command_code = cmd_code;
5472 }
5473
5474 return iob;
5475 }
5476 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5477
5478 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5479 enum qeth_ipa_funcs ipa_func,
5480 u16 cmd_code, long data,
5481 enum qeth_prot_versions prot)
5482 {
5483 int length = 0;
5484 struct qeth_cmd_buffer *iob;
5485
5486 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5487 if (data)
5488 length = sizeof(__u32);
5489 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5490 if (!iob)
5491 return -ENOMEM;
5492
5493 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
5494 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5495 }
5496 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
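/*
 * Usage sketch (hypothetical call, using command codes from
 * qeth_core_mpc.h): starting an IPA assist usually needs no payload,
 *
 *	rc = qeth_send_simple_setassparms_prot(card, IPA_INBOUND_CHECKSUM,
 *					       IPA_CMD_ASS_START, 0,
 *					       QETH_PROT_IPV4);
 *
 * A non-zero data argument is copied into flags_32bit and extends the
 * command by sizeof(__u32).
 */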
5497
5498 static void qeth_unregister_dbf_views(void)
5499 {
5500 int x;
5501 for (x = 0; x < QETH_DBF_INFOS; x++) {
5502 debug_unregister(qeth_dbf[x].id);
5503 qeth_dbf[x].id = NULL;
5504 }
5505 }
5506
5507 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5508 {
5509 char dbf_txt_buf[32];
5510 va_list args;
5511
5512 if (!debug_level_enabled(id, level))
5513 return;
5514 va_start(args, fmt);
5515 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5516 va_end(args);
5517 debug_text_event(id, level, dbf_txt_buf);
5518 }
5519 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5520
5521 static int qeth_register_dbf_views(void)
5522 {
5523 int ret;
5524 int x;
5525
5526 for (x = 0; x < QETH_DBF_INFOS; x++) {
5527 /* register the areas */
5528 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5529 qeth_dbf[x].pages,
5530 qeth_dbf[x].areas,
5531 qeth_dbf[x].len);
5532 if (qeth_dbf[x].id == NULL) {
5533 qeth_unregister_dbf_views();
5534 return -ENOMEM;
5535 }
5536
5537 /* register a view */
5538 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5539 if (ret) {
5540 qeth_unregister_dbf_views();
5541 return ret;
5542 }
5543
5544 /* set the level up to which events are recorded */
5545 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5546 }
5547
5548 return 0;
5549 }
5550
5551 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
5552
5553 int qeth_core_load_discipline(struct qeth_card *card,
5554 enum qeth_discipline_id discipline)
5555 {
5556 mutex_lock(&qeth_mod_mutex);
5557 switch (discipline) {
5558 case QETH_DISCIPLINE_LAYER3:
5559 card->discipline = try_then_request_module(
5560 symbol_get(qeth_l3_discipline), "qeth_l3");
5561 break;
5562 case QETH_DISCIPLINE_LAYER2:
5563 card->discipline = try_then_request_module(
5564 symbol_get(qeth_l2_discipline), "qeth_l2");
5565 break;
5566 default:
5567 break;
5568 }
5569 mutex_unlock(&qeth_mod_mutex);
5570
5571 if (!card->discipline) {
5572 dev_err(&card->gdev->dev,
5573 "There is no kernel module to support discipline %d\n", discipline);
5574 return -EINVAL;
5575 }
5576
5577 card->options.layer = discipline;
5578 return 0;
5579 }
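/*
 * Note: try_then_request_module() first tries symbol_get(); if the symbol
 * is not present it loads qeth_l2/qeth_l3 via request_module() and tries
 * again. The symbol reference taken here pins the discipline module until
 * qeth_core_free_discipline() releases it with symbol_put().
 */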
5580
5581 void qeth_core_free_discipline(struct qeth_card *card)
5582 {
5583 if (IS_LAYER2(card))
5584 symbol_put(qeth_l2_discipline);
5585 else
5586 symbol_put(qeth_l3_discipline);
5587 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5588 card->discipline = NULL;
5589 }
5590
5591 const struct device_type qeth_generic_devtype = {
5592 .name = "qeth_generic",
5593 .groups = qeth_generic_attr_groups,
5594 };
5595 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5596
5597 static const struct device_type qeth_osn_devtype = {
5598 .name = "qeth_osn",
5599 .groups = qeth_osn_attr_groups,
5600 };
5601
5602 #define DBF_NAME_LEN 20
5603
5604 struct qeth_dbf_entry {
5605 char dbf_name[DBF_NAME_LEN];
5606 debug_info_t *dbf_info;
5607 struct list_head dbf_list;
5608 };
5609
5610 static LIST_HEAD(qeth_dbf_list);
5611 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5612
5613 static debug_info_t *qeth_get_dbf_entry(char *name)
5614 {
5615 struct qeth_dbf_entry *entry;
5616 debug_info_t *rc = NULL;
5617
5618 mutex_lock(&qeth_dbf_list_mutex);
5619 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5620 if (strcmp(entry->dbf_name, name) == 0) {
5621 rc = entry->dbf_info;
5622 break;
5623 }
5624 }
5625 mutex_unlock(&qeth_dbf_list_mutex);
5626 return rc;
5627 }
5628
5629 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5630 {
5631 struct qeth_dbf_entry *new_entry;
5632
5633 card->debug = debug_register(name, 2, 1, 8);
5634 if (!card->debug) {
5635 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5636 goto err;
5637 }
5638 if (debug_register_view(card->debug, &debug_hex_ascii_view))
5639 goto err_dbg;
5640 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5641 if (!new_entry)
5642 goto err_dbg;
5643 strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
5644 new_entry->dbf_info = card->debug;
5645 mutex_lock(&qeth_dbf_list_mutex);
5646 list_add(&new_entry->dbf_list, &qeth_dbf_list);
5647 mutex_unlock(&qeth_dbf_list_mutex);
5648
5649 return 0;
5650
5651 err_dbg:
5652 debug_unregister(card->debug);
5653 err:
5654 return -ENOMEM;
5655 }
5656
5657 static void qeth_clear_dbf_list(void)
5658 {
5659 struct qeth_dbf_entry *entry, *tmp;
5660
5661 mutex_lock(&qeth_dbf_list_mutex);
5662 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5663 list_del(&entry->dbf_list);
5664 debug_unregister(entry->dbf_info);
5665 kfree(entry);
5666 }
5667 mutex_unlock(&qeth_dbf_list_mutex);
5668 }
5669
5670 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5671 {
5672 struct net_device *dev;
5673
5674 switch (card->info.type) {
5675 case QETH_CARD_TYPE_IQD:
5676 dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
5677 break;
5678 case QETH_CARD_TYPE_OSN:
5679 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5680 break;
5681 default:
5682 dev = alloc_etherdev(0);
5683 }
5684
5685 if (!dev)
5686 return NULL;
5687
5688 dev->ml_priv = card;
5689 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5690 dev->min_mtu = IS_OSN(card) ? 64 : 576;
5691 /* initialized when device first goes online: */
5692 dev->max_mtu = 0;
5693 dev->mtu = 0;
5694 SET_NETDEV_DEV(dev, &card->gdev->dev);
5695 netif_carrier_off(dev);
5696
5697 if (!IS_OSN(card)) {
5698 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5699 dev->hw_features |= NETIF_F_SG;
5700 dev->vlan_features |= NETIF_F_SG;
5701 if (IS_IQD(card))
5702 dev->features |= NETIF_F_SG;
5703 }
5704
5705 return dev;
5706 }
5707
5708 struct net_device *qeth_clone_netdev(struct net_device *orig)
5709 {
5710 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5711
5712 if (!clone)
5713 return NULL;
5714
5715 clone->dev_port = orig->dev_port;
5716 return clone;
5717 }
5718
5719 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5720 {
5721 struct qeth_card *card;
5722 struct device *dev;
5723 int rc;
5724 enum qeth_discipline_id enforced_disc;
5725 char dbf_name[DBF_NAME_LEN];
5726
5727 QETH_DBF_TEXT(SETUP, 2, "probedev");
5728
5729 dev = &gdev->dev;
5730 if (!get_device(dev))
5731 return -ENODEV;
5732
5733 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5734
5735 card = qeth_alloc_card(gdev);
5736 if (!card) {
5737 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5738 rc = -ENOMEM;
5739 goto err_dev;
5740 }
5741
5742 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5743 dev_name(&gdev->dev));
5744 card->debug = qeth_get_dbf_entry(dbf_name);
5745 if (!card->debug) {
5746 rc = qeth_add_dbf_entry(card, dbf_name);
5747 if (rc)
5748 goto err_card;
5749 }
5750
5751 qeth_setup_card(card);
5752 qeth_update_from_chp_desc(card);
5753
5754 card->dev = qeth_alloc_netdev(card);
5755 if (!card->dev) {
5756 rc = -ENOMEM;
5757 goto err_card;
5758 }
5759
5760 qeth_determine_capabilities(card);
5761 enforced_disc = qeth_enforce_discipline(card);
5762 switch (enforced_disc) {
5763 case QETH_DISCIPLINE_UNDETERMINED:
5764 gdev->dev.type = &qeth_generic_devtype;
5765 break;
5766 default:
5767 card->info.layer_enforced = true;
5768 rc = qeth_core_load_discipline(card, enforced_disc);
5769 if (rc)
5770 goto err_load;
5771
5772 gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
5773 ? card->discipline->devtype
5774 : &qeth_osn_devtype;
5775 rc = card->discipline->setup(card->gdev);
5776 if (rc)
5777 goto err_disc;
5778 break;
5779 }
5780
5781 return 0;
5782
5783 err_disc:
5784 qeth_core_free_discipline(card);
5785 err_load:
5786 free_netdev(card->dev);
5787 err_card:
5788 qeth_core_free_card(card);
5789 err_dev:
5790 put_device(dev);
5791 return rc;
5792 }
5793
5794 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5795 {
5796 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5797
5798 QETH_DBF_TEXT(SETUP, 2, "removedv");
5799
5800 if (card->discipline) {
5801 card->discipline->remove(gdev);
5802 qeth_core_free_discipline(card);
5803 }
5804
5805 free_netdev(card->dev);
5806 qeth_core_free_card(card);
5807 put_device(&gdev->dev);
5808 }
5809
5810 static int qeth_core_set_online(struct ccwgroup_device *gdev)
5811 {
5812 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5813 int rc = 0;
5814 enum qeth_discipline_id def_discipline;
5815
5816 if (!card->discipline) {
5817 if (card->info.type == QETH_CARD_TYPE_IQD)
5818 def_discipline = QETH_DISCIPLINE_LAYER3;
5819 else
5820 def_discipline = QETH_DISCIPLINE_LAYER2;
5821 rc = qeth_core_load_discipline(card, def_discipline);
5822 if (rc)
5823 goto err;
5824 rc = card->discipline->setup(card->gdev);
5825 if (rc) {
5826 qeth_core_free_discipline(card);
5827 goto err;
5828 }
5829 }
5830 rc = card->discipline->set_online(gdev);
5831 err:
5832 return rc;
5833 }
5834
5835 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5836 {
5837 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5838 return card->discipline->set_offline(gdev);
5839 }
5840
5841 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5842 {
5843 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5844 qeth_set_allowed_threads(card, 0, 1);
5845 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
5846 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5847 qeth_qdio_clear_card(card, 0);
5848 qeth_clear_qdio_buffers(card);
5849 qdio_free(CARD_DDEV(card));
5850 }
5851
5852 static int qeth_core_freeze(struct ccwgroup_device *gdev)
5853 {
5854 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5855 if (card->discipline && card->discipline->freeze)
5856 return card->discipline->freeze(gdev);
5857 return 0;
5858 }
5859
5860 static int qeth_core_thaw(struct ccwgroup_device *gdev)
5861 {
5862 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5863 if (card->discipline && card->discipline->thaw)
5864 return card->discipline->thaw(gdev);
5865 return 0;
5866 }
5867
5868 static int qeth_core_restore(struct ccwgroup_device *gdev)
5869 {
5870 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5871 if (card->discipline && card->discipline->restore)
5872 return card->discipline->restore(gdev);
5873 return 0;
5874 }
5875
5876 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5877 size_t count)
5878 {
5879 int err;
5880
5881 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5882 buf);
5883
5884 return err ? err : count;
5885 }
5886 static DRIVER_ATTR_WO(group);
5887
5888 static struct attribute *qeth_drv_attrs[] = {
5889 &driver_attr_group.attr,
5890 NULL,
5891 };
5892 static struct attribute_group qeth_drv_attr_group = {
5893 .attrs = qeth_drv_attrs,
5894 };
5895 static const struct attribute_group *qeth_drv_attr_groups[] = {
5896 &qeth_drv_attr_group,
5897 NULL,
5898 };
5899
5900 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5901 .driver = {
5902 .groups = qeth_drv_attr_groups,
5903 .owner = THIS_MODULE,
5904 .name = "qeth",
5905 },
5906 .ccw_driver = &qeth_ccw_driver,
5907 .setup = qeth_core_probe_device,
5908 .remove = qeth_core_remove_device,
5909 .set_online = qeth_core_set_online,
5910 .set_offline = qeth_core_set_offline,
5911 .shutdown = qeth_core_shutdown,
5912 .prepare = NULL,
5913 .complete = NULL,
5914 .freeze = qeth_core_freeze,
5915 .thaw = qeth_core_thaw,
5916 .restore = qeth_core_restore,
5917 };
5918
5919 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5920 {
5921 struct ccwgroup_device *gdev;
5922 struct qeth_card *card;
5923
5924 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5925 if (!gdev)
5926 return NULL;
5927
5928 card = dev_get_drvdata(&gdev->dev);
5929 put_device(&gdev->dev);
5930 return card;
5931 }
5932 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
5933
5934 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5935 {
5936 struct qeth_card *card = dev->ml_priv;
5937 struct mii_ioctl_data *mii_data;
5938 int rc = 0;
5939
5940 if (!card)
5941 return -ENODEV;
5942
5943 if (!qeth_card_hw_is_reachable(card))
5944 return -ENODEV;
5945
5946 if (card->info.type == QETH_CARD_TYPE_OSN)
5947 return -EPERM;
5948
5949 switch (cmd) {
5950 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5951 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5952 break;
5953 case SIOC_QETH_GET_CARD_TYPE:
5954 if ((card->info.type == QETH_CARD_TYPE_OSD ||
5955 card->info.type == QETH_CARD_TYPE_OSM ||
5956 card->info.type == QETH_CARD_TYPE_OSX) &&
5957 !card->info.guestlan)
5958 return 1;
5959 else
5960 return 0;
5961 case SIOCGMIIPHY:
5962 mii_data = if_mii(rq);
5963 mii_data->phy_id = 0;
5964 break;
5965 case SIOCGMIIREG:
5966 mii_data = if_mii(rq);
5967 if (mii_data->phy_id != 0)
5968 rc = -EINVAL;
5969 else
5970 mii_data->val_out = qeth_mdio_read(dev,
5971 mii_data->phy_id, mii_data->reg_num);
5972 break;
5973 case SIOC_QETH_QUERY_OAT:
5974 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
5975 break;
5976 default:
5977 if (card->discipline->do_ioctl)
5978 rc = card->discipline->do_ioctl(dev, rq, cmd);
5979 else
5980 rc = -EOPNOTSUPP;
5981 }
5982 if (rc)
5983 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
5984 return rc;
5985 }
5986 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
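/*
 * Illustrative only: user space reaches these private ioctls through the
 * interface, roughly:
 *
 *	struct ifreq ifr = {};
 *
 *	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname);
 *	rc = ioctl(sockfd, SIOC_QETH_GET_CARD_TYPE, &ifr);
 *
 * where a return value of 1 indicates a real (non-guestlan) OSD/OSM/OSX
 * card.
 */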
5987
5988 static struct {
5989 const char str[ETH_GSTRING_LEN];
5990 } qeth_ethtool_stats_keys[] = {
5991 /* 0 */{"rx skbs"},
5992 {"rx buffers"},
5993 {"tx skbs"},
5994 {"tx buffers"},
5995 {"tx skbs no packing"},
5996 {"tx buffers no packing"},
5997 {"tx skbs packing"},
5998 {"tx buffers packing"},
5999 {"tx sg skbs"},
6000 {"tx buffer elements"},
6001 /* 10 */{"rx sg skbs"},
6002 {"rx sg frags"},
6003 {"rx sg page allocs"},
6004 {"tx large kbytes"},
6005 {"tx large count"},
6006 {"tx pk state ch n->p"},
6007 {"tx pk state ch p->n"},
6008 {"tx pk watermark low"},
6009 {"tx pk watermark high"},
6010 {"queue 0 buffer usage"},
6011 /* 20 */{"queue 1 buffer usage"},
6012 {"queue 2 buffer usage"},
6013 {"queue 3 buffer usage"},
6014 {"rx poll time"},
6015 {"rx poll count"},
6016 {"rx do_QDIO time"},
6017 {"rx do_QDIO count"},
6018 {"tx handler time"},
6019 {"tx handler count"},
6020 {"tx time"},
6021 /* 30 */{"tx count"},
6022 {"tx do_QDIO time"},
6023 {"tx do_QDIO count"},
6024 {"tx csum"},
6025 {"tx lin"},
6026 {"tx linfail"},
6027 {"cq handler count"},
6028 {"cq handler time"},
6029 {"rx csum"}
6030 };
6031
6032 int qeth_core_get_sset_count(struct net_device *dev, int stringset)
6033 {
6034 switch (stringset) {
6035 case ETH_SS_STATS:
6036 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
6037 default:
6038 return -EINVAL;
6039 }
6040 }
6041 EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
6042
6043 void qeth_core_get_ethtool_stats(struct net_device *dev,
6044 struct ethtool_stats *stats, u64 *data)
6045 {
6046 struct qeth_card *card = dev->ml_priv;
6047 data[0] = card->stats.rx_packets -
6048 card->perf_stats.initial_rx_packets;
6049 data[1] = card->perf_stats.bufs_rec;
6050 data[2] = card->stats.tx_packets -
6051 card->perf_stats.initial_tx_packets;
6052 data[3] = card->perf_stats.bufs_sent;
6053 data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
6054 - card->perf_stats.skbs_sent_pack;
6055 data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
6056 data[6] = card->perf_stats.skbs_sent_pack;
6057 data[7] = card->perf_stats.bufs_sent_pack;
6058 data[8] = card->perf_stats.sg_skbs_sent;
6059 data[9] = card->perf_stats.buf_elements_sent;
6060 data[10] = card->perf_stats.sg_skbs_rx;
6061 data[11] = card->perf_stats.sg_frags_rx;
6062 data[12] = card->perf_stats.sg_alloc_page_rx;
6063 data[13] = (card->perf_stats.large_send_bytes >> 10);
6064 data[14] = card->perf_stats.large_send_cnt;
6065 data[15] = card->perf_stats.sc_dp_p;
6066 data[16] = card->perf_stats.sc_p_dp;
6067 data[17] = QETH_LOW_WATERMARK_PACK;
6068 data[18] = QETH_HIGH_WATERMARK_PACK;
6069 data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
6070 data[20] = (card->qdio.no_out_queues > 1) ?
6071 atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
6072 data[21] = (card->qdio.no_out_queues > 2) ?
6073 atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
6074 data[22] = (card->qdio.no_out_queues > 3) ?
6075 atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
6076 data[23] = card->perf_stats.inbound_time;
6077 data[24] = card->perf_stats.inbound_cnt;
6078 data[25] = card->perf_stats.inbound_do_qdio_time;
6079 data[26] = card->perf_stats.inbound_do_qdio_cnt;
6080 data[27] = card->perf_stats.outbound_handler_time;
6081 data[28] = card->perf_stats.outbound_handler_cnt;
6082 data[29] = card->perf_stats.outbound_time;
6083 data[30] = card->perf_stats.outbound_cnt;
6084 data[31] = card->perf_stats.outbound_do_qdio_time;
6085 data[32] = card->perf_stats.outbound_do_qdio_cnt;
6086 data[33] = card->perf_stats.tx_csum;
6087 data[34] = card->perf_stats.tx_lin;
6088 data[35] = card->perf_stats.tx_linfail;
6089 data[36] = card->perf_stats.cq_cnt;
6090 data[37] = card->perf_stats.cq_time;
6091 data[38] = card->perf_stats.rx_csum;
6092 }
6093 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
6094
6095 void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6096 {
6097 switch (stringset) {
6098 case ETH_SS_STATS:
6099 memcpy(data, &qeth_ethtool_stats_keys,
6100 sizeof(qeth_ethtool_stats_keys));
6101 break;
6102 default:
6103 WARN_ON(1);
6104 break;
6105 }
6106 }
6107 EXPORT_SYMBOL_GPL(qeth_core_get_strings);
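/*
 * Together, get_sset_count/get_strings/get_ethtool_stats back the output
 * of `ethtool -S <interface>`: the key at index i in
 * qeth_ethtool_stats_keys[] labels the value written to data[i] above.
 */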
6108
6109 void qeth_core_get_drvinfo(struct net_device *dev,
6110 struct ethtool_drvinfo *info)
6111 {
6112 struct qeth_card *card = dev->ml_priv;
6113
6114 strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
6115 sizeof(info->driver));
6116 strlcpy(info->version, "1.0", sizeof(info->version));
6117 strlcpy(info->fw_version, card->info.mcl_level,
6118 sizeof(info->fw_version));
6119 snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
6120 CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
6121 }
6122 EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
6123
6124 /* Helper function to fill 'advertising' and 'supported' which are the same.
6125 * Autoneg and full-duplex are supported and advertised unconditionally.
6126 * Always advertise and support all speeds up to the one specified, and
6127 * only the one specified port type. */
6128 static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
6129 int maxspeed, int porttype)
6130 {
6131 ethtool_link_ksettings_zero_link_mode(cmd, supported);
6132 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
6133 ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
6134
6135 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
6136 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
6137
6138 switch (porttype) {
6139 case PORT_TP:
6140 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6141 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6142 break;
6143 case PORT_FIBRE:
6144 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
6145 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
6146 break;
6147 default:
6148 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
6149 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
6150 WARN_ON_ONCE(1);
6151 }
6152
6153 /* most cases fall through, to also select all lower speeds */
6154 switch (maxspeed) {
6155 case SPEED_25000:
6156 ethtool_link_ksettings_add_link_mode(cmd, supported,
6157 25000baseSR_Full);
6158 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6159 25000baseSR_Full);
6160 break;
6161 case SPEED_10000:
6162 ethtool_link_ksettings_add_link_mode(cmd, supported,
6163 10000baseT_Full);
6164 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6165 10000baseT_Full);
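/* fall through */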
6166 case SPEED_1000:
6167 ethtool_link_ksettings_add_link_mode(cmd, supported,
6168 1000baseT_Full);
6169 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6170 1000baseT_Full);
6171 ethtool_link_ksettings_add_link_mode(cmd, supported,
6172 1000baseT_Half);
6173 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6174 1000baseT_Half);
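/* fall through */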
6175 case SPEED_100:
6176 ethtool_link_ksettings_add_link_mode(cmd, supported,
6177 100baseT_Full);
6178 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6179 100baseT_Full);
6180 ethtool_link_ksettings_add_link_mode(cmd, supported,
6181 100baseT_Half);
6182 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6183 100baseT_Half);
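/* fall through */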
6184 case SPEED_10:
6185 ethtool_link_ksettings_add_link_mode(cmd, supported,
6186 10baseT_Full);
6187 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6188 10baseT_Full);
6189 ethtool_link_ksettings_add_link_mode(cmd, supported,
6190 10baseT_Half);
6191 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6192 10baseT_Half);
6193 /* end fallthrough */
6194 break;
6195 default:
6196 ethtool_link_ksettings_add_link_mode(cmd, supported,
6197 10baseT_Full);
6198 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6199 10baseT_Full);
6200 ethtool_link_ksettings_add_link_mode(cmd, supported,
6201 10baseT_Half);
6202 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6203 10baseT_Half);
6204 WARN_ON_ONCE(1);
6205 }
6206 }
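/*
 * Example (illustrative): for a 10 Gbit fibre link the heuristics below
 * call
 *
 *	qeth_set_cmd_adv_sup(cmd, SPEED_10000, PORT_FIBRE);
 *
 * which, through the fall-through chain, advertises everything from
 * 10000baseT_Full down to 10baseT_Half on a FIBRE port.
 */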
6207
6208 int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
6209 struct ethtool_link_ksettings *cmd)
6210 {
6211 struct qeth_card *card = netdev->ml_priv;
6212 enum qeth_link_types link_type;
6213 struct carrier_info carrier_info;
6214 int rc;
6215
6216 if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
6217 link_type = QETH_LINK_TYPE_10GBIT_ETH;
6218 else
6219 link_type = card->info.link_type;
6220
6221 cmd->base.duplex = DUPLEX_FULL;
6222 cmd->base.autoneg = AUTONEG_ENABLE;
6223 cmd->base.phy_address = 0;
6224 cmd->base.mdio_support = 0;
6225 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
6226 cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6227
6228 switch (link_type) {
6229 case QETH_LINK_TYPE_FAST_ETH:
6230 case QETH_LINK_TYPE_LANE_ETH100:
6231 cmd->base.speed = SPEED_100;
6232 cmd->base.port = PORT_TP;
6233 break;
6234 case QETH_LINK_TYPE_GBIT_ETH:
6235 case QETH_LINK_TYPE_LANE_ETH1000:
6236 cmd->base.speed = SPEED_1000;
6237 cmd->base.port = PORT_FIBRE;
6238 break;
6239 case QETH_LINK_TYPE_10GBIT_ETH:
6240 cmd->base.speed = SPEED_10000;
6241 cmd->base.port = PORT_FIBRE;
6242 break;
6243 case QETH_LINK_TYPE_25GBIT_ETH:
6244 cmd->base.speed = SPEED_25000;
6245 cmd->base.port = PORT_FIBRE;
6246 break;
6247 default:
6248 cmd->base.speed = SPEED_10;
6249 cmd->base.port = PORT_TP;
6250 }
6251 qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
6252
6253 /* Check if we can obtain more accurate information: if the
6254 * QUERY_CARD_INFO command is not supported or fails, just return
6255 * the heuristic values filled in above. */
6256 if (!qeth_card_hw_is_reachable(card))
6257 return -ENODEV;
6258 rc = qeth_query_card_info(card, &carrier_info);
6259 if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
6260 return 0;
6261 if (rc) /* report error from the hardware operation */
6262 return rc;
6263 /* on success, fill in the information got from the hardware */
6264
6265 netdev_dbg(netdev,
6266 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
6267 carrier_info.card_type,
6268 carrier_info.port_mode,
6269 carrier_info.port_speed);
6270
6271 /* Update attributes for which we've obtained more authoritative
6272 * information, leave the rest the way they were filled above. */
6273 switch (carrier_info.card_type) {
6274 case CARD_INFO_TYPE_1G_COPPER_A:
6275 case CARD_INFO_TYPE_1G_COPPER_B:
6276 cmd->base.port = PORT_TP;
6277 qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6278 break;
6279 case CARD_INFO_TYPE_1G_FIBRE_A:
6280 case CARD_INFO_TYPE_1G_FIBRE_B:
6281 cmd->base.port = PORT_FIBRE;
6282 qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
6283 break;
6284 case CARD_INFO_TYPE_10G_FIBRE_A:
6285 case CARD_INFO_TYPE_10G_FIBRE_B:
6286 cmd->base.port = PORT_FIBRE;
6287 qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
6288 break;
6289 }
6290
6291 switch (carrier_info.port_mode) {
6292 case CARD_INFO_PORTM_FULLDUPLEX:
6293 cmd->base.duplex = DUPLEX_FULL;
6294 break;
6295 case CARD_INFO_PORTM_HALFDUPLEX:
6296 cmd->base.duplex = DUPLEX_HALF;
6297 break;
6298 }
6299
6300 switch (carrier_info.port_speed) {
6301 case CARD_INFO_PORTS_10M:
6302 cmd->base.speed = SPEED_10;
6303 break;
6304 case CARD_INFO_PORTS_100M:
6305 cmd->base.speed = SPEED_100;
6306 break;
6307 case CARD_INFO_PORTS_1G:
6308 cmd->base.speed = SPEED_1000;
6309 break;
6310 case CARD_INFO_PORTS_10G:
6311 cmd->base.speed = SPEED_10000;
6312 break;
6313 case CARD_INFO_PORTS_25G:
6314 cmd->base.speed = SPEED_25000;
6315 break;
6316 }
6317
6318 return 0;
6319 }
6320 EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
6321
6322 /* Callback to handle checksum offload command reply from OSA card.
6323 * Verify that required features have been enabled on the card.
6324 * Return error in hdr->return_code as this value is checked by caller.
6325 *
6326 * Always returns zero to indicate no further messages from the OSA card.
6327 */
6328 static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
6329 struct qeth_reply *reply,
6330 unsigned long data)
6331 {
6332 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6333 struct qeth_checksum_cmd *chksum_cb =
6334 (struct qeth_checksum_cmd *)reply->param;
6335
6336 QETH_CARD_TEXT(card, 4, "chkdoccb");
6337 if (qeth_setassparms_inspect_rc(cmd))
6338 return 0;
6339
6340 memset(chksum_cb, 0, sizeof(*chksum_cb));
6341 if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6342 chksum_cb->supported =
6343 cmd->data.setassparms.data.chksum.supported;
6344 QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
6345 }
6346 if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
6347 chksum_cb->supported =
6348 cmd->data.setassparms.data.chksum.supported;
6349 chksum_cb->enabled =
6350 cmd->data.setassparms.data.chksum.enabled;
6351 QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
6352 QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
6353 }
6354 return 0;
6355 }
6356
6357 /* Send command to OSA card and check results. */
6358 static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
6359 enum qeth_ipa_funcs ipa_func,
6360 __u16 cmd_code, long data,
6361 struct qeth_checksum_cmd *chksum_cb,
6362 enum qeth_prot_versions prot)
6363 {
6364 struct qeth_cmd_buffer *iob;
6365
6366 QETH_CARD_TEXT(card, 4, "chkdocmd");
6367 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6368 sizeof(__u32), prot);
6369 if (!iob)
6370 return -ENOMEM;
6371
6372 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
6373 return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb,
6374 chksum_cb);
6375 }
6376
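/* Enabling HW checksumming is a two-step sequence: IPA_CMD_ASS_START
 * reports which checksum features the card supports, IPA_CMD_ASS_ENABLE
 * then activates them. Both replies are checked against the features the
 * stack requires, and the assist is stopped again on any mismatch.
 */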
6377 static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
6378 enum qeth_prot_versions prot)
6379 {
6380 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6381 struct qeth_checksum_cmd chksum_cb;
6382 int rc;
6383
6384 if (prot == QETH_PROT_IPV4)
6385 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6386 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6387 &chksum_cb, prot);
6388 if (!rc) {
6389 if ((required_features & chksum_cb.supported) !=
6390 required_features)
6391 rc = -EIO;
6392 else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
6393 cstype == IPA_INBOUND_CHECKSUM)
6394 dev_warn(&card->gdev->dev,
6395 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
6396 QETH_CARD_IFNAME(card));
6397 }
6398 if (rc) {
6399 qeth_send_simple_setassparms_prot(card, cstype,
6400 IPA_CMD_ASS_STOP, 0, prot);
6401 dev_warn(&card->gdev->dev,
6402 "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
6403 prot, QETH_CARD_IFNAME(card));
6404 return rc;
6405 }
6406 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6407 chksum_cb.supported, &chksum_cb,
6408 prot);
6409 if (!rc) {
6410 if ((required_features & chksum_cb.enabled) !=
6411 required_features)
6412 rc = -EIO;
6413 }
6414 if (rc) {
6415 qeth_send_simple_setassparms_prot(card, cstype,
6416 IPA_CMD_ASS_STOP, 0, prot);
6417 dev_warn(&card->gdev->dev,
6418 "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
6419 prot, QETH_CARD_IFNAME(card));
6420 return rc;
6421 }
6422
6423 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6424 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6425 return 0;
6426 }
6427
6428 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6429 enum qeth_prot_versions prot)
6430 {
6431 int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
6432 : qeth_send_simple_setassparms_prot(card, cstype,
6433 IPA_CMD_ASS_STOP, 0,
6434 prot);
6435 return rc ? -EIO : 0;
6436 }
6437
6438 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6439 unsigned long data)
6440 {
6441 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6442 struct qeth_tso_start_data *tso_data = reply->param;
6443
6444 if (qeth_setassparms_inspect_rc(cmd))
6445 return 0;
6446
6447 tso_data->mss = cmd->data.setassparms.data.tso.mss;
6448 tso_data->supported = cmd->data.setassparms.data.tso.supported;
6449 return 0;
6450 }
6451
6452 static int qeth_set_tso_off(struct qeth_card *card,
6453 enum qeth_prot_versions prot)
6454 {
6455 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6456 IPA_CMD_ASS_STOP, 0, prot);
6457 }
6458
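/* Enabling TSO is likewise a two-step handshake: IPA_CMD_ASS_START
 * returns the card's MSS and supported send types, IPA_CMD_ASS_ENABLE
 * then switches on QETH_IPA_LARGE_SEND_TCP, which is verified against
 * the capabilities reported back by the card.
 */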
6459 static int qeth_set_tso_on(struct qeth_card *card,
6460 enum qeth_prot_versions prot)
6461 {
6462 struct qeth_tso_start_data tso_data;
6463 struct qeth_cmd_buffer *iob;
6464 struct qeth_ipa_caps caps;
6465 int rc;
6466
6467 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6468 IPA_CMD_ASS_START, 0, prot);
6469 if (!iob)
6470 return -ENOMEM;
6471
6472 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6473 if (rc)
6474 return rc;
6475
6476 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6477 qeth_set_tso_off(card, prot);
6478 return -EOPNOTSUPP;
6479 }
6480
6481 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6482 IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
6483 if (!iob) {
6484 qeth_set_tso_off(card, prot);
6485 return -ENOMEM;
6486 }
6487
6488 /* enable TSO capability */
6489 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6490 QETH_IPA_LARGE_SEND_TCP;
6491 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6492 if (rc) {
6493 qeth_set_tso_off(card, prot);
6494 return rc;
6495 }
6496
6497 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6498 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6499 qeth_set_tso_off(card, prot);
6500 return -EOPNOTSUPP;
6501 }
6502
6503 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6504 tso_data.mss);
6505 return 0;
6506 }
6507
6508 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6509 enum qeth_prot_versions prot)
6510 {
6511 int rc = on ? qeth_set_tso_on(card, prot) :
6512 qeth_set_tso_off(card, prot);
6513
6514 return rc ? -EIO : 0;
6515 }
6516
6517 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6518 {
6519 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6520 int rc_ipv6;
6521
6522 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6523 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6524 QETH_PROT_IPV4);
6525 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6526 /* no or only one Offload Assist available, so the rc is trivial */
6527 return rc_ipv4;
6528
6529 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6530 QETH_PROT_IPV6);
6531
6532 if (on)
6533 /* enable: success if any Assist is active */
6534 return (rc_ipv6) ? rc_ipv4 : 0;
6535
6536 /* disable: failure if any Assist is still active */
6537 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6538 }
6539
6540 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
6541 NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
6542 /**
6543 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6544 * @dev: a net_device
6545 */
6546 void qeth_enable_hw_features(struct net_device *dev)
6547 {
6548 struct qeth_card *card = dev->ml_priv;
6549 netdev_features_t features;
6550
6551 rtnl_lock();
6552 features = dev->features;
6553 /* force-off any feature that needs an IPA sequence.
6554 * netdev_update_features() will restart them.
6555 */
6556 dev->features &= ~QETH_HW_FEATURES;
6557 netdev_update_features(dev);
6558 if (features != dev->features)
6559 dev_warn(&card->gdev->dev,
6560 "Device recovery failed to restore all offload features\n");
6561 rtnl_unlock();
6562 }
6563 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6564
6565 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6566 {
6567 struct qeth_card *card = dev->ml_priv;
6568 netdev_features_t changed = dev->features ^ features;
6569 int rc = 0;
6570
6571 QETH_DBF_TEXT(SETUP, 2, "setfeat");
6572 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6573
6574 if (changed & NETIF_F_IP_CSUM) {
6575 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6576 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6577 if (rc)
6578 changed ^= NETIF_F_IP_CSUM;
6579 }
6580 if (changed & NETIF_F_IPV6_CSUM) {
6581 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6582 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6583 if (rc)
6584 changed ^= NETIF_F_IPV6_CSUM;
6585 }
6586 if (changed & NETIF_F_RXCSUM) {
6587 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6588 if (rc)
6589 changed ^= NETIF_F_RXCSUM;
6590 }
6591 if (changed & NETIF_F_TSO) {
6592 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6593 QETH_PROT_IPV4);
6594 if (rc)
6595 changed ^= NETIF_F_TSO;
6596 }
6597 if (changed & NETIF_F_TSO6) {
6598 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6599 QETH_PROT_IPV6);
6600 if (rc)
6601 changed ^= NETIF_F_TSO6;
6602 }
6603
6604 /* everything changed successfully? */
6605 if ((dev->features ^ features) == changed)
6606 return 0;
6607 /* something went wrong, save the changed features and return an error */
6608 dev->features ^= changed;
6609 return -EIO;
6610 }
6611 EXPORT_SYMBOL_GPL(qeth_set_features);
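/*
 * Illustrative: this is the .ndo_set_features hook, reached e.g. via
 * `ethtool -K <interface> tso on` -> netdev_update_features(). Features
 * whose IPA sequence fails are flipped back in dev->features so user
 * space sees the state the hardware actually has.
 */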
6612
6613 netdev_features_t qeth_fix_features(struct net_device *dev,
6614 netdev_features_t features)
6615 {
6616 struct qeth_card *card = dev->ml_priv;
6617
6618 QETH_DBF_TEXT(SETUP, 2, "fixfeat");
6619 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6620 features &= ~NETIF_F_IP_CSUM;
6621 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6622 features &= ~NETIF_F_IPV6_CSUM;
6623 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6624 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6625 features &= ~NETIF_F_RXCSUM;
6626 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6627 features &= ~NETIF_F_TSO;
6628 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6629 features &= ~NETIF_F_TSO6;
6630 /* if the card isn't up, remove features that require hw changes */
6631 if (card->state == CARD_STATE_DOWN ||
6632 card->state == CARD_STATE_RECOVER)
6633 features &= ~QETH_HW_FEATURES;
6634 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6635 return features;
6636 }
6637 EXPORT_SYMBOL_GPL(qeth_fix_features);
6638
6639 netdev_features_t qeth_features_check(struct sk_buff *skb,
6640 struct net_device *dev,
6641 netdev_features_t features)
6642 {
6643 /* GSO segmentation builds skbs with
6644 * a (small) linear part for the headers, and
6645 * page frags for the data.
6646 * Compared to a linear skb, the header-only part consumes an
6647 * additional buffer element. This reduces buffer utilization, and
6648 * hurts throughput. So compress small segments into one element.
6649 */
6650 if (netif_needs_gso(skb, features)) {
6651 /* match skb_segment(): */
6652 unsigned int doffset = skb->data - skb_mac_header(skb);
6653 unsigned int hsize = skb_shinfo(skb)->gso_size;
6654 unsigned int hroom = skb_headroom(skb);
6655
6656 /* linearize only if resulting skb allocations are order-0: */
6657 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6658 features &= ~NETIF_F_SG;
6659 }
6660
6661 return vlan_features_check(skb, features);
6662 }
6663 EXPORT_SYMBOL_GPL(qeth_features_check);
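/*
 * Worked example (assuming 4 KiB pages): with a gso_size of 1400 bytes
 * and ~66 bytes of headers, SKB_DATA_ALIGN(hroom + doffset + hsize) stays
 * well below SKB_MAX_HEAD(0), so NETIF_F_SG is cleared and each segment
 * becomes a single order-0 linear allocation.
 */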
6664
6665 static int __init qeth_core_init(void)
6666 {
6667 int rc;
6668
6669 pr_info("loading core functions\n");
6670
6671 qeth_wq = create_singlethread_workqueue("qeth_wq");
6672 if (!qeth_wq) {
6673 rc = -ENOMEM;
6674 goto out_err;
6675 }
6676
6677 rc = qeth_register_dbf_views();
6678 if (rc)
6679 goto dbf_err;
6680 qeth_core_root_dev = root_device_register("qeth");
6681 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6682 if (rc)
6683 goto register_err;
6684 qeth_core_header_cache =
6685 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6686 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6687 0, NULL);
6688 if (!qeth_core_header_cache) {
6689 rc = -ENOMEM;
6690 goto slab_err;
6691 }
6692 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6693 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6694 if (!qeth_qdio_outbuf_cache) {
6695 rc = -ENOMEM;
6696 goto cqslab_err;
6697 }
6698 rc = ccw_driver_register(&qeth_ccw_driver);
6699 if (rc)
6700 goto ccw_err;
6701 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6702 if (rc)
6703 goto ccwgroup_err;
6704
6705 return 0;
6706
6707 ccwgroup_err:
6708 ccw_driver_unregister(&qeth_ccw_driver);
6709 ccw_err:
6710 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6711 cqslab_err:
6712 kmem_cache_destroy(qeth_core_header_cache);
6713 slab_err:
6714 root_device_unregister(qeth_core_root_dev);
6715 register_err:
6716 qeth_unregister_dbf_views();
6717 dbf_err:
6718 destroy_workqueue(qeth_wq);
6719 out_err:
6720 pr_err("Initializing the qeth device driver failed\n");
6721 return rc;
6722 }
6723
6724 static void __exit qeth_core_exit(void)
6725 {
6726 qeth_clear_dbf_list();
6727 destroy_workqueue(qeth_wq);
6728 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6729 ccw_driver_unregister(&qeth_ccw_driver);
6730 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6731 kmem_cache_destroy(qeth_core_header_cache);
6732 root_device_unregister(qeth_core_root_dev);
6733 qeth_unregister_dbf_views();
6734 pr_info("core functions removed\n");
6735 }
6736
6737 module_init(qeth_core_init);
6738 module_exit(qeth_core_exit);
6739 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6740 MODULE_DESCRIPTION("qeth core functions");
6741 MODULE_LICENSE("GPL");