// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
			    8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
			    8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
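
/*
 * Note on the table above (a general observation about the s390 debug
 * feature, not specific to this file): it only describes the debug areas;
 * registration with the debug feature happens later during module init.
 * Once registered, each area is typically readable under
 * /sys/kernel/debug/s390dbf/<name>/ through the view configured here
 * (hex/ascii or sprintf).
 */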

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
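
/*
 * Thread-mask bookkeeping, as used above and below: 'thread_allowed_mask'
 * gates which long-running helpers (e.g. the recovery thread) may be
 * started, 'thread_start_mask' queues a start request, and
 * 'thread_running_mask' tracks what is currently live. All three are only
 * touched under 'thread_mask_lock', and every change wakes 'wait_q' so
 * sleepers can re-evaluate their condition.
 */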

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}

	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
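
/*
 * Note on the resize logic above: 'delta' is computed once against the
 * current buf_count, entries are then freed or allocated one per loop
 * iteration, and newly allocated entries are staged on a local list so
 * that a failed allocation can be unwound completely without leaving the
 * pool half-grown. Shrinking simply frees from the head of the pool list.
 */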

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	unsigned int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}

		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}

		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}
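
/*
 * sbalf15 is the completion code reported in SBAL flag byte 15 for an
 * asynchronously completed TX buffer: 0 means success, the small set of
 * values mapped above indicates an unreachable peer, and anything else is
 * treated as a general error. 'delayed' selects the TX_NOTIFY_DELAYED_*
 * variants used when the result only arrives later via the completion
 * queue.
 */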

static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;

				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}

static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		void *data = phys_to_virt(aob->sba[i]);

		if (data && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}
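
/*
 * TX buffer states for async (CQ) completion, as used above: a buffer that
 * was still PRIMED moves directly to IN_CQ, while one already flagged
 * PENDING yields a delayed notification instead. HANDLED_DELAYED then
 * marks the buffer for qeth_cleanup_handled_pending() to reap later; only
 * the skb notification and dangling header-cache allocations are dealt
 * with here.
 */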

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}
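
/*
 * qeth_setup_ccw() fills one channel command word. CCW_FLAG_SLI (suppress
 * incorrect length indication) is always set, since replies may be shorter
 * than the buffer. Callers build multi-CCW programs by chaining; e.g. the
 * IDX activation further below links a WRITE to a READ:
 *
 *	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, len, data);
 *	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, len, data);
 *
 * where CCW_FLAG_CC (command chaining) makes the channel continue with
 * ccw[1] after ccw[0] completes.
 */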

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
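
/*
 * Completion flow: a waiter sleeps on iob->done (see
 * qeth_send_control_data() below), and qeth_notify_cmd() is the single
 * place that records the result and completes it, whether the reply
 * arrived, the command was cancelled, or the channel went away.
 */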

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
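
/*
 * DEFINE_SHOW_ATTRIBUTE() generates qeth_debugfs_local_addr_fops around the
 * show function above; the file is created per card as "local_addrs" in
 * qeth_alloc_card() below, so the tracked addresses can be dumped (one per
 * line, IPv4 entries before IPv6) from debugfs at runtime.
 */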

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				netdev_name(card->dev));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);
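
/*
 * Command buffer lifetime is refcounted: qeth_alloc_cmd() starts at 1,
 * paths that keep the iob alive past I/O completion take an extra
 * reference (see the qeth_get_cmd() calls elsewhere in this file), and
 * qeth_put_cmd() frees data and iob together once the last reference is
 * dropped.
 */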

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	/* listen for more data on the read channel: */
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	channel->active_cmd = NULL;
	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (data && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 *  @cb_card:			pointer to the qeth_card structure
 *  @cb_reply:			pointer to the qeth_reply structure
 *  @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	if (!rc)
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}
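
/*
 * A minimal sketch of a caller, following the contract documented above
 * (the callback name here is illustrative, not part of this file):
 *
 *	static int my_cb(struct qeth_card *card, struct qeth_reply *reply,
 *			 unsigned long data)
 *	{
 *		struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)data;
 *
 *		// extract what is needed from iob->data ...
 *		return 0;	// 0: done, > 0: more blocks, < 0: error
 *	}
 *
 *	rc = qeth_send_control_data(card, iob, my_cb, NULL);
 *
 * matching how qeth_cm_enable()/qeth_cm_setup() below use their callbacks.
 */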

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	if (!IS_OSN(card))
		port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
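/*
 * Both channel-activation helpers below use the command layout built by
 * qeth_idx_setup_activate_cmd() above; only the template (IDX_ACTIVATE_READ
 * vs. IDX_ACTIVATE_WRITE) and the completion callback differ.
 */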
static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
			    unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}
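/*
 * CM_ENABLE and CM_SETUP mirror each other: each request carries our
 * write-side token, and the reply callback stores the peer's read-side
 * token in card->token for use by the subsequent ULP commands.
 */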
static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}
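/* IQD reports its maximum MTU as an encoded frame size; decode it below. */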
static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type = 0;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	if (IS_OSN(card))
		return QETH_PROT_OSN2;
	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
}
static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}
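/*
 * The ULP handshake above completes the MPC control setup; everything
 * below deals with allocating and initializing the QDIO queues that carry
 * the actual data traffic.
 */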
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	newbuf->q = q;
	newbuf->next_pending = q->bufs[bidx];
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}
	return q;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	int i, j;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		spin_lock_init(&queue->lock);
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;

		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			WARN_ON(queue->bufs[j]);
			if (qeth_init_qdio_out_buf(queue, j))
				goto out_freeoutqbufs;
		}
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutqbufs:
	while (j > 0) {
		--j;
		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	}
out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}
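/*
 * Teardown below mirrors qeth_alloc_qdio_queues(): pending RX skbs and the
 * input queue first, then the inbound buffer pool, then the output queues.
 */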
static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}
static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}
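/*
 * Each optional block in the QIB parameter area above is tagged with an
 * EBCDIC magic ("PCIT", "BLKT", "PQUE") so the adapter can locate it.
 */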
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}
static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list)
		qeth_put_buffer_pool_entry(card, entry);
}

static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}
	list_del_init(&entry->list);
	return entry;
}
static int qeth_init_input_buffer(struct qeth_card *card,
				  struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =
			page_to_phys(pool_entry->elements[i]);
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}
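/*
 * Roughly: on IQD, up to card->ssqd.mmwc buffers may be flushed with a
 * single SIGA instruction (see qeth_tx_select_bulk_max() above); in all
 * other configurations the bulking limit stays at 1.
 */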
static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < rx_bufs; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->coalesced_frames = 0;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}

void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
			  u16 cmd_length,
			  bool (*match)(struct qeth_cmd_buffer *iob,
					struct qeth_cmd_buffer *reply))
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;
	iob->match = match;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);

static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}
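/*
 * IPA commands are correlated with their replies via hdr.seqno (see
 * qeth_ipa_match_reply() above), so a reply can be matched to its request
 * even with multiple commands in flight.
 */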
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->read_or_write_problem) {
		qeth_put_cmd(iob);
		return -EIO;
	}

	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_cmds_supp *query_cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}

	card->options.adp.supported = query_cmd->supported_cmds;
	return 0;
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}

static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}
static int qeth_query_ipassists_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}

static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
					   struct qeth_reply *reply,
					   unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_query_switch_attributes_cb, sw_info);
}
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

static int qeth_query_setdiagass_cb(struct qeth_card *card,
				    struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}
static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}
static int qeth_hw_trap_cb(struct qeth_card *card,
			   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
				 (struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_hw_trap);
static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}
static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			 * traditional skb allocation and drop packets
			 */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}
static void qeth_handle_send_error(struct qeth_card *card,
				   struct qeth_qdio_out_buffer *buffer,
				   unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}
/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}
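/*
 * qeth_flush_buffers() below issues the actual SIGA via do_QDIO(). For OSA
 * it also manages the PCI request flag that triggers TX completion
 * interrupts; for IQD it emulates TX completion via NAPI and a timer.
 */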
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	struct qeth_card *card = queue->card;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (queue->bufstates)
			queue->bufstates[bidx].user = buf;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (!IS_IQD(card)) {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}

		if (atomic_read(&queue->set_pci_flags_count))
			qdio_flags |= QDIO_FLAG_PCI_OUT;
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);

	/* Fake the TX completion interrupt: */
	if (IS_IQD(card)) {
		unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
		unsigned int usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}
	}

	if (rc) {
		/* ignore temporary SIGA errors without busy condition */
		if (rc == -ENOBUFS)
			return;
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}
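/* Flush the current bulk window and reset the bulking state. */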
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we'd have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}
static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc = 0;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_aob_addr);
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     card->qdio.c_q->next_buf_to_init,
		     count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct net_device *dev = card->dev;
	struct netdev_queue *txq;
	int i;

	QETH_CARD_TEXT(card, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_FATAL) {
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_tx_stop_all_queues(dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < (first_element + count); ++i) {
		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];

		qeth_handle_send_error(card, buf, qdio_error);
		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
	}

	atomic_sub(count, &queue->used_buffers);
	qeth_check_outbound_queue(queue);

	txq = netdev_get_tx_queue(dev, __queue);
	/* xmit may have observed the full-condition, but not yet stopped the
	 * txq. In which case the code below won't trigger. So before returning,
	 * xmit will re-check the txq's fill level and wake it up if needed.
	 */
	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
		netif_tx_wake_queue(txq);
}
/**
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (qeth_get_ip_version(skb)) {
		case 4:
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case 6:
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
/**
 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
 * @skb: SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
 *			   to transmit an skb.
 * @skb: the skb to operate on.
 * @data_offset: skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
EXPORT_SYMBOL_GPL(qeth_count_elements);
#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}
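/*
 * Bulking note: qeth_iqd_may_bulk() above only merges frames with the same
 * L2 destination and VLAN (or the same L3 next-hop), since all frames in
 * one buffer are delivered to a single target.
 */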
/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			buf->is_header[element] = 1;
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_phys(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_phys(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}
*card
, struct qeth_qdio_out_q
*queue
,
4266 struct sk_buff
*skb
, struct qeth_hdr
*hdr
,
4267 unsigned int offset
, unsigned int hd_len
,
4268 int elements_needed
)
4270 unsigned int start_index
= queue
->next_buf_to_fill
;
4271 struct qeth_qdio_out_buffer
*buffer
;
4272 unsigned int next_element
;
4273 struct netdev_queue
*txq
;
4274 bool stopped
= false;
4275 int flush_count
= 0;
4279 buffer
= queue
->bufs
[queue
->next_buf_to_fill
];
4281 /* Just a sanity check, the wake/stop logic should ensure that we always
4282 * get a free buffer.
4284 if (atomic_read(&buffer
->state
) != QETH_QDIO_BUF_EMPTY
)
4287 txq
= netdev_get_tx_queue(card
->dev
, skb_get_queue_mapping(skb
));
4289 /* check if we need to switch packing state of this queue */
4290 qeth_switch_to_packing_if_needed(queue
);
4291 if (queue
->do_pack
) {
4293 /* does packet fit in current buffer? */
4294 if (buffer
->next_element_to_fill
+ elements_needed
>
4295 queue
->max_elements
) {
4296 /* ... no -> set state PRIMED */
4297 atomic_set(&buffer
->state
, QETH_QDIO_BUF_PRIMED
);
4299 queue
->next_buf_to_fill
=
4300 QDIO_BUFNR(queue
->next_buf_to_fill
+ 1);
4301 buffer
= queue
->bufs
[queue
->next_buf_to_fill
];
4303 /* We stepped forward, so sanity-check again: */
4304 if (atomic_read(&buffer
->state
) !=
4305 QETH_QDIO_BUF_EMPTY
) {
4306 qeth_flush_buffers(queue
, start_index
,
4314 if (buffer
->next_element_to_fill
== 0 &&
4315 atomic_inc_return(&queue
->used_buffers
) >= QDIO_MAX_BUFFERS_PER_Q
) {
4316 /* If a TX completion happens right _here_ and misses to wake
4317 * the txq, then our re-check below will catch the race.
4319 QETH_TXQ_STAT_INC(queue
, stopped
);
4320 netif_tx_stop_queue(txq
);
4324 next_element
= qeth_fill_buffer(buffer
, skb
, hdr
, offset
, hd_len
);
4325 buffer
->bytes
+= qdisc_pkt_len(skb
);
4326 buffer
->frames
+= skb_is_gso(skb
) ? skb_shinfo(skb
)->gso_segs
: 1;
4329 QETH_TXQ_STAT_INC(queue
, skbs_pack
);
4330 if (!queue
->do_pack
|| stopped
|| next_element
>= queue
->max_elements
) {
4332 atomic_set(&buffer
->state
, QETH_QDIO_BUF_PRIMED
);
4333 queue
->next_buf_to_fill
=
4334 QDIO_BUFNR(queue
->next_buf_to_fill
+ 1);
4338 qeth_flush_buffers(queue
, start_index
, flush_count
);
4342 QETH_TXQ_STAT_ADD(queue
, bufs_pack
, flush_count
);
4344 if (stopped
&& !qeth_out_queue_is_full(queue
))
4345 netif_tx_start_queue(txq
);
4348 EXPORT_SYMBOL_GPL(qeth_do_send_packet
);
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, int ipv,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  int ipv, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, ipv, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		spin_lock(&queue->lock);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
		spin_unlock(&queue->lock);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
				       struct qeth_reply *reply,
				       unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	setparms = &(cmd->data.setadapterparms);
	if (qeth_setadpparms_inspect_rc(cmd)) {
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return (cmd->hdr.return_code) ? -EIO : 0;
}

void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
				   SETADP_DATA_SIZEOF(mode));
	if (!iob)
		return;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
					      struct qeth_reply *reply,
					      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *adp_cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	adp_cmd = &cmd->data.setadapterparms;
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
		return -EADDRNOTAVAIL;

	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmac");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
				   SETADP_DATA_SIZEOF(change_addr));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
					       struct qeth_reply *reply,
					       unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setaccb");

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
	    SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	switch (qeth_setadpparms_inspect_rc(cmd)) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is deactivated\n");
		else
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is activated\n");
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		return -EPERM;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
			"enabled at the adjacent switch port");
		return -EREMOTEIO;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
			 "at the adjacent switch failed\n");
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
	default:
		return -EIO;
	}
}

int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = mode;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
	}

	return rc;
}

void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

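/* There is no real PHY behind a qeth interface. Emulate the common MII
 * registers with plausible, mostly static values so that MII ioctls such as
 * SIOCGMIIREG keep working.
 */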
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/*copy entries to user buffer*/
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}

static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv = reply->param;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;

	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer)
		return -ENOMEM;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
	if (!rc) {
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
		oat_data.response_len = priv.response_len;

		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
	return rc;
}

static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	card_info = &cmd->data.setadapterparms.data.card_info;
	carrier_info->card_type = card_info->card_type;
	carrier_info->port_mode = card_info->port_mode;
	carrier_info->port_speed = card_info->port_speed;
	return 0;
}

int qeth_query_card_info(struct qeth_card *card,
			 struct carrier_info *carrier_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
				 (void *)carrier_info);
}

/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		ether_addr_copy(card->dev->dev_addr, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	      ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	      ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			 "Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}

static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

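/* Collect the SBAL pointers of all input/output queues and establish the
 * QDIO queues with the common layer. QIB parameters are only supplied for
 * devices that are neither IQD nor VM NICs.
 */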
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED)
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
					    QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field = (void *)qib_parms;
	init_data.no_input_qs = card->qdio.no_in_queues;
	init_data.no_output_qs = card->qdio.no_out_queues;
	init_data.input_handler = qeth_qdio_input_handler;
	init_data.output_handler = qeth_qdio_output_handler;
	init_data.irq_poll = qeth_qdio_poll;
	init_data.int_parm = (unsigned long) card;
	init_data.input_sbal_addr_array = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
	init_data.output_sbal_state_array = card->qdio.out_bufstates;
	init_data.scan_threshold = IS_IQD(card) ? 0 : 32;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
			   QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
			goto out;
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}

out:
	kfree(qib_parms);
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}

static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}

static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
#ifdef CONFIG_QETH_OSN
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
#endif
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

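/* Bring up the read/write/data channels and run the IDX activation and MPC
 * initialization sequence. Recoverable errors are retried a limited number
 * of times before the setup is declared failed.
 */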
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
			goto out;
		}
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}

static int qeth_set_online(struct qeth_card *card)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED)
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

	rc = card->discipline->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

int qeth_set_offline(struct qeth_card *card, bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	card->discipline->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

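/* Recovery worker: take the device fully offline and then online again.
 * If recovery fails, the ccwgroup device is left offline.
 */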
static int qeth_do_reset(void *data)
{
	struct qeth_card *card = data;
	int rc;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, true);
	rc = qeth_set_online(card);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif

static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr, bool uses_frags)
{
	struct napi_struct *napi = &card->napi;
	bool is_cso;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_OSN:
		skb_push(skb, sizeof(*hdr));
		skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
		QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
		QETH_CARD_STAT_INC(card, rx_packets);

		card->osn_info.data_cb(skb);
		return;
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
		if (uses_frags)
			napi_free_frags(napi);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
}

static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}

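/* Extract a single packet from the current inbound buffer. Small packets
 * (up to rx_copybreak) are copied into a linear skb, larger ones are
 * attached page-by-page as fragments to avoid the copy.
 */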
static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;

	element = &buffer->element[*element_no];

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = phys_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	case QETH_HEADER_TYPE_OSN:
		skb_len = hdr->hdr.osn.pdu_length;
		if (!IS_OSN(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		linear_len = skb_len;
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb) &&
		     !IS_OSN(card));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}

use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = phys_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

	qeth_receive_skb(card, skb, hdr, uses_frags);
	return 0;
}

static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}

static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

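/* Main RX NAPI handler: drain completed RX buffers (and the completion
 * queue, if enabled), work off the refill backlog, and only re-enable the
 * QDIO interrupt once the budget is no longer exhausted.
 */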
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, bool error, int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;

	if (queue->bufstates && (queue->bufstates[bidx].flags &
				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);

		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
						   QETH_QDIO_BUF_PENDING) ==
		    QETH_QDIO_BUF_PRIMED)
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		/* prepare the queue slot for re-use: */
		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
		if (qeth_init_qdio_out_buf(queue, bidx)) {
			QETH_CARD_TEXT(card, 2, "outofbuf");
			qeth_schedule_recovery(card);
		}

		return;
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	qeth_clear_output_buffer(queue, buffer, error, budget);
}

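/* TX completion poller for IQD devices: inspect the output queue, account
 * the completed buffers towards BQL, and wake the txq if xmit stopped it
 * on a full queue.
 */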
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0))
				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			qeth_iqd_tx_complete(queue, bidx, error, budget);
			qeth_cleanup_handled_pending(queue, bidx, false);
		}

		netdev_tx_completed_queue(txq, packets, bytes);
		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  hdr.command_code));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_core_load_discipline(struct qeth_card *card,
			      enum qeth_discipline_id discipline)
{
	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_core_free_discipline(struct qeth_card *card)
{
	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
	.groups = qeth_generic_attr_groups,
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
	.groups = qeth_osn_attr_groups,
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	case QETH_CARD_TYPE_OSN:
		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
				   ether_setup);
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	if (IS_OSN(card)) {
		dev->ethtool_ops = &qeth_osn_ethtool_ops;
	} else {
		dev->ethtool_ops = &qeth_ethtool_ops;
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
		if (IS_IQD(card))
			dev->features |= NETIF_F_SG;
	}

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		 dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_load;

		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
		rc = card->discipline->setup(card->gdev);
		if (rc)
			goto err_disc;
		break;
	}

	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_load:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	if (card->discipline) {
		card->discipline->remove(gdev);
		qeth_core_free_discipline(card);
	}

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline->setup(card->gdev);
		if (rc) {
			qeth_core_free_discipline(card);
			goto err;
		}
	}

	rc = qeth_set_online(card);
err:
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	return qeth_set_offline(card, false);
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}

static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

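/* Enabling HW checksumming is a two-step IPA dialog: ASS_START reports
 * which features the assist supports, ASS_ENABLE then selects the required
 * ones. If the adapter cannot satisfy them, the assist is switched off
 * again.
 */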
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}
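/* The TSO bring-up mirrors the checksum negotiation above: ASS_START
 * reports the adapter's MSS and supported large-send types, ASS_ENABLE
 * switches on QETH_IPA_LARGE_SEND_TCP, and the returned caps are verified.
 * A zero MSS or a missing TCP large-send capability aborts the sequence and
 * disables the assist again.
 */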
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
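/* The asymmetric return codes implement "best effort" enable and "strict"
 * disable. Example: on a card where only the IPv4 assist works, enabling RX
 * csum yields rc_ipv4 == 0 and rc_ipv6 != 0, and the function reports
 * success since one assist is active. Disabling must deactivate both
 * assists, so any non-zero rc is propagated as failure.
 */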
/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}
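/* The flush covers the cache of local next-hop addresses that
 * qeth_features_check() consults: once every offload restricted for
 * local-partition traffic has been switched off for a protocol, tracking
 * those addresses serves no purpose for that protocol anymore.
 */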
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return rc;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
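/* The 'changed' bookkeeping above works by clearing the bit of every toggle
 * that failed: if e.g. enabling NETIF_F_TSO fails, its bit is XORed back out
 * of 'changed'. At the end, 'dev->features ^= changed' applies exactly the
 * toggles that succeeded, and the mismatch between 'changed' and the
 * requested delta signals the partial failure to the caller.
 */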
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 * a (small) linear part for the headers, and
	 * page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
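/* Worked example for the linearization check (numbers illustrative,
 * assuming 4 KiB pages): with 14 bytes of L2 header (doffset), a 1460-byte
 * MSS (hsize) and typical headroom, the aligned sum stays well below
 * SKB_MAX_HEAD(0), so NETIF_F_SG is cleared and each segment is built as a
 * single linear buffer element. A jumbo-sized MSS exceeds the order-0 limit
 * and keeps scatter-gather instead.
 */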
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
#define TC_IQD_UCAST 0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}
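/* Example: with 4 ucast TX queues, this installs traffic class 0 spanning
 * the queues starting at offset QETH_IQD_MIN_UCAST_TXQ, and maps all
 * TC_BITMASK + 1 priorities to it, so the mcast queue
 * (QETH_IQD_MCAST_TXQ) is never picked by the prio map.
 */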
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
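/* On IQD, queue QETH_IQD_MCAST_TXQ is dedicated to non-unicast traffic, so
 * any stack-selected txq that collides with it is redirected to the first
 * ucast queue. With only QETH_IQD_MIN_TXQ queues available, all ucast
 * traffic funnels through that single ucast queue.
 */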
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");

	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i)
			napi_disable(&queue->napi);

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		qeth_for_each_output_queue(card, queue, i) {
			del_timer_sync(&queue->timer);
			/* Queues may get re-allocated, so remove the NAPIs. */
			netif_napi_del(&queue->napi);
		}
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
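/* Teardown order matters on IQD: the TX completion NAPIs are quiesced
 * first, then .ndo_start_xmit is stopped (it may still access queue->napi),
 * and only then are the NAPI instances deleted, since the queues may get
 * re-allocated before the next qeth_open().
 */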
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");