// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDIO unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_kick, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_kick, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
		__state = SLSB_P_OUTPUT_EMPTY;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* merge PENDING into EMPTY: */
		if (merge_pending &&
		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
		    __state == SLSB_P_OUTPUT_EMPTY)
			continue;

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}
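
/*
 * Worked example (editorial sketch, not part of the original driver): with
 * merge_pending set, a hypothetical SLSB run of EMPTY, PENDING, EMPTY,
 * PRIMED starting at bufnr 0 is reported as one block of three
 * SLSB_P_OUTPUT_EMPTY buffers, because PENDING entries are folded into a
 * surrounding EMPTY run; scanning stops at the PRIMED buffer.
 */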

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
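
/*
 * Editorial sketch (not part of the original driver): because next_buf()
 * masks with QDIO_MAX_BUFFERS_MASK, a call such as
 *
 *	set_buf_states(q, 126, SLSB_P_INPUT_NOT_INIT, 4);
 *
 * touches buffers 126, 127, 0 and 1 on a 128-buffer queue, i.e. the count
 * may safely wrap past the end of the SLSB array.
 */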

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
			     !q->u.out.use_cq));
	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	int pos;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	pos = ilog2(count);
	q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, start, state, count);
}

static inline void inbound_primed(struct qdio_q *q, unsigned int start,
				  int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = start;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = start;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(start, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	if (!count)
		return 0;

	/*
	 * No siga-sync needed here: either a PCI interrupt or we (after a
	 * thin interrupt) have already synced the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, start, count);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, start, count);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
	int count;

	count = get_inbound_buffer_frontier(q, start);

	if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
		q->u.in.timestamp = get_tod_clock();

	return count;
}

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
		return 1;
	} else
		return 0;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		return 0;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		WARN_ON_ONCE(phys_aob & 0xFF);
	}

	q->sbal_state[bufnr].flags = 0;
	return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
{
	int start = q->first_to_kick;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
		if (q->u.out.use_cq)
			qdio_handle_aobs(q, start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = add_buf(start, count);
	q->qdio_error = 0;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->tasklet);
		return 0;
	}
	return -EPERM;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_inbound);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return;

	start = add_buf(start, count);
	q->first_to_check = start;
	qdio_kick_handler(q, count);

	if (!qdio_inbound_q_done(q, start)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
	int count;

	count = get_outbound_buffer_frontier(q, start);

	if (count)
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);

	return count;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	count = qdio_outbound_q_moved(q, start);
	if (count) {
		q->first_to_check = add_buf(start, count);
		qdio_kick_handler(q, count);
	}

	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
	    !qdio_outbound_q_done(q))
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq))
		return;

	for_each_output_queue(irq, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/* The interrupt could be caused by a PCI request: */
	qdio_check_outbound_pci_queues(q->irq_ptr);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return;

	start = add_buf(start, count);
	q->first_to_check = start;
	qdio_kick_handler(q, count);

	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	if (!pci_out_supported(irq_ptr))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
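
/*
 * Usage sketch (editorial addition, not from the original file): a driver
 * that wants to inspect the subchannel's QDIO capabilities might do
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	if (qdio_get_ssqd_desc(cdev, &ssqd) == 0)
 *		pr_info("qfmt %u qdioac1 %#x\n", ssqd.qfmt, ssqd.qdioac1);
 *
 * where cdev is the driver's ccw_device; the field layout follows
 * struct qdio_ssqd_desc in arch/s390/include/asm/qdio.h.
 */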

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irq(get_ccwdev_lock(cdev));

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler) {
		cdev->handler = irq_ptr->orig_handler;
		cdev->private->intparm = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(cdev));

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;

	ccw_device_get_schid(init_data->cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(init_data, irq_ptr))
		goto out_rel;

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct ccw_device *cdev = init_data->cdev;
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
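
/*
 * Lifecycle sketch (editorial addition, not from the original file): the
 * usual bring-up order for a driver such as qeth or zfcp is
 *
 *	struct qdio_initialize init = {
 *		.cdev		= cdev,		   // the driver's ccw_device
 *		.no_input_qs	= 1,
 *		.no_output_qs	= 1,
 *		.input_handler	= my_in_handler,   // hypothetical callbacks
 *		.output_handler	= my_out_handler,
 *		// plus q_format, the SBAL address arrays, int_parm, ...
 *	};
 *
 *	rc = qdio_allocate(&init);
 *	if (!rc)
 *		rc = qdio_establish(&init);
 *	if (!rc)
 *		rc = qdio_activate(cdev);
 *
 * and teardown is the reverse: qdio_shutdown() followed by qdio_free().
 */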

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
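
/*
 * Worked example (editorial addition): with start = 120 and count = 16,
 * add_buf() wraps and end becomes 8, so the wrap-around branch is taken;
 * bufnr 125 and bufnr 3 are both reported as in between, while bufnr 50
 * is not.
 */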

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
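
/*
 * Usage sketch (editorial addition, not from the original file): once the
 * subchannel is active, a driver hands a filled SBAL to the adapter with
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, 1);
 *
 * and returns an emptied input buffer with QDIO_FLAG_SYNC_INPUT; bufnr is
 * the driver's cursor into the 128-entry buffer ring.
 */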

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	clear_nonshared_ind(irq_ptr);
	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;
	if (!qdio_inbound_q_done(q, q->first_to_check))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int start;
	int count;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	start = q->first_to_check;

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return 0;

	start = add_buf(start, count);
	q->first_to_check = start;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	*bufnr = q->first_to_kick;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = add_buf(q->first_to_kick, count);
	q->qdio_error = 0;

	return count;
}
EXPORT_SYMBOL(qdio_get_next_buffers);
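
/*
 * Polling-mode sketch (editorial addition, not from the original file): a
 * driver that disables QDIO interrupts, e.g. for NAPI-style processing,
 * typically loops like this
 *
 *	while ((n = qdio_get_next_buffers(cdev, 0, &bufnr, &error)) > 0)
 *		my_process_buffers(bufnr, n, error);	// hypothetical
 *
 *	if (n >= 0 && qdio_start_irq(cdev, 0) == 1)
 *		goto rescan;	// new data raced in, keep polling
 *
 * with qdio_stop_irq() used to re-enter polling mode from the
 * queue_start_poll callback.
 */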

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

/**
 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
 * @schid: Subchannel ID.
 * @cnc: Boolean Change-Notification Control
 * @response: Response code will be stored at this address
 * @cb: Callback function will be executed for each element
 *	of the address list
 * @priv: Pointer to pass to the callback function.
 *
 * Performs "Store-network-bridging-information list" operation and calls
 * the callback function for every entry in the list. If "change-
 * notification-control" is set, further changes in the address list
 * will be reported via the IPA command.
 */
int qdio_pnso_brinfo(struct subchannel_id schid,
		int cnc, u16 *response,
		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
				void *entry),
		void *priv)
{
	struct chsc_pnso_area *rr;
	int rc;
	u32 prev_instance = 0;
	int isfirstblock = 1;
	int i, size, elems;

	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
	if (rr == NULL)
		return -ENOMEM;
	do {
		/* on the first iteration, naihdr.resume_token will be zero */
		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
		if (rc != 0 && rc != -EBUSY)
			goto out;
		if (rr->response.code != 1) {
			rc = -EIO;
			continue;
		} else
			rc = 0;

		if (cb == NULL)
			continue;

		size = rr->naihdr.naids;
		elems = (rr->response.length -
				sizeof(struct chsc_header) -
				sizeof(struct chsc_brinfo_naihdr)) /
				size;

		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
			/* Inform the caller that they need to scrap */
			/* the data that was already reported via cb */
			rc = -EAGAIN;
			break;
		}
		isfirstblock = 0;
		prev_instance = rr->naihdr.instance;
		for (i = 0; i < elems; i++)
			switch (size) {
			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
				(*cb)(priv, l3_ipv6_addr,
						&rr->entries.l3_ipv6[i]);
				break;
			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
				(*cb)(priv, l3_ipv4_addr,
						&rr->entries.l3_ipv4[i]);
				break;
			case sizeof(struct qdio_brinfo_entry_l2):
				(*cb)(priv, l2_addr_lnid,
						&rr->entries.l2[i]);
				break;
			default:
				WARN_ON_ONCE(1);
				rc = -EIO;
				goto out;
			}
	} while (rr->response.code == 0x0107 ||	/* channel busy */
		  (rr->response.code == 1 &&	/* list stored */
		/* resume token is non-zero => list incomplete */
		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
	(*response) = rr->response.code;

out:
	free_page((unsigned long)rr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
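
/*
 * Callback sketch (editorial addition, not from the original file): a
 * qdio_pnso_brinfo() caller supplies a callback that is invoked once per
 * reported address entry, for example
 *
 *	static void my_brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
 *				 void *entry)
 *	{
 *		if (type == l2_addr_lnid)
 *			my_record_l2(priv, entry);	// hypothetical
 *	}
 *
 *	rc = qdio_pnso_brinfo(schid, 1, &response, my_brinfo_cb, priv);
 *
 * where a response code of 1 indicates success.
 */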

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);