// SPDX-License-Identifier: GPL-2.0
/*
 * channel program interfaces
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>

#include "vfio_ccw_cp.h"
/*
 * Max length for ccw chain.
 * XXX: Limit to 256, need to check more?
 */
#define CCWCHAIN_LEN_MAX        256

struct pfn_array {
        /* Starting guest physical I/O address. */
        unsigned long           pa_iova;
        /* Array of PFNs of the pages to pin (guest view). */
        unsigned long           *pa_iova_pfn;
        /* Array of PFNs of the pinned pages (host view). */
        unsigned long           *pa_pfn;
        /* Number of pages to pin / number of pages pinned. */
        int                     pa_nr;
};

struct pfn_array_table {
        struct pfn_array        *pat_pa;
        int                     pat_nr;
};

struct ccwchain {
        struct list_head        next;
        struct ccw1             *ch_ccw;
        /* Guest physical address of the current chain. */
        u64                     ch_iova;
        /* Count of the valid ccws in chain. */
        int                     ch_len;
        /* Pinned PAGEs for the original data. */
        struct pfn_array_table  *ch_pat;
};

/*
 * pfn_array_pin() - pin user pages in memory
 * @pa: pfn_array on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 *
 * Attempt to pin user pages in memory.
 *
 * Usage of pfn_array:
 * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
 * @pa->pa_iova_pfn array that stores PFNs of the pages that need to be
 *                  pinned. Allocated by caller.
 * @pa->pa_pfn     array that receives PFNs of the pages pinned. Allocated
 *                  by caller.
 * @pa->pa_nr      number of pages from @pa->pa_iova to pin. Assigned by
 *                  caller.
 *                  number of pages pinned. Assigned by callee.
 *
 * Returns:
 *   Number of pages pinned on success.
 *   If @pa->pa_nr is 0 or negative, returns 0.
 *   If no pages were pinned, returns -errno.
 */
static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
{
        int i, ret;

        if (pa->pa_nr <= 0) {
                pa->pa_nr = 0;
                return 0;
        }

        pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
        for (i = 1; i < pa->pa_nr; i++)
                pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;

        ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
                             IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

        if (ret > 0 && ret != pa->pa_nr) {
                vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                pa->pa_nr = 0;
                return 0;
        }

        return ret;
}
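
/*
 * Example (illustrative only; values are hypothetical): pinning two
 * pages starting at guest IOVA 0x21000 fills pa_iova_pfn with the
 * consecutive guest PFNs {0x21, 0x22}, and vfio_pin_pages() returns
 * the matching host PFNs in pa_pfn:
 *
 *	struct pfn_array pa = { .pa_iova = 0x21000, .pa_nr = 2 };
 *	// pa_iova_pfn/pa_pfn must point into a caller-allocated buffer
 *	ret = pfn_array_pin(&pa, mdev); // 2 on success, 0 or -errno else
 *
 * A short count from vfio is treated as a failure: everything already
 * pinned is rolled back and 0 is returned, so callers only ever see
 * "all pinned" or an error.
 */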

/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
        vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
        pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
}

/* Alloc memory for PFNs, then pin pages with them. */
static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                               u64 iova, unsigned int len)
{
        int ret = 0;

        if (!len || pa->pa_nr)
                return -EINVAL;

        pa->pa_iova = iova;

        pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (!pa->pa_nr)
                return -EINVAL;

        pa->pa_iova_pfn = kcalloc(pa->pa_nr,
                                  sizeof(*pa->pa_iova_pfn) +
                                  sizeof(*pa->pa_pfn),
                                  GFP_KERNEL);
        if (unlikely(!pa->pa_iova_pfn))
                return -ENOMEM;
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;

        ret = pfn_array_pin(pa, mdev);

        if (ret > 0)
                return ret;
        else if (!ret)
                ret = -EINVAL;

        kfree(pa->pa_iova_pfn);

        return ret;
}
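
/*
 * Worked example for the pa_nr computation above (4K pages, values
 * hypothetical): iova = 0x10ff0 and len = 0x20 straddle a page
 * boundary, and indeed ((0xff0 + 0x20) + 0xfff) >> 12 = 2. Note that
 * the single kcalloc() carries both arrays back to back (pa_pfn points
 * right behind pa_iova_pfn), so the one kfree() of pa_iova_pfn in
 * pfn_array_unpin_free() releases both.
 */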

static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
{
        pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
        if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
                pat->pat_nr = 0;
                return -ENOMEM;
        }

        pat->pat_nr = nr;

        return 0;
}

static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
                                       struct device *mdev)
{
        int i;

        for (i = 0; i < pat->pat_nr; i++)
                pfn_array_unpin_free(pat->pat_pa + i, mdev);

        if (pat->pat_nr) {
                kfree(pat->pat_pa);
                pat->pat_pa = NULL;
                pat->pat_nr = 0;
        }
}

static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
                                        unsigned long iova)
{
        struct pfn_array *pa = pat->pat_pa;
        unsigned long iova_pfn = iova >> PAGE_SHIFT;
        int i, j;

        for (i = 0; i < pat->pat_nr; i++, pa++)
                for (j = 0; j < pa->pa_nr; j++)
                        /* Index with j, the per-array page counter. */
                        if (pa->pa_iova_pfn[j] == iova_pfn)
                                return true;

        return false;
}

/* Create the list of idal words for a pfn_array_table. */
static inline void pfn_array_table_idal_create_words(
        struct pfn_array_table *pat,
        unsigned long *idaws)
{
        struct pfn_array *pa;
        int i, j, k;

        /*
         * Idal words (except the first one) rely on the memory being 4k
         * aligned. If a user virtual address is 4K aligned, then its
         * corresponding kernel physical address will also be 4K aligned.
         * Thus there will be no problem here to simply use the phys to
         * create an idaw.
         */
        k = 0;
        for (i = 0; i < pat->pat_nr; i++) {
                pa = pat->pat_pa + i;
                for (j = 0; j < pa->pa_nr; j++) {
                        idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
                        if (k == 0)
                                idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
                        k++;
                }
        }
}
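
/*
 * Worked example (hypothetical PFNs, 4K pages): a pfn_array pinned for
 * guest IOVA 0x20800 across two pages with host PFNs {0x5a, 0x5b}
 * produces
 *
 *	idaws[0] = (0x5a << 12) + 0x800 = 0x5a800   (keeps the offset)
 *	idaws[1] = (0x5b << 12)         = 0x5b000   (page aligned)
 *
 * i.e. the 64-bit idaw list the channel subsystem walks when
 * CCW_FLAG_IDA is set on the ccw.
 */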

/*
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host physical address (@to).
 */
static long copy_from_iova(struct device *mdev,
                           void *to, u64 iova,
                           unsigned long n)
{
        struct pfn_array pa = {0};
        u64 from;
        int i, ret;
        unsigned long l, m;

        ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
        if (ret <= 0)
                return ret;

        l = n;
        for (i = 0; i < pa.pa_nr; i++) {
                from = pa.pa_pfn[i] << PAGE_SHIFT;
                m = PAGE_SIZE;
                if (i == 0) {
                        from += iova & (PAGE_SIZE - 1);
                        m -= iova & (PAGE_SIZE - 1);
                }

                m = min(l, m);
                memcpy(to + (n - l), (void *)from, m);

                l -= m;
                if (l == 0)
                        break;
        }

        pfn_array_unpin_free(&pa, mdev);

        return l;
}
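
/*
 * Example (hypothetical values): copying n = 0x1000 bytes from
 * iova = 0x30c00 pins two pages and does two memcpy()s: 0x400 bytes
 * starting at offset 0xc00 of the first page, then 0xc00 bytes from
 * the start of the second. On success the returned l is 0; a positive
 * return would mean that many trailing bytes were left uncopied.
 */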

static long copy_ccw_from_iova(struct channel_program *cp,
                               struct ccw1 *to, u64 iova,
                               unsigned long len)
{
        struct ccw0 ccw0;
        struct ccw1 *pccw1;
        int ret;
        int i;

        ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
        if (ret)
                return ret;

        /* Convert each format-0 ccw to format-1 in place. */
        if (!cp->orb.cmd.fmt) {
                pccw1 = to;
                for (i = 0; i < len; i++) {
                        ccw0 = *(struct ccw0 *)pccw1;
                        if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
                                pccw1->cmd_code = CCW_CMD_TIC;
                                pccw1->flags = 0;
                                pccw1->count = 0;
                        } else {
                                pccw1->cmd_code = ccw0.cmd_code;
                                pccw1->flags = ccw0.flags;
                                pccw1->count = ccw0.count;
                        }
                        pccw1->cda = ccw0.cda;
                        pccw1++;
                }
        }

        return ret;
}
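
/*
 * Background for the in-place conversion above (layouts as defined in
 * <asm/cio.h>): a format-0 ccw keeps a 24-bit cda in its first word
 * and flags/count in the second, while a format-1 ccw keeps
 * cmd_code/flags/count in the first word and a 31-bit cda in the
 * second. Reading the raw bytes through a struct ccw0 view and
 * rewriting the fields through the struct ccw1 view therefore converts
 * the chain without a second buffer. A TIC is special-cased since only
 * its cda is architecturally meaningful.
 */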

/*
 * Helpers to operate ccwchain.
 */
#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)

#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)

#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)

#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)

#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))

static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
        struct ccwchain *chain;
        void *data;
        size_t size;

        /* Make ccw address aligned to 8. */
        size = ((sizeof(*chain) + 7L) & -8L) +
                sizeof(*chain->ch_ccw) * len +
                sizeof(*chain->ch_pat) * len;
        chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
        if (!chain)
                return NULL;

        data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
        chain->ch_ccw = (struct ccw1 *)data;

        data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
        chain->ch_pat = (struct pfn_array_table *)data;

        chain->ch_len = len;

        list_add_tail(&chain->next, &cp->ccwchain_list);

        return chain;
}
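
/*
 * Memory layout of the single allocation above:
 *
 *	[struct ccwchain, padded to 8][len * ccw1][len * pfn_array_table]
 *
 * ((sizeof(*chain) + 7L) & -8L) rounds the header up to a multiple of
 * 8 so that the embedded ccw1 array meets the channel subsystem's
 * doubleword alignment requirement, and GFP_DMA keeps it below 2G so
 * the 31-bit cpa/cda fields can address it.
 */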

static void ccwchain_free(struct ccwchain *chain)
{
        list_del(&chain->next);
        kfree(chain);
}

/* Free resources for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;

        if (!ccw->count)
                return;

        kfree((void *)(u64)ccw->cda);
}

/* Unpin the pages then free the memory resources. */
static void cp_unpin_free(struct channel_program *cp)
{
        struct ccwchain *chain, *temp;
        int i;

        list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
                for (i = 0; i < chain->ch_len; i++) {
                        pfn_array_table_unpin_free(chain->ch_pat + i,
                                                   cp->mdev);
                        ccwchain_cda_free(chain, i);
                }
                ccwchain_free(chain);
        }
}

/**
 * ccwchain_calc_length - calculate the length of the ccw chain.
 * @iova: guest physical address of the target ccw chain
 * @cp: channel_program on which to perform the operation
 *
 * This is the chain length not considering any TICs.
 * You need to do a new round for each TIC target.
 *
 * Returns: the length of the ccw chain or -errno.
 */
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
        struct ccw1 *ccw, *p;
        int cnt;

        /*
         * Copy current chain from guest to host kernel.
         * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
         * So copying 2K is enough (safe).
         */
        p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
        if (!ccw)
                return -ENOMEM;

        cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
        if (cnt) {
                kfree(ccw);
                return cnt;
        }

        cnt = 0;
        do {
                cnt++;

                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;

                /*
                 * Stop before reading past the CCWCHAIN_LEN_MAX entries
                 * copied above; a chain this long is unsupported.
                 */
                if (cnt == CCWCHAIN_LEN_MAX) {
                        cnt = -EINVAL;
                        break;
                }

                ccw++;
        } while (1);

        kfree(p);
        return cnt;
}

static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
        struct ccwchain *chain;
        u32 ccw_head, ccw_tail;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                ccw_head = chain->ch_iova;
                ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);

                if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
                        return 1;
        }

        return 0;
}

static int ccwchain_loop_tic(struct ccwchain *chain,
                             struct channel_program *cp);

static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
{
        struct ccwchain *chain;
        int len, ret;

        /* May transfer to an existing chain. */
        if (tic_target_chain_exists(tic, cp))
                return 0;

        /* Get chain length. */
        len = ccwchain_calc_length(tic->cda, cp);
        if (len < 0)
                return len;

        /* Need to allocate a new chain for this one. */
        chain = ccwchain_alloc(cp, len);
        if (!chain)
                return -ENOMEM;
        chain->ch_iova = tic->cda;

        /* Copy the new chain from guest. */
        ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
        if (ret) {
                ccwchain_free(chain);
                return ret;
        }

        /* Loop for tics on this new chain. */
        return ccwchain_loop_tic(chain, cp);
}

/* Loop for TICs. */
static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
{
        struct ccw1 *tic;
        int i, ret;

        for (i = 0; i < chain->ch_len; i++) {
                tic = chain->ch_ccw + i;

                if (!ccw_is_tic(tic))
                        continue;

                ret = ccwchain_handle_tic(tic, cp);
                if (ret)
                        return ret;
        }

        return 0;
}

static int ccwchain_fetch_tic(struct ccwchain *chain,
                              int idx,
                              struct channel_program *cp)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;
        struct ccwchain *iter;
        u32 ccw_head, ccw_tail;

        list_for_each_entry(iter, &cp->ccwchain_list, next) {
                ccw_head = iter->ch_iova;
                ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);

                if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
                        ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
                                                     (ccw->cda - ccw_head));
                        return 0;
                }
        }

        return -EFAULT;
}
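
/*
 * Example of the cda rewrite above (hypothetical addresses): if the
 * chain that starts at guest address ccw_head was copied to host
 * memory at iter->ch_ccw, a TIC whose cda is ccw_head + 0x20 is
 * repointed at iter->ch_ccw + 4 (0x20 / sizeof(struct ccw1)), so the
 * channel branches into the translated copy rather than guest memory.
 */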

static int ccwchain_fetch_direct(struct ccwchain *chain,
                                 int idx,
                                 struct channel_program *cp)
{
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
        int idaw_nr;

        ccw = chain->ch_ccw + idx;

        /*
         * Pin data page(s) in memory.
         * The number of pages is actually the count of the idaws which
         * will be needed when translating a direct ccw to an idal ccw.
         */
        pat = chain->ch_pat + idx;
        if (pfn_array_table_init(pat, 1))
                return -ENOMEM;
        idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
                                      ccw->cda, ccw->count);
        if (idaw_nr < 0)
                return idaw_nr;

        /* Translate this direct ccw to an idal ccw. */
        idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
                pfn_array_table_unpin_free(pat, cp->mdev);
                return -ENOMEM;
        }
        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;

        pfn_array_table_idal_create_words(pat, idaws);

        return 0;
}
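
/*
 * Example (hypothetical values): a direct-addressed ccw with
 * cda = 0x4ff8 and count = 16 crosses a page boundary, so
 * pfn_array_alloc_pin() pins two pages and returns idaw_nr = 2.
 * After translation the ccw carries CCW_FLAG_IDA and its cda points
 * at a two-entry idaw list in ZONE_DMA; the pinned data pages
 * themselves may now live anywhere, which removes the 2G cda limit.
 */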

static int ccwchain_fetch_idal(struct ccwchain *chain,
                               int idx,
                               struct channel_program *cp)
{
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
        u64 idaw_iova;
        unsigned int idaw_nr, idaw_len;
        int i, ret;

        ccw = chain->ch_ccw + idx;

        /* Calculate size of idaws. */
        ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
        if (ret)
                return ret;
        idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
        idaw_len = idaw_nr * sizeof(*idaws);

        /* Pin data page(s) in memory. */
        pat = chain->ch_pat + idx;
        ret = pfn_array_table_init(pat, idaw_nr);
        if (ret)
                return ret;

        /* Translate idal ccw to use new allocated idaws. */
        idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
        if (!idaws) {
                ret = -ENOMEM;
                goto out_unpin;
        }

        ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
        if (ret)
                goto out_free_idaws;

        ccw->cda = virt_to_phys(idaws);

        for (i = 0; i < idaw_nr; i++) {
                idaw_iova = *(idaws + i);
                if (IS_ERR_VALUE(idaw_iova)) {
                        ret = -EFAULT;
                        goto out_free_idaws;
                }

                ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
                                          idaw_iova, 1);
                if (ret < 0)
                        goto out_free_idaws;
        }

        pfn_array_table_idal_create_words(pat, idaws);

        return 0;

out_free_idaws:
        kfree(idaws);
out_unpin:
        pfn_array_table_unpin_free(pat, cp->mdev);
        return ret;
}
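
/*
 * Flow of the idal translation above: the first guest idaw is read to
 * learn the alignment of the data (idal_nr_words() needs it to size
 * the list), the whole guest idaw list is copied out, each guest idaw
 * is pinned individually (a length of 1 suffices, as an idaw never
 * maps more than one page), and pfn_array_table_idal_create_words()
 * finally overwrites the copied list with the host idaws before the
 * ccw is pointed at it.
 */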

/*
 * Fetch one ccw.
 * To reduce memory copy, we'll pin the cda page in memory,
 * and to get rid of the cda 2G limitation of ccw1, we'll translate
 * direct ccws to idal ccws.
 */
static int ccwchain_fetch_one(struct ccwchain *chain,
                              int idx,
                              struct channel_program *cp)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;

        if (ccw_is_test(ccw) || ccw_is_noop(ccw))
                return 0;

        if (ccw_is_tic(ccw))
                return ccwchain_fetch_tic(chain, idx, cp);

        if (ccw_is_idal(ccw))
                return ccwchain_fetch_idal(chain, idx, cp);

        return ccwchain_fetch_direct(chain, idx, cp);
}

/**
 * cp_init() - allocate ccwchains for a channel program.
 * @cp: channel_program on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 * @orb: control block for the channel program from the guest
 *
 * This creates one or more ccwchain(s), and copies the raw data of
 * the target channel program from @orb->cmd.cpa to the new ccwchain(s).
 *
 * Limitations:
 * 1. Supports only prefetch enabled mode.
 * 2. Supports idal(c64) ccw chaining.
 * 3. Supports 4k idaw.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
        u64 iova = orb->cmd.cpa;
        struct ccwchain *chain;
        int len, ret;

        /*
         * XXX:
         * Only prefetch enabled mode is supported now.
         * Only 64-bit addressing idals are supported.
         * Only 4k IDAWs are supported.
         */
        if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
                return -EOPNOTSUPP;

        INIT_LIST_HEAD(&cp->ccwchain_list);
        memcpy(&cp->orb, orb, sizeof(*orb));
        cp->mdev = mdev;

        /* Get chain length. */
        len = ccwchain_calc_length(iova, cp);
        if (len < 0)
                return len;

        /* Alloc mem for the head chain. */
        chain = ccwchain_alloc(cp, len);
        if (!chain)
                return -ENOMEM;
        chain->ch_iova = iova;

        /* Copy the head chain from guest. */
        ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
        if (ret) {
                ccwchain_free(chain);
                return ret;
        }

        /* Now loop for its TICs. */
        ret = ccwchain_loop_tic(chain, cp);
        if (ret)
                cp_unpin_free(cp);

        return ret;
}

/**
 * cp_free() - free resources for channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This unpins the memory pages and frees the memory space occupied by
 * @cp, which must have been returned by a previous call to cp_init().
 * Otherwise, undefined behavior occurs.
 */
void cp_free(struct channel_program *cp)
{
        cp_unpin_free(cp);
}

/**
 * cp_prefetch() - translate a guest physical address channel program to
 *                 a real-device runnable channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This function translates the guest-physical-address channel program
 * and stores the result to ccwchain list. @cp must have been
 * initialized by a previous call with cp_init(). Otherwise, undefined
 * behavior occurs.
 *
 * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
 * as helpers to do ccw chain translation inside the kernel. Basically
 * they accept a channel program issued by a virtual machine, and
 * translate the channel program to a real-device runnable channel
 * program.
 *
 * These APIs will copy the ccws into kernel-space buffers, and update
 * the guest physical addresses with their corresponding host physical
 * addresses. Then channel I/O device drivers could issue the
 * translated channel program to real devices to perform an I/O
 * operation.
 *
 * These interfaces are designed to support translation only for
 * channel programs, which are generated and formatted by a
 * guest. Thus this will make it possible for things like VFIO to
 * leverage the interfaces to passthrough a channel I/O mediated
 * device in QEMU.
 *
 * We support direct ccw chaining by translating them to idal ccws.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_prefetch(struct channel_program *cp)
{
        struct ccwchain *chain;
        int len, idx, ret;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                len = chain->ch_len;
                for (idx = 0; idx < len; idx++) {
                        ret = ccwchain_fetch_one(chain, idx, cp);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
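
/*
 * Typical calling sequence (a sketch; error handling elided, and the
 * actual ssch is issued by the surrounding vfio-ccw machinery):
 *
 *	struct channel_program cp;
 *	union orb *orb;
 *
 *	ret = cp_init(&cp, mdev, guest_orb);   // copy and chain the ccws
 *	ret = cp_prefetch(&cp);                // pin pages, translate cdas
 *	orb = cp_get_orb(&cp, intparm, lpm);   // orb->cmd.cpa = host ccws
 *	// ... issue the orb via ssch() and wait for the I/O interrupt ...
 *	cp_update_scsw(&cp, &irb->scsw);       // give the guest a guest cpa
 *	cp_free(&cp);                          // unpin and release everything
 */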

/**
 * cp_get_orb() - get the orb of the channel program
 * @cp: channel_program on which to perform the operation
 * @intparm: new intparm for the returned orb
 * @lpm: candidate value of the logical-path mask for the returned orb
 *
 * This function returns the address of the updated orb of the channel
 * program. Channel I/O device drivers could use this orb to issue a
 * ssch.
 */
union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
{
        union orb *orb;
        struct ccwchain *chain;
        struct ccw1 *cpa;

        orb = &cp->orb;

        orb->cmd.intparm = intparm;
        orb->cmd.fmt = 1;
        orb->cmd.key = PAGE_DEFAULT_KEY >> 4;

        if (orb->cmd.lpm == 0)
                orb->cmd.lpm = lpm;

        chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
        cpa = chain->ch_ccw;
        orb->cmd.cpa = (__u32) __pa(cpa);

        return orb;
}

/**
 * cp_update_scsw() - update scsw for a channel program.
 * @cp: channel_program on which to perform the operation
 * @scsw: I/O results of the channel program and also the target to be
 *        updated
 *
 * @scsw contains the I/O results of the channel program pointed to
 * by @cp. However, what @scsw->cpa stores is a host physical
 * address, which is meaningless for the guest, which is waiting for
 * the I/O results.
 *
 * This function updates @scsw->cpa to its corresponding guest physical
 * address.
 */
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
        struct ccwchain *chain;
        u32 cpa = scsw->cmd.cpa;
        u32 ccw_head, ccw_tail;

        /*
         * LATER:
         * For now, only update the cmd.cpa part. We may need to deal with
         * other portions of the schib as well, even if we don't return them
         * in the ioctl directly. Path status changes etc.
         */
        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                ccw_head = (u32)(u64)chain->ch_ccw;
                ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);

                if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
                        /*
                         * (cpa - ccw_head) is the offset value of the host
                         * physical ccw to its chain head.
                         * Adding this value to the guest physical ccw chain
                         * head gets us the guest cpa.
                         */
                        cpa = chain->ch_iova + (cpa - ccw_head);
                        break;
                }
        }

        scsw->cmd.cpa = cpa;
}

/**
 * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
 * @cp: channel_program on which to perform the operation
 * @iova: the iova to check
 *
 * If the @iova is currently pinned for the ccw chain, return true;
 * else return false.
 */
bool cp_iova_pinned(struct channel_program *cp, u64 iova)
{
        struct ccwchain *chain;
        int i;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                for (i = 0; i < chain->ch_len; i++)
                        if (pfn_array_table_iova_pinned(chain->ch_pat + i,
                                                        iova))
                                return true;
        }

        return false;
}